author		Linus Torvalds <torvalds@linux-foundation.org>	2019-03-06 17:18:59 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-06 17:18:59 -0500
commit		45763bf4bc1ebdf8eb95697607e1fd042a3e1221 (patch)
tree		c5b26c2d5d1190247b59d6d1fe68b8a247351362
parent		da2577fe63f865cd9dc785a42c29c0071f567a35 (diff)
parent		142a0f83b216a607aebed42e54a1be620765e28c (diff)
Merge tag 'char-misc-5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here is the big char/misc driver patch pull request for 5.1-rc1.

  The largest thing by far is the new habanalabs driver for their AI
  accelerator chip. For now it is in the drivers/misc directory but will
  probably move to a new directory soon along with other drivers of this
  type.

  Other than that, just the usual set of individual driver updates and
  fixes. There's an "odd" merge in here from the DRM tree that they asked
  me to do as the MEI driver is starting to interact with the i915
  driver, and it needed some coordination. All of those patches have been
  properly acked by the relevant subsystem maintainers.

  All of these have been in linux-next with no reported issues, most for
  quite some time"

* tag 'char-misc-5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (219 commits)
  habanalabs: adjust Kconfig to fix build errors
  habanalabs: use %px instead of %p in error print
  habanalabs: use do_div for 64-bit divisions
  intel_th: gth: Fix an off-by-one in output unassigning
  habanalabs: fix little-endian<->cpu conversion warnings
  habanalabs: use NULL to initialize array of pointers
  habanalabs: fix little-endian<->cpu conversion warnings
  habanalabs: soft-reset device if context-switch fails
  habanalabs: print pointer using %p
  habanalabs: fix memory leak with CBs with unaligned size
  habanalabs: return correct error code on MMU mapping failure
  habanalabs: add comments in uapi/misc/habanalabs.h
  habanalabs: extend QMAN0 job timeout
  habanalabs: set DMA0 completion to SOB 1007
  habanalabs: fix validation of WREG32 to DMA completion
  habanalabs: fix mmu cache registers init
  habanalabs: disable CPU access on timeouts
  habanalabs: add MMU DRAM default page mapping
  habanalabs: Dissociate RAZWI info from event types
  misc/habanalabs: adjust Kconfig to fix build errors
  ...
-rw-r--r--CREDITS2
-rw-r--r--Documentation/ABI/stable/sysfs-bus-vmbus33
-rw-r--r--Documentation/ABI/testing/debugfs-driver-habanalabs126
-rw-r--r--Documentation/ABI/testing/sysfs-bus-intel_th-output-devices6
-rw-r--r--Documentation/ABI/testing/sysfs-driver-habanalabs190
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt27
-rw-r--r--Documentation/devicetree/bindings/gnss/gnss.txt1
-rw-r--r--Documentation/devicetree/bindings/gnss/mediatek.txt35
-rw-r--r--Documentation/devicetree/bindings/gnss/sirfstar.txt1
-rw-r--r--Documentation/devicetree/bindings/interconnect/interconnect.txt60
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt24
-rw-r--r--Documentation/devicetree/bindings/misc/qcom,fastrpc.txt78
-rw-r--r--Documentation/devicetree/bindings/nvmem/imx-ocotp.txt4
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/driver-api/component.rst17
-rw-r--r--Documentation/driver-api/device_link.rst3
-rw-r--r--Documentation/driver-api/index.rst1
-rw-r--r--Documentation/interconnect/interconnect.rst94
-rw-r--r--MAINTAINERS21
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/m68k/Kconfig.machine2
-rw-r--r--arch/m68k/atari/Makefile2
-rw-r--r--arch/m68k/atari/nvram.c272
-rw-r--r--arch/m68k/include/asm/atarihw.h6
-rw-r--r--arch/m68k/include/asm/macintosh.h4
-rw-r--r--arch/m68k/kernel/setup_mm.c82
-rw-r--r--arch/m68k/mac/misc.c174
-rw-r--r--arch/parisc/include/asm/io.h9
-rw-r--r--arch/parisc/lib/iomap.c64
-rw-r--r--arch/powerpc/Kconfig6
-rw-r--r--arch/powerpc/include/asm/io.h2
-rw-r--r--arch/powerpc/include/asm/nvram.h9
-rw-r--r--arch/powerpc/kernel/nvram_64.c158
-rw-r--r--arch/powerpc/kernel/setup_32.c36
-rw-r--r--arch/powerpc/platforms/chrp/Makefile2
-rw-r--r--arch/powerpc/platforms/chrp/nvram.c14
-rw-r--r--arch/powerpc/platforms/chrp/setup.c2
-rw-r--r--arch/powerpc/platforms/powermac/Makefile2
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c9
-rw-r--r--arch/powerpc/platforms/powermac/setup.c3
-rw-r--r--arch/powerpc/platforms/powermac/time.c2
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c2
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/android/Kconfig2
-rw-r--r--drivers/android/binder.c556
-rw-r--r--drivers/android/binder_alloc.c303
-rw-r--r--drivers/android/binder_alloc.h47
-rw-r--r--drivers/android/binder_alloc_selftest.c7
-rw-r--r--drivers/android/binder_trace.h2
-rw-r--r--drivers/base/component.c206
-rw-r--r--drivers/char/Kconfig19
-rw-r--r--drivers/char/Makefile6
-rw-r--r--drivers/char/applicom.c35
-rw-r--r--drivers/char/efirtc.c23
-rw-r--r--drivers/char/generic_nvram.c159
-rw-r--r--drivers/char/hpet.c9
-rw-r--r--drivers/char/lp.c4
-rw-r--r--drivers/char/mbcs.c1
-rw-r--r--drivers/char/nvram.c673
-rw-r--r--drivers/extcon/Kconfig8
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/extcon-ptn5150.c339
-rw-r--r--drivers/fpga/Kconfig2
-rw-r--r--drivers/fpga/altera-ps-spi.c2
-rw-r--r--drivers/gnss/Kconfig13
-rw-r--r--drivers/gnss/Makefile3
-rw-r--r--drivers/gnss/core.c1
-rw-r--r--drivers/gnss/mtk.c152
-rw-r--r--drivers/gnss/sirf.c256
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c4
-rw-r--r--drivers/gpu/drm/i915/intel_display.h16
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c20
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c9
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h3
-rw-r--r--drivers/hv/channel.c4
-rw-r--r--drivers/hv/channel_mgmt.c18
-rw-r--r--drivers/hv/hyperv_vmbus.h4
-rw-r--r--drivers/hv/ring_buffer.c14
-rw-r--r--drivers/hv/vmbus_drv.c86
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c3
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c12
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c4
-rw-r--r--drivers/hwtracing/intel_th/core.c6
-rw-r--r--drivers/hwtracing/intel_th/gth.c4
-rw-r--r--drivers/hwtracing/intel_th/pti.c16
-rw-r--r--drivers/hwtracing/intel_th/sth.c4
-rw-r--r--drivers/hwtracing/stm/core.c11
-rw-r--r--drivers/interconnect/Kconfig15
-rw-r--r--drivers/interconnect/Makefile6
-rw-r--r--drivers/interconnect/core.c799
-rw-r--r--drivers/interconnect/qcom/Kconfig13
-rw-r--r--drivers/interconnect/qcom/Makefile5
-rw-r--r--drivers/interconnect/qcom/sdm845.c838
-rw-r--r--drivers/macintosh/via-cuda.c8
-rw-r--r--drivers/misc/Kconfig12
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/ad525x_dpot.c24
-rw-r--r--drivers/misc/cardreader/rts5227.c64
-rw-r--r--drivers/misc/cardreader/rts5249.c32
-rw-r--r--drivers/misc/cardreader/rts5260.c136
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c40
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h5
-rw-r--r--drivers/misc/enclosure.c4
-rw-r--r--drivers/misc/fastrpc.c1401
-rw-r--r--drivers/misc/habanalabs/Kconfig25
-rw-r--r--drivers/misc/habanalabs/Makefile14
-rw-r--r--drivers/misc/habanalabs/asid.c57
-rw-r--r--drivers/misc/habanalabs/command_buffer.c445
-rw-r--r--drivers/misc/habanalabs/command_submission.c780
-rw-r--r--drivers/misc/habanalabs/context.c215
-rw-r--r--drivers/misc/habanalabs/debugfs.c1077
-rw-r--r--drivers/misc/habanalabs/device.c1140
-rw-r--r--drivers/misc/habanalabs/goya/Makefile3
-rw-r--r--drivers/misc/habanalabs/goya/goya.c5391
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h211
-rw-r--r--drivers/misc/habanalabs/goya/goya_hwmgr.c254
-rw-r--r--drivers/misc/habanalabs/goya/goya_security.c2999
-rw-r--r--drivers/misc/habanalabs/habanalabs.h1464
-rw-r--r--drivers/misc/habanalabs/habanalabs_drv.c461
-rw-r--r--drivers/misc/habanalabs/habanalabs_ioctl.c234
-rw-r--r--drivers/misc/habanalabs/hw_queue.c635
-rw-r--r--drivers/misc/habanalabs/hwmon.c458
-rw-r--r--drivers/misc/habanalabs/include/armcp_if.h335
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h191
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h49
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h181
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h227
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h465
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h1372
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h275
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h118
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h653
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h331
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h331
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h331
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h331
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h331
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h331
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h373
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h1537
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h465
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h1153
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h143
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h53
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h227
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h243
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h447
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h745
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h143
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h83
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h83
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h83
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h83
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h83
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h117
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h55
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h1607
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h373
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h347
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h313
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h227
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h465
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h323
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h323
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h323
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h323
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h323
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h323
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h227
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/goya.h45
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_async_events.h186
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_fw_if.h28
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_packets.h129
-rw-r--r--drivers/misc/habanalabs/include/hl_boot_if.h30
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h47
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h15
-rw-r--r--drivers/misc/habanalabs/include/qman_if.h56
-rw-r--r--drivers/misc/habanalabs/irq.c327
-rw-r--r--drivers/misc/habanalabs/memory.c1723
-rw-r--r--drivers/misc/habanalabs/mmu.c906
-rw-r--r--drivers/misc/habanalabs/sysfs.c539
-rw-r--r--drivers/misc/hpilo.c14
-rw-r--r--drivers/misc/ics932s401.c2
-rw-r--r--drivers/misc/lkdtm/core.c15
-rw-r--r--drivers/misc/lkdtm/lkdtm.h2
-rw-r--r--drivers/misc/lkdtm/perms.c36
-rw-r--r--drivers/misc/mei/Kconfig10
-rw-r--r--drivers/misc/mei/Makefile2
-rw-r--r--drivers/misc/mei/bus-fixup.c16
-rw-r--r--drivers/misc/mei/bus.c22
-rw-r--r--drivers/misc/mei/hbm.c7
-rw-r--r--drivers/misc/mei/hdcp/Makefile7
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c849
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.h377
-rw-r--r--drivers/misc/mei/hw.h3
-rw-r--r--drivers/misc/mic/Kconfig3
-rw-r--r--drivers/misc/mic/bus/scif_bus.h8
-rw-r--r--drivers/misc/mic/bus/vop_bus.h8
-rw-r--r--drivers/misc/mic/card/mic_device.c8
-rw-r--r--drivers/misc/mic/host/mic_boot.c8
-rw-r--r--drivers/misc/mic/scif/scif_map.h4
-rw-r--r--drivers/misc/mic/scif/scif_rma.c2
-rw-r--r--drivers/misc/mic/vop/vop_main.c29
-rw-r--r--drivers/misc/mic/vop/vop_vringh.c51
-rw-r--r--drivers/misc/sgi-gru/grufault.c4
-rw-r--r--drivers/misc/vmw_balloon.c24
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c9
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.h2
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.h2
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c39
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c63
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.h4
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h30
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c36
-rw-r--r--drivers/nvmem/Kconfig2
-rw-r--r--drivers/nvmem/bcm-ocotp.c37
-rw-r--r--drivers/nvmem/core.c42
-rw-r--r--drivers/nvmem/imx-ocotp.c13
-rw-r--r--drivers/nvmem/sc27xx-efuse.c12
-rw-r--r--drivers/parport/daisy.c32
-rw-r--r--drivers/parport/parport_pc.c2
-rw-r--r--drivers/parport/probe.c2
-rw-r--r--drivers/parport/share.c10
-rw-r--r--drivers/platform/goldfish/Kconfig4
-rw-r--r--drivers/scsi/Kconfig6
-rw-r--r--drivers/scsi/atari_scsi.c10
-rw-r--r--drivers/slimbus/core.c45
-rw-r--r--drivers/uio/uio.c16
-rw-r--r--drivers/uio/uio_pci_generic.c17
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/controlfb.c42
-rw-r--r--drivers/video/fbdev/imsttfb.c23
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c7
-rw-r--r--drivers/video/fbdev/platinumfb.c21
-rw-r--r--drivers/video/fbdev/valkyriefb.c30
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c2
-rw-r--r--include/asm-generic/iomap.h22
-rw-r--r--include/drm/drm_audio_component.h1
-rw-r--r--include/drm/drm_hdcp.h18
-rw-r--r--include/drm/i915_component.h5
-rw-r--r--include/drm/i915_drm.h15
-rw-r--r--include/drm/i915_mei_hdcp_interface.h149
-rw-r--r--include/dt-bindings/interconnect/qcom,sdm845.h143
-rw-r--r--include/linux/component.h76
-rw-r--r--include/linux/gnss.h1
-rw-r--r--include/linux/hyperv.h144
-rw-r--r--include/linux/interconnect-provider.h142
-rw-r--r--include/linux/interconnect.h59
-rw-r--r--include/linux/io-64-nonatomic-hi-lo.h64
-rw-r--r--include/linux/io-64-nonatomic-lo-hi.h64
-rw-r--r--include/linux/mei_cl_bus.h2
-rw-r--r--include/linux/nvram.h133
-rw-r--r--include/linux/parport.h13
-rw-r--r--include/linux/vmw_vmci_defs.h7
-rw-r--r--include/sound/hda_component.h5
-rw-r--r--include/uapi/linux/android/binder.h19
-rw-r--r--include/uapi/linux/pmu.h2
-rw-r--r--include/uapi/misc/fastrpc.h41
-rw-r--r--include/uapi/misc/habanalabs.h450
-rw-r--r--lib/Kconfig.debug1
-rw-r--r--lib/iomap.c140
-rwxr-xr-xscripts/ver_linux6
-rw-r--r--sound/hda/hdac_component.c4
-rw-r--r--sound/hda/hdac_i915.c6
315 files changed, 60732 insertions, 1926 deletions
diff --git a/CREDITS b/CREDITS
index 0175098d4776..8e0342620a06 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1221,7 +1221,7 @@ S: Brazil
 
 N: Oded Gabbay
 E: oded.gabbay@gmail.com
-D: AMD KFD maintainer
+D: HabanaLabs and AMD KFD maintainer
 S: 12 Shraga Raphaeli
 S: Petah-Tikva, 4906418
 S: Israel
diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus
index 3fed8fdb873d..826689dcc2e6 100644
--- a/Documentation/ABI/stable/sysfs-bus-vmbus
+++ b/Documentation/ABI/stable/sysfs-bus-vmbus
@@ -146,3 +146,36 @@ KernelVersion: 4.16
 Contact:	Stephen Hemminger <sthemmin@microsoft.com>
 Description:	Binary file created by uio_hv_generic for ring buffer
 Users:		Userspace drivers
+
+What:		/sys/bus/vmbus/devices/<UUID>/channels/<N>/intr_in_full
+Date:		February 2019
+KernelVersion:	5.0
+Contact:	Michael Kelley <mikelley@microsoft.com>
+Description:	Number of guest to host interrupts caused by the inbound ring
+		buffer transitioning from full to not full while a packet is
+		waiting for buffer space to become available
+Users:		Debugging tools
+
+What:		/sys/bus/vmbus/devices/<UUID>/channels/<N>/intr_out_empty
+Date:		February 2019
+KernelVersion:	5.0
+Contact:	Michael Kelley <mikelley@microsoft.com>
+Description:	Number of guest to host interrupts caused by the outbound ring
+		buffer transitioning from empty to not empty
+Users:		Debugging tools
+
+What:		/sys/bus/vmbus/devices/<UUID>/channels/<N>/out_full_first
+Date:		February 2019
+KernelVersion:	5.0
+Contact:	Michael Kelley <mikelley@microsoft.com>
+Description:	Number of write operations that were the first to encounter an
+		outbound ring buffer full condition
+Users:		Debugging tools
+
+What:		/sys/bus/vmbus/devices/<UUID>/channels/<N>/out_full_total
+Date:		February 2019
+KernelVersion:	5.0
+Contact:	Michael Kelley <mikelley@microsoft.com>
+Description:	Total number of write operations that encountered an outbound
+		ring buffer full condition
+Users:		Debugging tools
diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs
new file mode 100644
index 000000000000..2f5b80be07a3
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs
@@ -0,0 +1,126 @@
1What: /sys/kernel/debug/habanalabs/hl<n>/addr
2Date: Jan 2019
3KernelVersion: 5.1
4Contact: oded.gabbay@gmail.com
5Description: Sets the device address to be used for read or write through
6 PCI bar. The acceptable value is a string that starts with "0x"
7
8What: /sys/kernel/debug/habanalabs/hl<n>/command_buffers
9Date: Jan 2019
10KernelVersion: 5.1
11Contact: oded.gabbay@gmail.com
12Description: Displays a list with information about the currently allocated
13 command buffers
14
15What: /sys/kernel/debug/habanalabs/hl<n>/command_submission
16Date: Jan 2019
17KernelVersion: 5.1
18Contact: oded.gabbay@gmail.com
19Description: Displays a list with information about the currently active
20 command submissions
21
22What: /sys/kernel/debug/habanalabs/hl<n>/command_submission_jobs
23Date: Jan 2019
24KernelVersion: 5.1
25Contact: oded.gabbay@gmail.com
26Description: Displays a list with detailed information about each JOB (CB) of
27 each active command submission
28
29What: /sys/kernel/debug/habanalabs/hl<n>/data32
30Date: Jan 2019
31KernelVersion: 5.1
32Contact: oded.gabbay@gmail.com
33Description: Allows the root user to read or write directly through the
34 device's PCI bar. Writing to this file generates a write
35 transaction while reading from the file generates a read
36 transcation. This custom interface is needed (instead of using
37 the generic Linux user-space PCI mapping) because the DDR bar
38 is very small compared to the DDR memory and only the driver can
39 move the bar before and after the transaction
40
41What: /sys/kernel/debug/habanalabs/hl<n>/device
42Date: Jan 2019
43KernelVersion: 5.1
44Contact: oded.gabbay@gmail.com
44Description:    Enables the root user to set the device to a specific state.
45	 Valid values are "disable", "enable", "suspend", "resume".
46	 Users can read this property to see the valid values
48
49What: /sys/kernel/debug/habanalabs/hl<n>/i2c_addr
50Date: Jan 2019
51KernelVersion: 5.1
52Contact: oded.gabbay@gmail.com
53Description: Sets I2C device address for I2C transaction that is generated
54 by the device's CPU
55
56What: /sys/kernel/debug/habanalabs/hl<n>/i2c_bus
57Date: Jan 2019
58KernelVersion: 5.1
59Contact: oded.gabbay@gmail.com
60Description: Sets I2C bus address for I2C transaction that is generated by
61 the device's CPU
62
63What: /sys/kernel/debug/habanalabs/hl<n>/i2c_data
64Date: Jan 2019
65KernelVersion: 5.1
66Contact: oded.gabbay@gmail.com
67Description: Triggers an I2C transaction that is generated by the device's
68 CPU. Writing to this file generates a write transaction while
69	 reading from the file generates a read transaction
70
71What: /sys/kernel/debug/habanalabs/hl<n>/i2c_reg
72Date: Jan 2019
73KernelVersion: 5.1
74Contact: oded.gabbay@gmail.com
75Description: Sets I2C register id for I2C transaction that is generated by
76 the device's CPU
77
78What: /sys/kernel/debug/habanalabs/hl<n>/led0
79Date: Jan 2019
80KernelVersion: 5.1
81Contact: oded.gabbay@gmail.com
82Description: Sets the state of the first S/W led on the device
83
84What: /sys/kernel/debug/habanalabs/hl<n>/led1
85Date: Jan 2019
86KernelVersion: 5.1
87Contact: oded.gabbay@gmail.com
88Description: Sets the state of the second S/W led on the device
89
90What: /sys/kernel/debug/habanalabs/hl<n>/led2
91Date: Jan 2019
92KernelVersion: 5.1
93Contact: oded.gabbay@gmail.com
94Description: Sets the state of the third S/W led on the device
95
96What: /sys/kernel/debug/habanalabs/hl<n>/mmu
97Date: Jan 2019
98KernelVersion: 5.1
99Contact: oded.gabbay@gmail.com
100Description: Displays the hop values and physical address for a given ASID
101 and virtual address. The user should write the ASID and VA into
102 the file and then read the file to get the result.
103 e.g. to display info about VA 0x1000 for ASID 1 you need to do:
104 echo "1 0x1000" > /sys/kernel/debug/habanalabs/hl0/mmu
105
106What: /sys/kernel/debug/habanalabs/hl<n>/set_power_state
107Date: Jan 2019
108KernelVersion: 5.1
109Contact: oded.gabbay@gmail.com
110Description: Sets the PCI power state. Valid values are "1" for D0 and "2"
111 for D3Hot
112
113What: /sys/kernel/debug/habanalabs/hl<n>/userptr
114Date: Jan 2019
115KernelVersion: 5.1
116Contact: oded.gabbay@gmail.com
117Description:    Displays a list with information about the user pointers
118	 (user virtual addresses) that are currently pinned and mapped
119	 to DMA addresses
120
121What: /sys/kernel/debug/habanalabs/hl<n>/vm
122Date: Jan 2019
123KernelVersion: 5.1
124Contact: oded.gabbay@gmail.com
125Description: Displays a list with information about all the active virtual
126 address mappings per ASID
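For illustration only (not part of the patch itself), a minimal user-space sketch of the addr/data32 debugfs interface documented above could look like the following; the device index hl0 and the target address are assumed example values:

/*
 * Illustrative sketch: set a device address via the "addr" file, then
 * read "data32" to trigger a read transaction through the PCI bar.
 * The hl0 index and the 0x20000000 address are hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd;

	/* Select the device address that the next data32 access will use. */
	fd = open("/sys/kernel/debug/habanalabs/hl0/addr", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "0x20000000", strlen("0x20000000")) < 0) {
		close(fd);
		return 1;
	}
	close(fd);

	/* Reading data32 now generates a read transaction on the device. */
	fd = open("/sys/kernel/debug/habanalabs/hl0/data32", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("value: %s\n", buf);
	}
	close(fd);
	return 0;
}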
diff --git a/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices b/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices
index 4d48a9451866..d1f667104944 100644
--- a/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices
+++ b/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices
@@ -3,11 +3,13 @@ Date: June 2015
 KernelVersion:	4.3
 Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
 Description:	(RW) Writes of 1 or 0 enable or disable trace output to this
-		output device. Reads return current status.
+		output device. Reads return current status. Requires that the
+		corresponding output port driver be loaded.
 
 What:		/sys/bus/intel_th/devices/<intel_th_id>-msc<msc-id>/port
 Date:		June 2015
 KernelVersion:	4.3
 Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
 Description:	(RO) Port number, corresponding to this output device on the
-		switch (GTH).
+		switch (GTH) or "unassigned" if the corresponding output
+		port driver is not loaded.
diff --git a/Documentation/ABI/testing/sysfs-driver-habanalabs b/Documentation/ABI/testing/sysfs-driver-habanalabs
new file mode 100644
index 000000000000..78b2bcf316a3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-habanalabs
@@ -0,0 +1,190 @@
1What: /sys/class/habanalabs/hl<n>/armcp_kernel_ver
2Date: Jan 2019
3KernelVersion: 5.1
4Contact: oded.gabbay@gmail.com
5Description: Version of the Linux kernel running on the device's CPU
6
7What: /sys/class/habanalabs/hl<n>/armcp_ver
8Date: Jan 2019
9KernelVersion: 5.1
10Contact: oded.gabbay@gmail.com
11Description: Version of the application running on the device's CPU
12
13What: /sys/class/habanalabs/hl<n>/cpld_ver
14Date: Jan 2019
15KernelVersion: 5.1
16Contact: oded.gabbay@gmail.com
17Description: Version of the Device's CPLD F/W
18
19What: /sys/class/habanalabs/hl<n>/device_type
20Date: Jan 2019
21KernelVersion: 5.1
22Contact: oded.gabbay@gmail.com
23Description: Displays the code name of the device according to its type.
24 The supported values are: "GOYA"
25
26What: /sys/class/habanalabs/hl<n>/eeprom
27Date: Jan 2019
28KernelVersion: 5.1
29Contact: oded.gabbay@gmail.com
30Description: A binary file attribute that contains the contents of the
31 on-board EEPROM
32
33What: /sys/class/habanalabs/hl<n>/fuse_ver
34Date: Jan 2019
35KernelVersion: 5.1
36Contact: oded.gabbay@gmail.com
37Description: Displays the device's version from the eFuse
38
39What: /sys/class/habanalabs/hl<n>/hard_reset
40Date: Jan 2019
41KernelVersion: 5.1
42Contact: oded.gabbay@gmail.com
43Description: Interface to trigger a hard-reset operation for the device.
44 Hard-reset will reset ALL internal components of the device
45 except for the PCI interface and the internal PLLs
46
47What: /sys/class/habanalabs/hl<n>/hard_reset_cnt
48Date: Jan 2019
49KernelVersion: 5.1
50Contact: oded.gabbay@gmail.com
51Description:    Displays how many times the device has undergone a hard-reset
52 operation since the driver was loaded
53
54What: /sys/class/habanalabs/hl<n>/high_pll
55Date: Jan 2019
56KernelVersion: 5.1
57Contact: oded.gabbay@gmail.com
58Description: Allows the user to set the maximum clock frequency for MME, TPC
59 and IC when the power management profile is set to "automatic".
60
61What: /sys/class/habanalabs/hl<n>/ic_clk
62Date: Jan 2019
63KernelVersion: 5.1
64Contact: oded.gabbay@gmail.com
65Description: Allows the user to set the maximum clock frequency of the
66 Interconnect fabric. Writes to this parameter affect the device
67 only when the power management profile is set to "manual" mode.
68	 The device IC clock might be set to a lower value than the
69 maximum. The user should read the ic_clk_curr to see the actual
70 frequency value of the IC
71
72What: /sys/class/habanalabs/hl<n>/ic_clk_curr
73Date: Jan 2019
74KernelVersion: 5.1
75Contact: oded.gabbay@gmail.com
76Description: Displays the current clock frequency of the Interconnect fabric
77
78What: /sys/class/habanalabs/hl<n>/infineon_ver
79Date: Jan 2019
80KernelVersion: 5.1
81Contact: oded.gabbay@gmail.com
82Description: Version of the Device's power supply F/W code
83
84What: /sys/class/habanalabs/hl<n>/max_power
85Date: Jan 2019
86KernelVersion: 5.1
87Contact: oded.gabbay@gmail.com
88Description: Allows the user to set the maximum power consumption of the
89 device in milliwatts.
90
91What: /sys/class/habanalabs/hl<n>/mme_clk
92Date: Jan 2019
93KernelVersion: 5.1
94Contact: oded.gabbay@gmail.com
95Description: Allows the user to set the maximum clock frequency of the
96 MME compute engine. Writes to this parameter affect the device
97 only when the power management profile is set to "manual" mode.
98	 The device MME clock might be set to a lower value than the
99 maximum. The user should read the mme_clk_curr to see the actual
100 frequency value of the MME
101
102What: /sys/class/habanalabs/hl<n>/mme_clk_curr
103Date: Jan 2019
104KernelVersion: 5.1
105Contact: oded.gabbay@gmail.com
106Description: Displays the current clock frequency of the MME compute engine
107
108What: /sys/class/habanalabs/hl<n>/pci_addr
109Date: Jan 2019
110KernelVersion: 5.1
111Contact: oded.gabbay@gmail.com
112Description: Displays the PCI address of the device. This is needed so the
113 user would be able to open a device based on its PCI address
114
115What: /sys/class/habanalabs/hl<n>/pm_mng_profile
116Date: Jan 2019
117KernelVersion: 5.1
118Contact: oded.gabbay@gmail.com
119Description: Power management profile. Values are "auto", "manual". In "auto"
120 mode, the driver will set the maximum clock frequency to a high
121 value when a user-space process opens the device's file (unless
122 it was already opened by another process). The driver will set
123 the max clock frequency to a low value when there are no user
124	 processes that have the device's file open. In "manual"
125 mode, the user sets the maximum clock frequency by writing to
126 ic_clk, mme_clk and tpc_clk
127
128
129What: /sys/class/habanalabs/hl<n>/preboot_btl_ver
130Date: Jan 2019
131KernelVersion: 5.1
132Contact: oded.gabbay@gmail.com
133Description: Version of the device's preboot F/W code
134
135What: /sys/class/habanalabs/hl<n>/soft_reset
136Date: Jan 2019
137KernelVersion: 5.1
138Contact: oded.gabbay@gmail.com
139Description: Interface to trigger a soft-reset operation for the device.
140 Soft-reset will reset only the compute and DMA engines of the
141 device
142
143What: /sys/class/habanalabs/hl<n>/soft_reset_cnt
144Date: Jan 2019
145KernelVersion: 5.1
146Contact: oded.gabbay@gmail.com
147Description:    Displays how many times the device has undergone a soft-reset
148 operation since the driver was loaded
149
150What: /sys/class/habanalabs/hl<n>/status
151Date: Jan 2019
152KernelVersion: 5.1
153Contact: oded.gabbay@gmail.com
154Description: Status of the card: "Operational", "Malfunction", "In reset".
155
156What: /sys/class/habanalabs/hl<n>/thermal_ver
157Date: Jan 2019
158KernelVersion: 5.1
159Contact: oded.gabbay@gmail.com
160Description: Version of the Device's thermal daemon
161
162What: /sys/class/habanalabs/hl<n>/tpc_clk
163Date: Jan 2019
164KernelVersion: 5.1
165Contact: oded.gabbay@gmail.com
166Description: Allows the user to set the maximum clock frequency of the
167 TPC compute engines. Writes to this parameter affect the device
168 only when the power management profile is set to "manual" mode.
169	 The device TPC clock might be set to a lower value than the
170 maximum. The user should read the tpc_clk_curr to see the actual
171 frequency value of the TPC
172
173What: /sys/class/habanalabs/hl<n>/tpc_clk_curr
174Date: Jan 2019
175KernelVersion: 5.1
176Contact: oded.gabbay@gmail.com
177Description: Displays the current clock frequency of the TPC compute engines
178
179What: /sys/class/habanalabs/hl<n>/uboot_ver
180Date: Jan 2019
181KernelVersion: 5.1
182Contact: oded.gabbay@gmail.com
183Description: Version of the u-boot running on the device's CPU
184
185What: /sys/class/habanalabs/hl<n>/write_open_cnt
186Date: Jan 2019
187KernelVersion: 5.1
188Contact: oded.gabbay@gmail.com
189Description:    Displays the total number of user processes that currently
190	 have the device's file open
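Again purely as an illustration (not part of the patch), a tiny monitoring tool that reads a couple of the sysfs attributes documented above might look like this; the device index hl0 is an assumption:

/*
 * Illustrative sketch: print the card status and the current Interconnect
 * clock frequency exposed by the habanalabs sysfs attributes above.
 */
#include <stdio.h>

static void show(const char *path)
{
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f)
		return;
	if (fgets(line, sizeof(line), f))
		printf("%s: %s", path, line);
	fclose(f);
}

int main(void)
{
	show("/sys/class/habanalabs/hl0/status");
	show("/sys/class/habanalabs/hl0/ic_clk_curr");
	return 0;
}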
diff --git a/Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt
new file mode 100644
index 000000000000..936fbdf12815
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt
@@ -0,0 +1,27 @@
1* PTN5150 CC (Configuration Channel) Logic device
2
3PTN5150 is a small thin low power CC logic chip supporting the USB Type-C
4connector application with CC control logic detection and indication functions.
5It is interfaced to the host controller using an I2C interface.
6
7Required properties:
8- compatible: should be "nxp,ptn5150"
9- reg: specifies the I2C slave address of the device
10- int-gpio: should contain a phandle and GPIO specifier for the GPIO pin
11 connected to the PTN5150's INTB pin.
12- vbus-gpio: should contain a phandle and GPIO specifier for the GPIO pin which
13 is used to control VBUS.
14- pinctrl-names : a pinctrl state named "default" must be defined.
15- pinctrl-0 : phandle referencing pin configuration of interrupt and vbus
16 control.
17
18Example:
19 ptn5150@1d {
20 compatible = "nxp,ptn5150";
21 reg = <0x1d>;
22 int-gpio = <&msmgpio 78 GPIO_ACTIVE_HIGH>;
23 vbus-gpio = <&msmgpio 148 GPIO_ACTIVE_HIGH>;
24 pinctrl-names = "default";
25 pinctrl-0 = <&ptn5150_default>;
26 status = "okay";
27 };
diff --git a/Documentation/devicetree/bindings/gnss/gnss.txt b/Documentation/devicetree/bindings/gnss/gnss.txt
index f1e4a2ff47c5..f547bd4549fe 100644
--- a/Documentation/devicetree/bindings/gnss/gnss.txt
+++ b/Documentation/devicetree/bindings/gnss/gnss.txt
@@ -17,6 +17,7 @@ Required properties:
 	represents
 
 Optional properties:
+- lna-supply	: Separate supply for an LNA
 - enable-gpios	: GPIO used to enable the device
 - timepulse-gpios	: Time pulse GPIO
 
diff --git a/Documentation/devicetree/bindings/gnss/mediatek.txt b/Documentation/devicetree/bindings/gnss/mediatek.txt
new file mode 100644
index 000000000000..80cb802813c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/gnss/mediatek.txt
@@ -0,0 +1,35 @@
1Mediatek-based GNSS Receiver DT binding
2
3Mediatek chipsets are used in GNSS-receiver modules produced by several
4vendors and can use a UART interface.
5
6Please see Documentation/devicetree/bindings/gnss/gnss.txt for generic
7properties.
8
9Required properties:
10
11- compatible : Must be
12
13 "globaltop,pa6h"
14
15- vcc-supply : Main voltage regulator (pin name: VCC)
16
17Optional properties:
18
19- current-speed : Default UART baud rate
20- gnss-fix-gpios : GPIO used to determine device position fix state
21 (pin name: FIX, 3D_FIX)
22- reset-gpios : GPIO used to reset the device (pin name: RESET, NRESET)
23- timepulse-gpios : Time pulse GPIO (pin name: PPS1, 1PPS)
24- vbackup-supply : Backup voltage regulator (pin name: VBAT, VBACKUP)
25
26Example:
27
28serial@1234 {
29 compatible = "ns16550a";
30
31 gnss {
32 compatible = "globaltop,pa6h";
33 vcc-supply = <&vcc_3v3>;
34 };
35};
diff --git a/Documentation/devicetree/bindings/gnss/sirfstar.txt b/Documentation/devicetree/bindings/gnss/sirfstar.txt
index 648d183cdb77..f4252b6b660b 100644
--- a/Documentation/devicetree/bindings/gnss/sirfstar.txt
+++ b/Documentation/devicetree/bindings/gnss/sirfstar.txt
@@ -12,6 +12,7 @@ Required properties:
 
 	"fastrax,uc430"
 	"linx,r4"
+	"wi2wi,w2sg0004"
 	"wi2wi,w2sg0008i"
 	"wi2wi,w2sg0084i"
 
diff --git a/Documentation/devicetree/bindings/interconnect/interconnect.txt b/Documentation/devicetree/bindings/interconnect/interconnect.txt
new file mode 100644
index 000000000000..5a3c575b387a
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/interconnect.txt
@@ -0,0 +1,60 @@
1Interconnect Provider Device Tree Bindings
2=========================================
3
4The purpose of this document is to define a common set of generic interconnect
5providers/consumers properties.
6
7
8= interconnect providers =
9
10The interconnect provider binding is intended to represent the interconnect
11controllers in the system. Each provider registers a set of interconnect
12nodes, which expose the interconnect related capabilities of the interconnect
13to consumer drivers. These capabilities can be throughput, latency, priority
14etc. The consumer drivers set constraints on interconnect path (or endpoints)
15depending on the use case. Interconnect providers can also be interconnect
16consumers, such as in the case where two network-on-chip fabrics interface
17directly.
18
19Required properties:
20- compatible : contains the interconnect provider compatible string
21- #interconnect-cells : number of cells in an interconnect specifier needed to
22 encode the interconnect node id
23
24Example:
25
26 snoc: interconnect@580000 {
27 compatible = "qcom,msm8916-snoc";
28 #interconnect-cells = <1>;
29 reg = <0x580000 0x14000>;
30 clock-names = "bus_clk", "bus_a_clk";
31 clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
32 <&rpmcc RPM_SMD_SNOC_A_CLK>;
33 };
34
35
36= interconnect consumers =
37
38The interconnect consumers are device nodes which dynamically express their
39bandwidth requirements along interconnect paths they are connected to. There
40can be multiple interconnect providers on a SoC and the consumer may consume
41multiple paths from different providers depending on use case and the
42components it has to interact with.
43
44Required properties:
45interconnects : Pairs of phandles and interconnect provider specifier to denote
46 the edge source and destination ports of the interconnect path.
47
48Optional properties:
49interconnect-names : List of interconnect path name strings sorted in the same
50	 order as the interconnects property. Consumer drivers will use
51 interconnect-names to match interconnect paths with interconnect
52 specifier pairs.
53
54Example:
55
56 sdhci@7864000 {
57 ...
58 interconnects = <&pnoc MASTER_SDCC_1 &bimc SLAVE_EBI_CH0>;
59 interconnect-names = "sdhc-mem";
60 };
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt
new file mode 100644
index 000000000000..5c4f1d911630
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt
@@ -0,0 +1,24 @@
1Qualcomm SDM845 Network-On-Chip interconnect driver binding
2-----------------------------------------------------------
3
4SDM845 interconnect providers support system bandwidth requirements through
5RPMh hardware accelerators known as Bus Clock Manager (BCM). The provider is
6able to communicate with the BCM through the Resource State Coordinator (RSC)
7associated with each execution environment. Provider nodes must reside within
8an RPMh device node pertaining to their RSC and each provider maps to a single
9RPMh resource.
10
11Required properties :
12- compatible : shall contain only one of the following:
13 "qcom,sdm845-rsc-hlos"
14- #interconnect-cells : should contain 1
15
16Examples:
17
18apps_rsc: rsc {
19 rsc_hlos: interconnect {
20 compatible = "qcom,sdm845-rsc-hlos";
21 #interconnect-cells = <1>;
22 };
23};
24
diff --git a/Documentation/devicetree/bindings/misc/qcom,fastrpc.txt b/Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
new file mode 100644
index 000000000000..2a1827ab50d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
@@ -0,0 +1,78 @@
1Qualcomm Technologies, Inc. FastRPC Driver
2
3The FastRPC implements an IPC (Inter-Processor Communication)
4mechanism that allows clients to transparently make remote method
5invocations across DSP and APPS boundaries. This enables developers
6to offload tasks to the DSP and free up the application processor for
7other tasks.
8
9- compatible:
10 Usage: required
11 Value type: <stringlist>
12 Definition: must be "qcom,fastrpc"
13
14- label
15 Usage: required
16 Value type: <string>
17 Definition: should specify the dsp domain name this fastrpc
18	 corresponds to. Must be one of: "adsp", "mdsp", "sdsp", "cdsp"
19
20- #address-cells
21 Usage: required
22 Value type: <u32>
23 Definition: Must be 1
24
25- #size-cells
26 Usage: required
27 Value type: <u32>
28 Definition: Must be 0
29
30= COMPUTE BANKS
31Each subnode of the fastrpc node represents a compute context bank available
32on the dsp.
33- All Compute context banks MUST contain the following properties:
34
35- compatible:
36 Usage: required
37 Value type: <stringlist>
38 Definition: must be "qcom,fastrpc-compute-cb"
39
40- reg
41 Usage: required
42 Value type: <u32>
43 Definition: Context Bank ID.
44
45- qcom,nsessions:
46 Usage: Optional
47 Value type: <u32>
48	 Definition: A value indicating how many sessions can share this
49 context bank. Defaults to 1 when this property
50 is not specified.
51
52Example:
53
54adsp-pil {
55 compatible = "qcom,msm8996-adsp-pil";
56 ...
57 smd-edge {
58 label = "lpass";
59 fastrpc {
60 compatible = "qcom,fastrpc";
61 qcom,smd-channels = "fastrpcsmd-apps-dsp";
62 label = "adsp";
63 #address-cells = <1>;
64 #size-cells = <0>;
65
66 cb@1 {
67 compatible = "qcom,fastrpc-compute-cb";
68 reg = <1>;
69 };
70
71 cb@2 {
72 compatible = "qcom,fastrpc-compute-cb";
73 reg = <2>;
74 };
75 ...
76 };
77 };
78};
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
index 792bc5fafeb9..7a999a135e56 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
@@ -1,7 +1,7 @@
 Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings
 
 This binding represents the on-chip eFuse OTP controller found on
-i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL and i.MX6SLL SoCs.
+i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ and i.MX6SLL SoCs.
 
 Required properties:
 - compatible: should be one of
@@ -9,8 +9,10 @@ Required properties:
 	"fsl,imx6sl-ocotp" (i.MX6SL), or
 	"fsl,imx6sx-ocotp" (i.MX6SX),
 	"fsl,imx6ul-ocotp" (i.MX6UL),
+	"fsl,imx6ull-ocotp" (i.MX6ULL/ULZ),
 	"fsl,imx7d-ocotp" (i.MX7D/S),
 	"fsl,imx6sll-ocotp" (i.MX6SLL),
+	"fsl,imx7ulp-ocotp" (i.MX7ULP),
 	followed by "syscon".
 - #address-cells : Should be 1
 - #size-cells : Should be 1
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 8f574c778290..542bbf304f13 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -154,6 +154,7 @@ geniatech Geniatech, Inc.
 giantec	Giantec Semiconductor, Inc.
 giantplus	Giantplus Technology Co., Ltd.
 globalscale	Globalscale Technologies, Inc.
+globaltop	GlobalTop Technology, Inc.
 gmt	Global Mixed-mode Technology, Inc.
 goodix	Shenzhen Huiding Technology Co., Ltd.
 google	Google, Inc.
diff --git a/Documentation/driver-api/component.rst b/Documentation/driver-api/component.rst
new file mode 100644
index 000000000000..2da4a8f20607
--- /dev/null
+++ b/Documentation/driver-api/component.rst
@@ -0,0 +1,17 @@
1======================================
2Component Helper for Aggregate Drivers
3======================================
4
5.. kernel-doc:: drivers/base/component.c
6 :doc: overview
7
8
9API
10===
11
12.. kernel-doc:: include/linux/component.h
13 :internal:
14
15.. kernel-doc:: drivers/base/component.c
16 :export:
17
diff --git a/Documentation/driver-api/device_link.rst b/Documentation/driver-api/device_link.rst
index d6763272e747..2d5919b2b337 100644
--- a/Documentation/driver-api/device_link.rst
+++ b/Documentation/driver-api/device_link.rst
@@ -1,6 +1,9 @@
 .. |struct dev_pm_domain| replace:: :c:type:`struct dev_pm_domain <dev_pm_domain>`
 .. |struct generic_pm_domain| replace:: :c:type:`struct generic_pm_domain <generic_pm_domain>`
 
+
+.. _device_link:
+
 ============
 Device links
 ============
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index ab38ced66a44..c0b600ed9961 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -22,6 +22,7 @@ available subsections can be seen below.
    device_connection
    dma-buf
    device_link
+   component
    message-based
    sound
    frame-buffer
diff --git a/Documentation/interconnect/interconnect.rst b/Documentation/interconnect/interconnect.rst
new file mode 100644
index 000000000000..b8107dcc4cd3
--- /dev/null
+++ b/Documentation/interconnect/interconnect.rst
@@ -0,0 +1,94 @@
1.. SPDX-License-Identifier: GPL-2.0
2
3=====================================
4GENERIC SYSTEM INTERCONNECT SUBSYSTEM
5=====================================
6
7Introduction
8------------
9
10This framework is designed to provide a standard kernel interface to control
11the settings of the interconnects on an SoC. These settings can be throughput,
12latency and priority between multiple interconnected devices or functional
13blocks. This can be controlled dynamically in order to save power or provide
14maximum performance.
15
16The interconnect bus is hardware with configurable parameters, which can be
17set on a data path according to the requests received from various drivers.
18Examples of interconnect buses are the interconnects between various
19components or functional blocks in chipsets. There can be multiple interconnects
20on an SoC that can be multi-tiered.
21
22Below is a simplified diagram of a real-world SoC interconnect bus topology.
23
24::
25
26 +----------------+ +----------------+
27 | HW Accelerator |--->| M NoC |<---------------+
28 +----------------+ +----------------+ |
29 | | +------------+
30 +-----+ +-------------+ V +------+ | |
31 | DDR | | +--------+ | PCIe | | |
32 +-----+ | | Slaves | +------+ | |
33 ^ ^ | +--------+ | | C NoC |
34 | | V V | |
35 +------------------+ +------------------------+ | | +-----+
36 | |-->| |-->| |-->| CPU |
37 | |-->| |<--| | +-----+
38 | Mem NoC | | S NoC | +------------+
39 | |<--| |---------+ |
40 | |<--| |<------+ | | +--------+
41 +------------------+ +------------------------+ | | +-->| Slaves |
42 ^ ^ ^ ^ ^ | | +--------+
43 | | | | | | V
44 +------+ | +-----+ +-----+ +---------+ +----------------+ +--------+
45 | CPUs | | | GPU | | DSP | | Masters |-->| P NoC |-->| Slaves |
46 +------+ | +-----+ +-----+ +---------+ +----------------+ +--------+
47 |
48 +-------+
49 | Modem |
50 +-------+
51
52Terminology
53-----------
54
55Interconnect provider is the software definition of the interconnect hardware.
56The interconnect providers on the above diagram are M NoC, S NoC, C NoC, P NoC
57and Mem NoC.
58
59Interconnect node is the software definition of the interconnect hardware
60port. Each interconnect provider consists of multiple interconnect nodes,
61which are connected to other SoC components including other interconnect
62providers. The point on the diagram where the CPUs connect to the memory is
63called an interconnect node, which belongs to the Mem NoC interconnect provider.
64
65Interconnect endpoints are the first or the last element of the path. Every
66endpoint is a node, but not every node is an endpoint.
67
68Interconnect path is everything between two endpoints including all the nodes
69that have to be traversed to reach from a source to destination node. It may
70include multiple master-slave pairs across several interconnect providers.
71
72Interconnect consumers are the entities which make use of the data paths exposed
73by the providers. The consumers send requests to providers requesting various
74	throughput, latency and priority. Usually the consumers are device drivers that
75	send requests based on their needs. An example of a consumer is a video decoder
76that supports various formats and image sizes.
77
78Interconnect providers
79----------------------
80
81Interconnect provider is an entity that implements methods to initialize and
82configure interconnect bus hardware. The interconnect provider drivers should
83be registered with the interconnect provider core.
84
85.. kernel-doc:: include/linux/interconnect-provider.h
86
87Interconnect consumers
88----------------------
89
90Interconnect consumers are the clients which use the interconnect APIs to
91get paths between endpoints and set their bandwidth/latency/QoS requirements
92for these interconnect paths.
93
94.. kernel-doc:: include/linux/interconnect.h
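As an illustrative sketch only (not part of the patch), a kernel consumer of this framework might look roughly as follows, using the icc_* calls declared in include/linux/interconnect.h; the path name and the bandwidth values are assumptions:

/*
 * Hypothetical interconnect consumer sketch. The "sdhc-mem" path name and
 * the bandwidth numbers are example values, not taken from a real driver.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect.h>

static int example_request_bandwidth(struct device *dev)
{
	struct icc_path *path;
	int ret;

	/* Look up the path named "sdhc-mem" in the consumer's DT node. */
	path = of_icc_get(dev, "sdhc-mem");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* Request example average/peak bandwidth values on that path. */
	ret = icc_set_bw(path, 100000, 200000);

	/* Drop the request and release the path when no longer needed. */
	icc_put(path);
	return ret;
}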
diff --git a/MAINTAINERS b/MAINTAINERS
index d1559363898f..108f3b1b7a79 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6699,6 +6699,15 @@ F: drivers/clocksource/h8300_*.c
 F:	drivers/clk/h8300/
 F:	drivers/irqchip/irq-renesas-h8*.c
 
+HABANALABS PCI DRIVER
+M:	Oded Gabbay <oded.gabbay@gmail.com>
+T:	git https://github.com/HabanaAI/linux.git
+S:	Supported
+F:	drivers/misc/habanalabs/
+F:	include/uapi/misc/habanalabs.h
+F:	Documentation/ABI/testing/sysfs-driver-habanalabs
+F:	Documentation/ABI/testing/debugfs-driver-habanalabs
+
 HACKRF MEDIA DRIVER
 M:	Antti Palosaari <crope@iki.fi>
 L:	linux-media@vger.kernel.org
@@ -7056,7 +7065,7 @@ M: Haiyang Zhang <haiyangz@microsoft.com>
 M:	Stephen Hemminger <sthemmin@microsoft.com>
 M:	Sasha Levin <sashal@kernel.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
-L:	devel@linuxdriverproject.org
+L:	linux-hyperv@vger.kernel.org
 S:	Supported
 F:	Documentation/networking/device_drivers/microsoft/netvsc.txt
 F:	arch/x86/include/asm/mshyperv.h
@@ -7941,6 +7950,16 @@ L: linux-gpio@vger.kernel.org
 S:	Maintained
 F:	drivers/gpio/gpio-intel-mid.c
 
+INTERCONNECT API
+M:	Georgi Djakov <georgi.djakov@linaro.org>
+S:	Maintained
+F:	Documentation/interconnect/
+F:	Documentation/devicetree/bindings/interconnect/
+F:	drivers/interconnect/
+F:	include/dt-bindings/interconnect/
+F:	include/linux/interconnect-provider.h
+F:	include/linux/interconnect.h
+
 INVENSENSE MPU-3050 GYROSCOPE DRIVER
 M:	Linus Walleij <linus.walleij@linaro.org>
 L:	linux-iio@vger.kernel.org
diff --git a/arch/Kconfig b/arch/Kconfig
index 3aff508ffd86..33687dddd86a 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -711,6 +711,9 @@ config HAVE_ARCH_HASH
 	  file which provides platform-specific implementations of some
 	  functions in <linux/hash.h> or fs/namei.c.
 
+config HAVE_ARCH_NVRAM_OPS
+	bool
+
 config ISA_BUS_API
 	def_bool ISA
 
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 328ba83d735b..c01e103492fd 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -16,6 +16,7 @@ config ATARI
 	bool "Atari support"
 	depends on MMU
 	select MMU_MOTOROLA if MMU
+	select HAVE_ARCH_NVRAM_OPS
 	help
 	  This option enables support for the 68000-based Atari series of
 	  computers (including the TT, Falcon and Medusa). If you plan to use
@@ -26,6 +27,7 @@ config MAC
 	bool "Macintosh support"
 	depends on MMU
 	select MMU_MOTOROLA if MMU
+	select HAVE_ARCH_NVRAM_OPS
 	help
 	  This option enables support for the Apple Macintosh series of
 	  computers (yes, there is experimental support now, at least for part
diff --git a/arch/m68k/atari/Makefile b/arch/m68k/atari/Makefile
index 0cac723306f9..0b86bb6cfa87 100644
--- a/arch/m68k/atari/Makefile
+++ b/arch/m68k/atari/Makefile
@@ -6,3 +6,5 @@ obj-y := config.o time.o debug.o ataints.o stdma.o \
 	atasound.o stram.o
 
 obj-$(CONFIG_ATARI_KBD_CORE)	+= atakeyb.o
+
+obj-$(CONFIG_NVRAM:m=y)		+= nvram.o
diff --git a/arch/m68k/atari/nvram.c b/arch/m68k/atari/nvram.c
new file mode 100644
index 000000000000..7000d2443aa3
--- /dev/null
+++ b/arch/m68k/atari/nvram.c
@@ -0,0 +1,272 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * CMOS/NV-RAM driver for Atari. Adapted from drivers/char/nvram.c.
4 * Copyright (C) 1997 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
5 * idea by and with help from Richard Jelinek <rj@suse.de>
6 * Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
7 * Further contributions from Cesar Barros, Erik Gilling, Tim Hockin and
8 * Wim Van Sebroeck.
9 */
10
11#include <linux/errno.h>
12#include <linux/init.h>
13#include <linux/mc146818rtc.h>
14#include <linux/module.h>
15#include <linux/nvram.h>
16#include <linux/proc_fs.h>
17#include <linux/seq_file.h>
18#include <linux/spinlock.h>
19#include <linux/types.h>
20#include <asm/atarihw.h>
21#include <asm/atariints.h>
22
23#define NVRAM_BYTES 50
24
25/* It is worth noting that these functions all access bytes of general
26 * purpose memory in the NVRAM - that is to say, they all add the
27 * NVRAM_FIRST_BYTE offset. Pass them offsets into NVRAM as if you did not
28 * know about the RTC cruft.
29 */
30
31/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
32 * rtc_lock held. Due to the index-port/data-port design of the RTC, we
33 * don't want two different things trying to get to it at once. (e.g. the
34 * periodic 11 min sync from kernel/time/ntp.c vs. this driver.)
35 */
36
37static unsigned char __nvram_read_byte(int i)
38{
39 return CMOS_READ(NVRAM_FIRST_BYTE + i);
40}
41
42/* This races nicely with trying to read with checksum checking */
43static void __nvram_write_byte(unsigned char c, int i)
44{
45 CMOS_WRITE(c, NVRAM_FIRST_BYTE + i);
46}
47
48/* On Ataris, the checksum is over all bytes except the checksum bytes
49 * themselves; these are at the very end.
50 */
51#define ATARI_CKS_RANGE_START 0
52#define ATARI_CKS_RANGE_END 47
53#define ATARI_CKS_LOC 48
54
55static int __nvram_check_checksum(void)
56{
57 int i;
58 unsigned char sum = 0;
59
60 for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
61 sum += __nvram_read_byte(i);
62 return (__nvram_read_byte(ATARI_CKS_LOC) == (~sum & 0xff)) &&
63 (__nvram_read_byte(ATARI_CKS_LOC + 1) == (sum & 0xff));
64}
65
66static void __nvram_set_checksum(void)
67{
68 int i;
69 unsigned char sum = 0;
70
71 for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
72 sum += __nvram_read_byte(i);
73 __nvram_write_byte(~sum, ATARI_CKS_LOC);
74 __nvram_write_byte(sum, ATARI_CKS_LOC + 1);
75}
76
77long atari_nvram_set_checksum(void)
78{
79 spin_lock_irq(&rtc_lock);
80 __nvram_set_checksum();
81 spin_unlock_irq(&rtc_lock);
82 return 0;
83}
84
85long atari_nvram_initialize(void)
86{
87 loff_t i;
88
89 spin_lock_irq(&rtc_lock);
90 for (i = 0; i < NVRAM_BYTES; ++i)
91 __nvram_write_byte(0, i);
92 __nvram_set_checksum();
93 spin_unlock_irq(&rtc_lock);
94 return 0;
95}
96
97ssize_t atari_nvram_read(char *buf, size_t count, loff_t *ppos)
98{
99 char *p = buf;
100 loff_t i;
101
102 spin_lock_irq(&rtc_lock);
103 if (!__nvram_check_checksum()) {
104 spin_unlock_irq(&rtc_lock);
105 return -EIO;
106 }
107 for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
108 *p = __nvram_read_byte(i);
109 spin_unlock_irq(&rtc_lock);
110
111 *ppos = i;
112 return p - buf;
113}
114
115ssize_t atari_nvram_write(char *buf, size_t count, loff_t *ppos)
116{
117 char *p = buf;
118 loff_t i;
119
120 spin_lock_irq(&rtc_lock);
121 if (!__nvram_check_checksum()) {
122 spin_unlock_irq(&rtc_lock);
123 return -EIO;
124 }
125 for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
126 __nvram_write_byte(*p, i);
127 __nvram_set_checksum();
128 spin_unlock_irq(&rtc_lock);
129
130 *ppos = i;
131 return p - buf;
132}
133
134ssize_t atari_nvram_get_size(void)
135{
136 return NVRAM_BYTES;
137}
138
139#ifdef CONFIG_PROC_FS
140static struct {
141 unsigned char val;
142 const char *name;
143} boot_prefs[] = {
144 { 0x80, "TOS" },
145 { 0x40, "ASV" },
146 { 0x20, "NetBSD (?)" },
147 { 0x10, "Linux" },
148 { 0x00, "unspecified" },
149};
150
151static const char * const languages[] = {
152 "English (US)",
153 "German",
154 "French",
155 "English (UK)",
156 "Spanish",
157 "Italian",
158 "6 (undefined)",
159 "Swiss (French)",
160 "Swiss (German)",
161};
162
163static const char * const dateformat[] = {
164 "MM%cDD%cYY",
165 "DD%cMM%cYY",
166 "YY%cMM%cDD",
167 "YY%cDD%cMM",
168 "4 (undefined)",
169 "5 (undefined)",
170 "6 (undefined)",
171 "7 (undefined)",
172};
173
174static const char * const colors[] = {
175 "2", "4", "16", "256", "65536", "??", "??", "??"
176};
177
178static void atari_nvram_proc_read(unsigned char *nvram, struct seq_file *seq,
179 void *offset)
180{
181 int checksum;
182 int i;
183 unsigned int vmode;
184
185 spin_lock_irq(&rtc_lock);
186 checksum = __nvram_check_checksum();
187 spin_unlock_irq(&rtc_lock);
188
189 seq_printf(seq, "Checksum status : %svalid\n", checksum ? "" : "not ");
190
191 seq_puts(seq, "Boot preference : ");
192 for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i)
193 if (nvram[1] == boot_prefs[i].val) {
194 seq_printf(seq, "%s\n", boot_prefs[i].name);
195 break;
196 }
197 if (i < 0)
198 seq_printf(seq, "0x%02x (undefined)\n", nvram[1]);
199
200 seq_printf(seq, "SCSI arbitration : %s\n",
201 (nvram[16] & 0x80) ? "on" : "off");
202 seq_puts(seq, "SCSI host ID : ");
203 if (nvram[16] & 0x80)
204 seq_printf(seq, "%d\n", nvram[16] & 7);
205 else
206 seq_puts(seq, "n/a\n");
207
208 if (!MACH_IS_FALCON)
209 return;
210
211 seq_puts(seq, "OS language : ");
212 if (nvram[6] < ARRAY_SIZE(languages))
213 seq_printf(seq, "%s\n", languages[nvram[6]]);
214 else
215 seq_printf(seq, "%u (undefined)\n", nvram[6]);
216 seq_puts(seq, "Keyboard language: ");
217 if (nvram[7] < ARRAY_SIZE(languages))
218 seq_printf(seq, "%s\n", languages[nvram[7]]);
219 else
220 seq_printf(seq, "%u (undefined)\n", nvram[7]);
221 seq_puts(seq, "Date format : ");
222 seq_printf(seq, dateformat[nvram[8] & 7],
223 nvram[9] ? nvram[9] : '/', nvram[9] ? nvram[9] : '/');
224 seq_printf(seq, ", %dh clock\n", nvram[8] & 16 ? 24 : 12);
225 seq_puts(seq, "Boot delay : ");
226 if (nvram[10] == 0)
227 seq_puts(seq, "default\n");
228 else
229 seq_printf(seq, "%ds%s\n", nvram[10],
230 nvram[10] < 8 ? ", no memory test" : "");
231
232 vmode = (nvram[14] << 8) | nvram[15];
233 seq_printf(seq,
234 "Video mode : %s colors, %d columns, %s %s monitor\n",
235 colors[vmode & 7], vmode & 8 ? 80 : 40,
236 vmode & 16 ? "VGA" : "TV", vmode & 32 ? "PAL" : "NTSC");
237 seq_printf(seq,
238 " %soverscan, compat. mode %s%s\n",
239 vmode & 64 ? "" : "no ", vmode & 128 ? "on" : "off",
240 vmode & 256 ?
241 (vmode & 16 ? ", line doubling" : ", half screen") : "");
242}
243
244static int nvram_proc_read(struct seq_file *seq, void *offset)
245{
246 unsigned char contents[NVRAM_BYTES];
247 int i;
248
249 spin_lock_irq(&rtc_lock);
250 for (i = 0; i < NVRAM_BYTES; ++i)
251 contents[i] = __nvram_read_byte(i);
252 spin_unlock_irq(&rtc_lock);
253
254 atari_nvram_proc_read(contents, seq, offset);
255
256 return 0;
257}
258
259static int __init atari_nvram_init(void)
260{
261 if (!(MACH_IS_ATARI && ATARIHW_PRESENT(TT_CLK)))
262 return -ENODEV;
263
264 if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read)) {
265 pr_err("nvram: can't create /proc/driver/nvram\n");
266 return -ENOMEM;
267 }
268
269 return 0;
270}
271device_initcall(atari_nvram_init);
272#endif /* CONFIG_PROC_FS */
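
The checksum scheme in the driver above is small enough to model outside the kernel: the sum runs over NVRAM bytes 0..47, the one's complement of the sum is stored at offset 48, and the sum itself at offset 49. Below is a stand-alone C sketch of the same arithmetic; a plain 50-byte buffer stands in for the RTC-backed NVRAM, so this is an illustration, not kernel code.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define NVRAM_BYTES           50
#define ATARI_CKS_RANGE_START 0
#define ATARI_CKS_RANGE_END   47
#define ATARI_CKS_LOC         48

/* Recompute and store the checksum, as __nvram_set_checksum() does. */
static void set_checksum(uint8_t nvram[NVRAM_BYTES])
{
	uint8_t sum = 0;

	for (int i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; i++)
		sum += nvram[i];
	nvram[ATARI_CKS_LOC] = (uint8_t)~sum;	/* one's complement */
	nvram[ATARI_CKS_LOC + 1] = sum;
}

/* Validate the stored checksum, as __nvram_check_checksum() does. */
static bool check_checksum(const uint8_t nvram[NVRAM_BYTES])
{
	uint8_t sum = 0;

	for (int i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; i++)
		sum += nvram[i];
	return nvram[ATARI_CKS_LOC] == (uint8_t)~sum &&
	       nvram[ATARI_CKS_LOC + 1] == sum;
}

int main(void)
{
	uint8_t nvram[NVRAM_BYTES];

	memset(nvram, 0, sizeof(nvram));	/* like atari_nvram_initialize() */
	set_checksum(nvram);
	assert(check_checksum(nvram));

	nvram[1] = 0x10;			/* corrupt a data byte ...     */
	assert(!check_checksum(nvram));		/* ... and validation fails    */
	set_checksum(nvram);
	assert(check_checksum(nvram));
	return 0;
}
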
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index 9000b249d225..533008262b69 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -33,6 +33,12 @@ extern int atari_dont_touch_floppy_select;
33 33
34extern int atari_SCC_reset_done; 34extern int atari_SCC_reset_done;
35 35
36extern ssize_t atari_nvram_read(char *, size_t, loff_t *);
37extern ssize_t atari_nvram_write(char *, size_t, loff_t *);
38extern ssize_t atari_nvram_get_size(void);
39extern long atari_nvram_set_checksum(void);
40extern long atari_nvram_initialize(void);
41
36/* convenience macros for testing machine type */ 42/* convenience macros for testing machine type */
37#define MACH_IS_ST ((atari_mch_cookie >> 16) == ATARI_MCH_ST) 43#define MACH_IS_ST ((atari_mch_cookie >> 16) == ATARI_MCH_ST)
38#define MACH_IS_STE ((atari_mch_cookie >> 16) == ATARI_MCH_STE && \ 44#define MACH_IS_STE ((atari_mch_cookie >> 16) == ATARI_MCH_STE && \
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 08cee11180e6..d9a08bed4b12 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -19,6 +19,10 @@ extern void mac_init_IRQ(void);
19extern void mac_irq_enable(struct irq_data *data); 19extern void mac_irq_enable(struct irq_data *data);
20extern void mac_irq_disable(struct irq_data *data); 20extern void mac_irq_disable(struct irq_data *data);
21 21
22extern unsigned char mac_pram_read_byte(int);
23extern void mac_pram_write_byte(unsigned char, int);
24extern ssize_t mac_pram_get_size(void);
25
22/* 26/*
23 * Macintosh Table 27 * Macintosh Table
24 */ 28 */
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index ad0195cbe042..528484feff80 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -24,6 +24,7 @@
24#include <linux/proc_fs.h> 24#include <linux/proc_fs.h>
25#include <linux/seq_file.h> 25#include <linux/seq_file.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/nvram.h>
27#include <linux/initrd.h> 28#include <linux/initrd.h>
28 29
29#include <asm/bootinfo.h> 30#include <asm/bootinfo.h>
@@ -37,13 +38,14 @@
37#ifdef CONFIG_AMIGA 38#ifdef CONFIG_AMIGA
38#include <asm/amigahw.h> 39#include <asm/amigahw.h>
39#endif 40#endif
40#ifdef CONFIG_ATARI
41#include <asm/atarihw.h> 41#include <asm/atarihw.h>
42#ifdef CONFIG_ATARI
42#include <asm/atari_stram.h> 43#include <asm/atari_stram.h>
43#endif 44#endif
44#ifdef CONFIG_SUN3X 45#ifdef CONFIG_SUN3X
45#include <asm/dvma.h> 46#include <asm/dvma.h>
46#endif 47#endif
48#include <asm/macintosh.h>
47#include <asm/natfeat.h> 49#include <asm/natfeat.h>
48 50
49#if !FPSTATESIZE || !NR_IRQS 51#if !FPSTATESIZE || !NR_IRQS
@@ -547,3 +549,81 @@ static int __init adb_probe_sync_enable (char *str) {
547 549
548__setup("adb_sync", adb_probe_sync_enable); 550__setup("adb_sync", adb_probe_sync_enable);
549#endif /* CONFIG_ADB */ 551#endif /* CONFIG_ADB */
552
553#if IS_ENABLED(CONFIG_NVRAM)
554#ifdef CONFIG_MAC
555static unsigned char m68k_nvram_read_byte(int addr)
556{
557 if (MACH_IS_MAC)
558 return mac_pram_read_byte(addr);
559 return 0xff;
560}
561
562static void m68k_nvram_write_byte(unsigned char val, int addr)
563{
564 if (MACH_IS_MAC)
565 mac_pram_write_byte(val, addr);
566}
567#endif /* CONFIG_MAC */
568
569#ifdef CONFIG_ATARI
570static ssize_t m68k_nvram_read(char *buf, size_t count, loff_t *ppos)
571{
572 if (MACH_IS_ATARI)
573 return atari_nvram_read(buf, count, ppos);
574 else if (MACH_IS_MAC)
575 return nvram_read_bytes(buf, count, ppos);
576 return -EINVAL;
577}
578
579static ssize_t m68k_nvram_write(char *buf, size_t count, loff_t *ppos)
580{
581 if (MACH_IS_ATARI)
582 return atari_nvram_write(buf, count, ppos);
583 else if (MACH_IS_MAC)
584 return nvram_write_bytes(buf, count, ppos);
585 return -EINVAL;
586}
587
588static long m68k_nvram_set_checksum(void)
589{
590 if (MACH_IS_ATARI)
591 return atari_nvram_set_checksum();
592 return -EINVAL;
593}
594
595static long m68k_nvram_initialize(void)
596{
597 if (MACH_IS_ATARI)
598 return atari_nvram_initialize();
599 return -EINVAL;
600}
601#endif /* CONFIG_ATARI */
602
603static ssize_t m68k_nvram_get_size(void)
604{
605 if (MACH_IS_ATARI)
606 return atari_nvram_get_size();
607 else if (MACH_IS_MAC)
608 return mac_pram_get_size();
609 return -ENODEV;
610}
611
612/* Atari device drivers call .read (to get checksum validation) whereas
613 * Mac and PowerMac device drivers just use .read_byte.
614 */
615const struct nvram_ops arch_nvram_ops = {
616#ifdef CONFIG_MAC
617 .read_byte = m68k_nvram_read_byte,
618 .write_byte = m68k_nvram_write_byte,
619#endif
620#ifdef CONFIG_ATARI
621 .read = m68k_nvram_read,
622 .write = m68k_nvram_write,
623 .set_checksum = m68k_nvram_set_checksum,
624 .initialize = m68k_nvram_initialize,
625#endif
626 .get_size = m68k_nvram_get_size,
627};
628EXPORT_SYMBOL(arch_nvram_ops);
629#endif /* CONFIG_NVRAM */
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 1423e1fe0261..90f4e9ca1276 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -36,8 +36,9 @@
36 36
37static void (*rom_reset)(void); 37static void (*rom_reset)(void);
38 38
39#if IS_ENABLED(CONFIG_NVRAM)
39#ifdef CONFIG_ADB_CUDA 40#ifdef CONFIG_ADB_CUDA
40static __u8 cuda_read_pram(int offset) 41static unsigned char cuda_pram_read_byte(int offset)
41{ 42{
42 struct adb_request req; 43 struct adb_request req;
43 44
@@ -49,7 +50,7 @@ static __u8 cuda_read_pram(int offset)
49 return req.reply[3]; 50 return req.reply[3];
50} 51}
51 52
52static void cuda_write_pram(int offset, __u8 data) 53static void cuda_pram_write_byte(unsigned char data, int offset)
53{ 54{
54 struct adb_request req; 55 struct adb_request req;
55 56
@@ -62,29 +63,29 @@ static void cuda_write_pram(int offset, __u8 data)
62#endif /* CONFIG_ADB_CUDA */ 63#endif /* CONFIG_ADB_CUDA */
63 64
64#ifdef CONFIG_ADB_PMU 65#ifdef CONFIG_ADB_PMU
65static __u8 pmu_read_pram(int offset) 66static unsigned char pmu_pram_read_byte(int offset)
66{ 67{
67 struct adb_request req; 68 struct adb_request req;
68 69
69 if (pmu_request(&req, NULL, 3, PMU_READ_NVRAM, 70 if (pmu_request(&req, NULL, 3, PMU_READ_XPRAM,
70 (offset >> 8) & 0xFF, offset & 0xFF) < 0) 71 offset & 0xFF, 1) < 0)
71 return 0; 72 return 0;
72 while (!req.complete) 73 pmu_wait_complete(&req);
73 pmu_poll(); 74
74 return req.reply[3]; 75 return req.reply[0];
75} 76}
76 77
77static void pmu_write_pram(int offset, __u8 data) 78static void pmu_pram_write_byte(unsigned char data, int offset)
78{ 79{
79 struct adb_request req; 80 struct adb_request req;
80 81
81 if (pmu_request(&req, NULL, 4, PMU_WRITE_NVRAM, 82 if (pmu_request(&req, NULL, 4, PMU_WRITE_XPRAM,
82 (offset >> 8) & 0xFF, offset & 0xFF, data) < 0) 83 offset & 0xFF, 1, data) < 0)
83 return; 84 return;
84 while (!req.complete) 85 pmu_wait_complete(&req);
85 pmu_poll();
86} 86}
87#endif /* CONFIG_ADB_PMU */ 87#endif /* CONFIG_ADB_PMU */
88#endif /* CONFIG_NVRAM */
88 89
89/* 90/*
90 * VIA PRAM/RTC access routines 91 * VIA PRAM/RTC access routines
@@ -93,7 +94,7 @@ static void pmu_write_pram(int offset, __u8 data)
93 * the RTC should be enabled. 94 * the RTC should be enabled.
94 */ 95 */
95 96
96static __u8 via_pram_readbyte(void) 97static __u8 via_rtc_recv(void)
97{ 98{
98 int i, reg; 99 int i, reg;
99 __u8 data; 100 __u8 data;
@@ -120,7 +121,7 @@ static __u8 via_pram_readbyte(void)
120 return data; 121 return data;
121} 122}
122 123
123static void via_pram_writebyte(__u8 data) 124static void via_rtc_send(__u8 data)
124{ 125{
125 int i, reg, bit; 126 int i, reg, bit;
126 127
@@ -137,6 +138,31 @@ static void via_pram_writebyte(__u8 data)
137} 138}
138 139
139/* 140/*
141 * These values can be found in Inside Macintosh vol. III ch. 2
142 * which has a description of the RTC chip in the original Mac.
143 */
144
145#define RTC_FLG_READ BIT(7)
146#define RTC_FLG_WRITE_PROTECT BIT(7)
147#define RTC_CMD_READ(r) (RTC_FLG_READ | (r << 2))
148#define RTC_CMD_WRITE(r) (r << 2)
149#define RTC_REG_SECONDS_0 0
150#define RTC_REG_SECONDS_1 1
151#define RTC_REG_SECONDS_2 2
152#define RTC_REG_SECONDS_3 3
153#define RTC_REG_WRITE_PROTECT 13
154
155/*
156 * Inside Mac has no information about two-byte RTC commands but
157 * the MAME/MESS source code has the essentials.
158 */
159
160#define RTC_REG_XPRAM 14
161#define RTC_CMD_XPRAM_READ (RTC_CMD_READ(RTC_REG_XPRAM) << 8)
162#define RTC_CMD_XPRAM_WRITE (RTC_CMD_WRITE(RTC_REG_XPRAM) << 8)
163#define RTC_CMD_XPRAM_ARG(a) (((a & 0xE0) << 3) | ((a & 0x1F) << 2))
164
165/*
140 * Execute a VIA PRAM/RTC command. For read commands 166 * Execute a VIA PRAM/RTC command. For read commands
141 * data should point to a one-byte buffer for the 167 * data should point to a one-byte buffer for the
142 * resulting data. For write commands it should point 168 * resulting data. For write commands it should point
@@ -145,29 +171,33 @@ static void via_pram_writebyte(__u8 data)
145 * This function disables all interrupts while running. 171 * This function disables all interrupts while running.
146 */ 172 */
147 173
148static void via_pram_command(int command, __u8 *data) 174static void via_rtc_command(int command, __u8 *data)
149{ 175{
150 unsigned long flags; 176 unsigned long flags;
151 int is_read; 177 int is_read;
152 178
153 local_irq_save(flags); 179 local_irq_save(flags);
154 180
181 /* The least significant bits must be 0b01 according to Inside Mac */
182
183 command = (command & ~3) | 1;
184
155 /* Enable the RTC and make sure the strobe line is high */ 185 /* Enable the RTC and make sure the strobe line is high */
156 186
157 via1[vBufB] = (via1[vBufB] | VIA1B_vRTCClk) & ~VIA1B_vRTCEnb; 187 via1[vBufB] = (via1[vBufB] | VIA1B_vRTCClk) & ~VIA1B_vRTCEnb;
158 188
159 if (command & 0xFF00) { /* extended (two-byte) command */ 189 if (command & 0xFF00) { /* extended (two-byte) command */
160 via_pram_writebyte((command & 0xFF00) >> 8); 190 via_rtc_send((command & 0xFF00) >> 8);
161 via_pram_writebyte(command & 0xFF); 191 via_rtc_send(command & 0xFF);
162 is_read = command & 0x8000; 192 is_read = command & (RTC_FLG_READ << 8);
163 } else { /* one-byte command */ 193 } else { /* one-byte command */
164 via_pram_writebyte(command); 194 via_rtc_send(command);
165 is_read = command & 0x80; 195 is_read = command & RTC_FLG_READ;
166 } 196 }
167 if (is_read) { 197 if (is_read) {
168 *data = via_pram_readbyte(); 198 *data = via_rtc_recv();
169 } else { 199 } else {
170 via_pram_writebyte(*data); 200 via_rtc_send(*data);
171 } 201 }
172 202
173 /* All done, disable the RTC */ 203 /* All done, disable the RTC */
@@ -177,14 +207,30 @@ static void via_pram_command(int command, __u8 *data)
177 local_irq_restore(flags); 207 local_irq_restore(flags);
178} 208}
179 209
180static __u8 via_read_pram(int offset) 210#if IS_ENABLED(CONFIG_NVRAM)
211static unsigned char via_pram_read_byte(int offset)
181{ 212{
182 return 0; 213 unsigned char temp;
214
215 via_rtc_command(RTC_CMD_XPRAM_READ | RTC_CMD_XPRAM_ARG(offset), &temp);
216
217 return temp;
183} 218}
184 219
185static void via_write_pram(int offset, __u8 data) 220static void via_pram_write_byte(unsigned char data, int offset)
186{ 221{
222 unsigned char temp;
223
224 temp = 0x55;
225 via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp);
226
227 temp = data;
228 via_rtc_command(RTC_CMD_XPRAM_WRITE | RTC_CMD_XPRAM_ARG(offset), &temp);
229
230 temp = 0x55 | RTC_FLG_WRITE_PROTECT;
231 via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp);
187} 232}
233#endif /* CONFIG_NVRAM */
188 234
189/* 235/*
190 * Return the current time in seconds since January 1, 1904. 236 * Return the current time in seconds since January 1, 1904.
@@ -201,10 +247,10 @@ static time64_t via_read_time(void)
201 } result, last_result; 247 } result, last_result;
202 int count = 1; 248 int count = 1;
203 249
204 via_pram_command(0x81, &last_result.cdata[3]); 250 via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_0), &last_result.cdata[3]);
205 via_pram_command(0x85, &last_result.cdata[2]); 251 via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_1), &last_result.cdata[2]);
206 via_pram_command(0x89, &last_result.cdata[1]); 252 via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_2), &last_result.cdata[1]);
207 via_pram_command(0x8D, &last_result.cdata[0]); 253 via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_3), &last_result.cdata[0]);
208 254
209 /* 255 /*
210 * The NetBSD guys say to loop until you get the same reading 256 * The NetBSD guys say to loop until you get the same reading
@@ -212,10 +258,14 @@ static time64_t via_read_time(void)
212 */ 258 */
213 259
214 while (1) { 260 while (1) {
215 via_pram_command(0x81, &result.cdata[3]); 261 via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_0),
216 via_pram_command(0x85, &result.cdata[2]); 262 &result.cdata[3]);
217 via_pram_command(0x89, &result.cdata[1]); 263 via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_1),
218 via_pram_command(0x8D, &result.cdata[0]); 264 &result.cdata[2]);
265 via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_2),
266 &result.cdata[1]);
267 via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_3),
268 &result.cdata[0]);
219 269
220 if (result.idata == last_result.idata) 270 if (result.idata == last_result.idata)
221 return (time64_t)result.idata - RTC_OFFSET; 271 return (time64_t)result.idata - RTC_OFFSET;
@@ -254,18 +304,18 @@ static void via_set_rtc_time(struct rtc_time *tm)
254 /* Clear the write protect bit */ 304 /* Clear the write protect bit */
255 305
256 temp = 0x55; 306 temp = 0x55;
257 via_pram_command(0x35, &temp); 307 via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp);
258 308
259 data.idata = lower_32_bits(time + RTC_OFFSET); 309 data.idata = lower_32_bits(time + RTC_OFFSET);
260 via_pram_command(0x01, &data.cdata[3]); 310 via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_0), &data.cdata[3]);
261 via_pram_command(0x05, &data.cdata[2]); 311 via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_1), &data.cdata[2]);
262 via_pram_command(0x09, &data.cdata[1]); 312 via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_2), &data.cdata[1]);
263 via_pram_command(0x0D, &data.cdata[0]); 313 via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_3), &data.cdata[0]);
264 314
265 /* Set the write protect bit */ 315 /* Set the write protect bit */
266 316
267 temp = 0xD5; 317 temp = 0x55 | RTC_FLG_WRITE_PROTECT;
268 via_pram_command(0x35, &temp); 318 via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp);
269} 319}
270 320
271static void via_shutdown(void) 321static void via_shutdown(void)
@@ -326,66 +376,58 @@ static void cuda_shutdown(void)
326 *------------------------------------------------------------------- 376 *-------------------------------------------------------------------
327 */ 377 */
328 378
329void mac_pram_read(int offset, __u8 *buffer, int len) 379#if IS_ENABLED(CONFIG_NVRAM)
380unsigned char mac_pram_read_byte(int addr)
330{ 381{
331 __u8 (*func)(int);
332 int i;
333
334 switch (macintosh_config->adb_type) { 382 switch (macintosh_config->adb_type) {
335 case MAC_ADB_IOP: 383 case MAC_ADB_IOP:
336 case MAC_ADB_II: 384 case MAC_ADB_II:
337 case MAC_ADB_PB1: 385 case MAC_ADB_PB1:
338 func = via_read_pram; 386 return via_pram_read_byte(addr);
339 break;
340#ifdef CONFIG_ADB_CUDA 387#ifdef CONFIG_ADB_CUDA
341 case MAC_ADB_EGRET: 388 case MAC_ADB_EGRET:
342 case MAC_ADB_CUDA: 389 case MAC_ADB_CUDA:
343 func = cuda_read_pram; 390 return cuda_pram_read_byte(addr);
344 break;
345#endif 391#endif
346#ifdef CONFIG_ADB_PMU 392#ifdef CONFIG_ADB_PMU
347 case MAC_ADB_PB2: 393 case MAC_ADB_PB2:
348 func = pmu_read_pram; 394 return pmu_pram_read_byte(addr);
349 break;
350#endif 395#endif
351 default: 396 default:
352 return; 397 return 0xFF;
353 }
354 for (i = 0 ; i < len ; i++) {
355 buffer[i] = (*func)(offset++);
356 } 398 }
357} 399}
358 400
359void mac_pram_write(int offset, __u8 *buffer, int len) 401void mac_pram_write_byte(unsigned char val, int addr)
360{ 402{
361 void (*func)(int, __u8);
362 int i;
363
364 switch (macintosh_config->adb_type) { 403 switch (macintosh_config->adb_type) {
365 case MAC_ADB_IOP: 404 case MAC_ADB_IOP:
366 case MAC_ADB_II: 405 case MAC_ADB_II:
367 case MAC_ADB_PB1: 406 case MAC_ADB_PB1:
368 func = via_write_pram; 407 via_pram_write_byte(val, addr);
369 break; 408 break;
370#ifdef CONFIG_ADB_CUDA 409#ifdef CONFIG_ADB_CUDA
371 case MAC_ADB_EGRET: 410 case MAC_ADB_EGRET:
372 case MAC_ADB_CUDA: 411 case MAC_ADB_CUDA:
373 func = cuda_write_pram; 412 cuda_pram_write_byte(val, addr);
374 break; 413 break;
375#endif 414#endif
376#ifdef CONFIG_ADB_PMU 415#ifdef CONFIG_ADB_PMU
377 case MAC_ADB_PB2: 416 case MAC_ADB_PB2:
378 func = pmu_write_pram; 417 pmu_pram_write_byte(val, addr);
379 break; 418 break;
380#endif 419#endif
381 default: 420 default:
382 return; 421 break;
383 }
384 for (i = 0 ; i < len ; i++) {
385 (*func)(offset++, buffer[i]);
386 } 422 }
387} 423}
388 424
425ssize_t mac_pram_get_size(void)
426{
427 return 256;
428}
429#endif /* CONFIG_NVRAM */
430
389void mac_poweroff(void) 431void mac_poweroff(void)
390{ 432{
391 if (oss_present) { 433 if (oss_present) {
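
A worked example of the two-byte XPRAM command encoding introduced in the misc.c hunks above: for extended PRAM address 0x42, RTC_CMD_XPRAM_READ is (0x80 | (14 << 2)) << 8 = 0xB800, RTC_CMD_XPRAM_ARG(0x42) scatters the address bits into (0x40 << 3) | (0x02 << 2) = 0x208, so via_rtc_command() receives 0xBA08 and detects a read because bit 15 (RTC_FLG_READ << 8) is set; it then forces the two low bits to 0b01 before serializing. The stand-alone sketch below repeats that packing for a few addresses, with the constants copied from the patch and BIT() expanded by hand since this is not kernel code.

#include <stdio.h>

#define RTC_FLG_READ        0x80		/* BIT(7) in the patch */
#define RTC_CMD_READ(r)     (RTC_FLG_READ | ((r) << 2))
#define RTC_CMD_WRITE(r)    ((r) << 2)
#define RTC_REG_XPRAM       14
#define RTC_CMD_XPRAM_READ  (RTC_CMD_READ(RTC_REG_XPRAM) << 8)
#define RTC_CMD_XPRAM_WRITE (RTC_CMD_WRITE(RTC_REG_XPRAM) << 8)
#define RTC_CMD_XPRAM_ARG(a) ((((a) & 0xE0) << 3) | (((a) & 0x1F) << 2))

int main(void)
{
	for (int addr = 0; addr < 256; addr += 0x42) {
		int rd = RTC_CMD_XPRAM_READ | RTC_CMD_XPRAM_ARG(addr);
		int wr = RTC_CMD_XPRAM_WRITE | RTC_CMD_XPRAM_ARG(addr);

		/* Bit 15 is what via_rtc_command() tests to detect a read. */
		printf("addr 0x%02x: read cmd 0x%04x (is_read=%d), write cmd 0x%04x\n",
		       addr, rd, !!(rd & (RTC_FLG_READ << 8)), wr);
	}
	return 0;
}
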
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index afe493b23d04..30a8315d5c07 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -311,6 +311,15 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
311 * value for either 32 or 64 bit mode */ 311 * value for either 32 or 64 bit mode */
312#define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL))) 312#define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL)))
313 313
314#define ioread64 ioread64
315#define ioread64be ioread64be
316#define iowrite64 iowrite64
317#define iowrite64be iowrite64be
318extern u64 ioread64(void __iomem *addr);
319extern u64 ioread64be(void __iomem *addr);
320extern void iowrite64(u64 val, void __iomem *addr);
321extern void iowrite64be(u64 val, void __iomem *addr);
322
314#include <asm-generic/iomap.h> 323#include <asm-generic/iomap.h>
315 324
316/* 325/*
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index 4b19e6e64fb7..0195aec657e2 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -48,11 +48,15 @@ struct iomap_ops {
48 unsigned int (*read16be)(void __iomem *); 48 unsigned int (*read16be)(void __iomem *);
49 unsigned int (*read32)(void __iomem *); 49 unsigned int (*read32)(void __iomem *);
50 unsigned int (*read32be)(void __iomem *); 50 unsigned int (*read32be)(void __iomem *);
51 u64 (*read64)(void __iomem *);
52 u64 (*read64be)(void __iomem *);
51 void (*write8)(u8, void __iomem *); 53 void (*write8)(u8, void __iomem *);
52 void (*write16)(u16, void __iomem *); 54 void (*write16)(u16, void __iomem *);
53 void (*write16be)(u16, void __iomem *); 55 void (*write16be)(u16, void __iomem *);
54 void (*write32)(u32, void __iomem *); 56 void (*write32)(u32, void __iomem *);
55 void (*write32be)(u32, void __iomem *); 57 void (*write32be)(u32, void __iomem *);
58 void (*write64)(u64, void __iomem *);
59 void (*write64be)(u64, void __iomem *);
56 void (*read8r)(void __iomem *, void *, unsigned long); 60 void (*read8r)(void __iomem *, void *, unsigned long);
57 void (*read16r)(void __iomem *, void *, unsigned long); 61 void (*read16r)(void __iomem *, void *, unsigned long);
58 void (*read32r)(void __iomem *, void *, unsigned long); 62 void (*read32r)(void __iomem *, void *, unsigned long);
@@ -171,6 +175,16 @@ static unsigned int iomem_read32be(void __iomem *addr)
171 return __raw_readl(addr); 175 return __raw_readl(addr);
172} 176}
173 177
178static u64 iomem_read64(void __iomem *addr)
179{
180 return readq(addr);
181}
182
183static u64 iomem_read64be(void __iomem *addr)
184{
185 return __raw_readq(addr);
186}
187
174static void iomem_write8(u8 datum, void __iomem *addr) 188static void iomem_write8(u8 datum, void __iomem *addr)
175{ 189{
176 writeb(datum, addr); 190 writeb(datum, addr);
@@ -196,6 +210,16 @@ static void iomem_write32be(u32 datum, void __iomem *addr)
196 __raw_writel(datum, addr); 210 __raw_writel(datum, addr);
197} 211}
198 212
213static void iomem_write64(u64 datum, void __iomem *addr)
214{
215 writeq(datum, addr);
216}
217
218static void iomem_write64be(u64 datum, void __iomem *addr)
219{
220 __raw_writeq(datum, addr);
221}
222
199static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count) 223static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count)
200{ 224{
201 while (count--) { 225 while (count--) {
@@ -250,11 +274,15 @@ static const struct iomap_ops iomem_ops = {
250 .read16be = iomem_read16be, 274 .read16be = iomem_read16be,
251 .read32 = iomem_read32, 275 .read32 = iomem_read32,
252 .read32be = iomem_read32be, 276 .read32be = iomem_read32be,
277 .read64 = iomem_read64,
278 .read64be = iomem_read64be,
253 .write8 = iomem_write8, 279 .write8 = iomem_write8,
254 .write16 = iomem_write16, 280 .write16 = iomem_write16,
255 .write16be = iomem_write16be, 281 .write16be = iomem_write16be,
256 .write32 = iomem_write32, 282 .write32 = iomem_write32,
257 .write32be = iomem_write32be, 283 .write32be = iomem_write32be,
284 .write64 = iomem_write64,
285 .write64be = iomem_write64be,
258 .read8r = iomem_read8r, 286 .read8r = iomem_read8r,
259 .read16r = iomem_read16r, 287 .read16r = iomem_read16r,
260 .read32r = iomem_read32r, 288 .read32r = iomem_read32r,
@@ -304,6 +332,20 @@ unsigned int ioread32be(void __iomem *addr)
304 return *((u32 *)addr); 332 return *((u32 *)addr);
305} 333}
306 334
335u64 ioread64(void __iomem *addr)
336{
337 if (unlikely(INDIRECT_ADDR(addr)))
338 return iomap_ops[ADDR_TO_REGION(addr)]->read64(addr);
339 return le64_to_cpup((u64 *)addr);
340}
341
342u64 ioread64be(void __iomem *addr)
343{
344 if (unlikely(INDIRECT_ADDR(addr)))
345 return iomap_ops[ADDR_TO_REGION(addr)]->read64be(addr);
346 return *((u64 *)addr);
347}
348
307void iowrite8(u8 datum, void __iomem *addr) 349void iowrite8(u8 datum, void __iomem *addr)
308{ 350{
309 if (unlikely(INDIRECT_ADDR(addr))) { 351 if (unlikely(INDIRECT_ADDR(addr))) {
@@ -349,6 +391,24 @@ void iowrite32be(u32 datum, void __iomem *addr)
349 } 391 }
350} 392}
351 393
394void iowrite64(u64 datum, void __iomem *addr)
395{
396 if (unlikely(INDIRECT_ADDR(addr))) {
397 iomap_ops[ADDR_TO_REGION(addr)]->write64(datum, addr);
398 } else {
399 *((u64 *)addr) = cpu_to_le64(datum);
400 }
401}
402
403void iowrite64be(u64 datum, void __iomem *addr)
404{
405 if (unlikely(INDIRECT_ADDR(addr))) {
406 iomap_ops[ADDR_TO_REGION(addr)]->write64be(datum, addr);
407 } else {
408 *((u64 *)addr) = datum;
409 }
410}
411
352/* Repeating interfaces */ 412/* Repeating interfaces */
353 413
354void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) 414void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
@@ -449,11 +509,15 @@ EXPORT_SYMBOL(ioread16);
449EXPORT_SYMBOL(ioread16be); 509EXPORT_SYMBOL(ioread16be);
450EXPORT_SYMBOL(ioread32); 510EXPORT_SYMBOL(ioread32);
451EXPORT_SYMBOL(ioread32be); 511EXPORT_SYMBOL(ioread32be);
512EXPORT_SYMBOL(ioread64);
513EXPORT_SYMBOL(ioread64be);
452EXPORT_SYMBOL(iowrite8); 514EXPORT_SYMBOL(iowrite8);
453EXPORT_SYMBOL(iowrite16); 515EXPORT_SYMBOL(iowrite16);
454EXPORT_SYMBOL(iowrite16be); 516EXPORT_SYMBOL(iowrite16be);
455EXPORT_SYMBOL(iowrite32); 517EXPORT_SYMBOL(iowrite32);
456EXPORT_SYMBOL(iowrite32be); 518EXPORT_SYMBOL(iowrite32be);
519EXPORT_SYMBOL(iowrite64);
520EXPORT_SYMBOL(iowrite64be);
457EXPORT_SYMBOL(ioread8_rep); 521EXPORT_SYMBOL(ioread8_rep);
458EXPORT_SYMBOL(ioread16_rep); 522EXPORT_SYMBOL(ioread16_rep);
459EXPORT_SYMBOL(ioread32_rep); 523EXPORT_SYMBOL(ioread32_rep);
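
The new 64-bit accessors keep the existing split: ioread64()/iowrite64() treat the register as little-endian (le64_to_cpup()/cpu_to_le64() on the direct MMIO path), while the *be variants pass the bytes through unswapped via the __raw_ accessors. The toy program below only models that byte-order difference on an arbitrary 8-byte pattern; it is not the accessor implementation, and the register contents are made up.

#include <stdint.h>
#include <stdio.h>

/* Interpret eight bytes of a (fake) register window as little-endian. */
static uint64_t load_le64(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

/* Interpret the same bytes as raw big-endian (what ioread64be exposes). */
static uint64_t load_be64(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	const uint8_t reg[8] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };

	printf("ioread64   -> 0x%016llx\n", (unsigned long long)load_le64(reg));
	printf("ioread64be -> 0x%016llx\n", (unsigned long long)load_be64(reg));
	return 0;
}
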
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 375d0dc0dc7d..7deb3ea2dd3f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -179,6 +179,7 @@ config PPC
179 select HAVE_ARCH_KGDB 179 select HAVE_ARCH_KGDB
180 select HAVE_ARCH_MMAP_RND_BITS 180 select HAVE_ARCH_MMAP_RND_BITS
181 select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT 181 select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
182 select HAVE_ARCH_NVRAM_OPS
182 select HAVE_ARCH_SECCOMP_FILTER 183 select HAVE_ARCH_SECCOMP_FILTER
183 select HAVE_ARCH_TRACEHOOK 184 select HAVE_ARCH_TRACEHOOK
184 select HAVE_CBPF_JIT if !PPC64 185 select HAVE_CBPF_JIT if !PPC64
@@ -275,11 +276,6 @@ config SYSVIPC_COMPAT
275 depends on COMPAT && SYSVIPC 276 depends on COMPAT && SYSVIPC
276 default y 277 default y
277 278
278# All PPC32s use generic nvram driver through ppc_md
279config GENERIC_NVRAM
280 bool
281 default y if PPC32
282
283config SCHED_OMIT_FRAME_POINTER 279config SCHED_OMIT_FRAME_POINTER
284 bool 280 bool
285 default y 281 default y
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 7f19fbd3ba55..4b73847e9b95 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -783,8 +783,10 @@ extern void __iounmap_at(void *ea, unsigned long size);
783 783
784#define mmio_read16be(addr) readw_be(addr) 784#define mmio_read16be(addr) readw_be(addr)
785#define mmio_read32be(addr) readl_be(addr) 785#define mmio_read32be(addr) readl_be(addr)
786#define mmio_read64be(addr) readq_be(addr)
786#define mmio_write16be(val, addr) writew_be(val, addr) 787#define mmio_write16be(val, addr) writew_be(val, addr)
787#define mmio_write32be(val, addr) writel_be(val, addr) 788#define mmio_write32be(val, addr) writel_be(val, addr)
789#define mmio_write64be(val, addr) writeq_be(val, addr)
788#define mmio_insb(addr, dst, count) readsb(addr, dst, count) 790#define mmio_insb(addr, dst, count) readsb(addr, dst, count)
789#define mmio_insw(addr, dst, count) readsw(addr, dst, count) 791#define mmio_insw(addr, dst, count) readsw(addr, dst, count)
790#define mmio_insl(addr, dst, count) readsl(addr, dst, count) 792#define mmio_insl(addr, dst, count) readsl(addr, dst, count)
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index 09a518bb7c03..629a5cdcc865 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -78,9 +78,6 @@ extern int pmac_get_partition(int partition);
78extern u8 pmac_xpram_read(int xpaddr); 78extern u8 pmac_xpram_read(int xpaddr);
79extern void pmac_xpram_write(int xpaddr, u8 data); 79extern void pmac_xpram_write(int xpaddr, u8 data);
80 80
81/* Synchronize NVRAM */
82extern void nvram_sync(void);
83
84/* Initialize NVRAM OS partition */ 81/* Initialize NVRAM OS partition */
85extern int __init nvram_init_os_partition(struct nvram_os_partition *part); 82extern int __init nvram_init_os_partition(struct nvram_os_partition *part);
86 83
@@ -98,10 +95,4 @@ extern int nvram_write_os_partition(struct nvram_os_partition *part,
98 unsigned int err_type, 95 unsigned int err_type,
99 unsigned int error_log_cnt); 96 unsigned int error_log_cnt);
100 97
101/* Determine NVRAM size */
102extern ssize_t nvram_get_size(void);
103
104/* Normal access to NVRAM */
105extern unsigned char nvram_read_byte(int i);
106extern void nvram_write_byte(unsigned char c, int i);
107#endif /* _ASM_POWERPC_NVRAM_H */ 98#endif /* _ASM_POWERPC_NVRAM_H */
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 38b03a330cd2..244d2462e781 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -7,12 +7,6 @@
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 * 8 *
9 * /dev/nvram driver for PPC64 9 * /dev/nvram driver for PPC64
10 *
11 * This perhaps should live in drivers/char
12 *
13 * TODO: Split the /dev/nvram part (that one can use
14 * drivers/char/generic_nvram.c) from the arch & partition
15 * parsing code.
16 */ 10 */
17 11
18#include <linux/types.h> 12#include <linux/types.h>
@@ -714,137 +708,6 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
714 spin_unlock_irqrestore(&lock, flags); 708 spin_unlock_irqrestore(&lock, flags);
715} 709}
716 710
717static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
718{
719 if (ppc_md.nvram_size == NULL)
720 return -ENODEV;
721 return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
722 ppc_md.nvram_size());
723}
724
725
726static ssize_t dev_nvram_read(struct file *file, char __user *buf,
727 size_t count, loff_t *ppos)
728{
729 ssize_t ret;
730 char *tmp = NULL;
731 ssize_t size;
732
733 if (!ppc_md.nvram_size) {
734 ret = -ENODEV;
735 goto out;
736 }
737
738 size = ppc_md.nvram_size();
739 if (size < 0) {
740 ret = size;
741 goto out;
742 }
743
744 if (*ppos >= size) {
745 ret = 0;
746 goto out;
747 }
748
749 count = min_t(size_t, count, size - *ppos);
750 count = min(count, PAGE_SIZE);
751
752 tmp = kmalloc(count, GFP_KERNEL);
753 if (!tmp) {
754 ret = -ENOMEM;
755 goto out;
756 }
757
758 ret = ppc_md.nvram_read(tmp, count, ppos);
759 if (ret <= 0)
760 goto out;
761
762 if (copy_to_user(buf, tmp, ret))
763 ret = -EFAULT;
764
765out:
766 kfree(tmp);
767 return ret;
768
769}
770
771static ssize_t dev_nvram_write(struct file *file, const char __user *buf,
772 size_t count, loff_t *ppos)
773{
774 ssize_t ret;
775 char *tmp = NULL;
776 ssize_t size;
777
778 ret = -ENODEV;
779 if (!ppc_md.nvram_size)
780 goto out;
781
782 ret = 0;
783 size = ppc_md.nvram_size();
784 if (*ppos >= size || size < 0)
785 goto out;
786
787 count = min_t(size_t, count, size - *ppos);
788 count = min(count, PAGE_SIZE);
789
790 tmp = memdup_user(buf, count);
791 if (IS_ERR(tmp)) {
792 ret = PTR_ERR(tmp);
793 goto out;
794 }
795
796 ret = ppc_md.nvram_write(tmp, count, ppos);
797
798 kfree(tmp);
799out:
800 return ret;
801}
802
803static long dev_nvram_ioctl(struct file *file, unsigned int cmd,
804 unsigned long arg)
805{
806 switch(cmd) {
807#ifdef CONFIG_PPC_PMAC
808 case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
809 printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
810 /* fall through */
811 case IOC_NVRAM_GET_OFFSET: {
812 int part, offset;
813
814 if (!machine_is(powermac))
815 return -EINVAL;
816 if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
817 return -EFAULT;
818 if (part < pmac_nvram_OF || part > pmac_nvram_NR)
819 return -EINVAL;
820 offset = pmac_get_partition(part);
821 if (offset < 0)
822 return offset;
823 if (copy_to_user((void __user*)arg, &offset, sizeof(offset)) != 0)
824 return -EFAULT;
825 return 0;
826 }
827#endif /* CONFIG_PPC_PMAC */
828 default:
829 return -EINVAL;
830 }
831}
832
833static const struct file_operations nvram_fops = {
834 .owner = THIS_MODULE,
835 .llseek = dev_nvram_llseek,
836 .read = dev_nvram_read,
837 .write = dev_nvram_write,
838 .unlocked_ioctl = dev_nvram_ioctl,
839};
840
841static struct miscdevice nvram_dev = {
842 NVRAM_MINOR,
843 "nvram",
844 &nvram_fops
845};
846
847
848#ifdef DEBUG_NVRAM 711#ifdef DEBUG_NVRAM
849static void __init nvram_print_partitions(char * label) 712static void __init nvram_print_partitions(char * label)
850{ 713{
@@ -992,6 +855,8 @@ loff_t __init nvram_create_partition(const char *name, int sig,
992 long size = 0; 855 long size = 0;
993 int rc; 856 int rc;
994 857
858 BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
859
995 /* Convert sizes from bytes to blocks */ 860 /* Convert sizes from bytes to blocks */
996 req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN; 861 req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
997 min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN; 862 min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
@@ -1192,22 +1057,3 @@ int __init nvram_scan_partitions(void)
1192 kfree(header); 1057 kfree(header);
1193 return err; 1058 return err;
1194} 1059}
1195
1196static int __init nvram_init(void)
1197{
1198 int rc;
1199
1200 BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
1201
1202 if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
1203 return -ENODEV;
1204
1205 rc = misc_register(&nvram_dev);
1206 if (rc != 0) {
1207 printk(KERN_ERR "nvram_init: failed to register device\n");
1208 return rc;
1209 }
1210
1211 return rc;
1212}
1213device_initcall(nvram_init);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 947f904688b0..c31082233a25 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -17,6 +17,7 @@
17#include <linux/console.h> 17#include <linux/console.h>
18#include <linux/memblock.h> 18#include <linux/memblock.h>
19#include <linux/export.h> 19#include <linux/export.h>
20#include <linux/nvram.h>
20 21
21#include <asm/io.h> 22#include <asm/io.h>
22#include <asm/prom.h> 23#include <asm/prom.h>
@@ -147,41 +148,6 @@ static int __init ppc_setup_l3cr(char *str)
147} 148}
148__setup("l3cr=", ppc_setup_l3cr); 149__setup("l3cr=", ppc_setup_l3cr);
149 150
150#ifdef CONFIG_GENERIC_NVRAM
151
152/* Generic nvram hooks used by drivers/char/gen_nvram.c */
153unsigned char nvram_read_byte(int addr)
154{
155 if (ppc_md.nvram_read_val)
156 return ppc_md.nvram_read_val(addr);
157 return 0xff;
158}
159EXPORT_SYMBOL(nvram_read_byte);
160
161void nvram_write_byte(unsigned char val, int addr)
162{
163 if (ppc_md.nvram_write_val)
164 ppc_md.nvram_write_val(addr, val);
165}
166EXPORT_SYMBOL(nvram_write_byte);
167
168ssize_t nvram_get_size(void)
169{
170 if (ppc_md.nvram_size)
171 return ppc_md.nvram_size();
172 return -1;
173}
174EXPORT_SYMBOL(nvram_get_size);
175
176void nvram_sync(void)
177{
178 if (ppc_md.nvram_sync)
179 ppc_md.nvram_sync();
180}
181EXPORT_SYMBOL(nvram_sync);
182
183#endif /* CONFIG_NVRAM */
184
185static int __init ppc_init(void) 151static int __init ppc_init(void)
186{ 152{
187 /* clear the progress line */ 153 /* clear the progress line */
diff --git a/arch/powerpc/platforms/chrp/Makefile b/arch/powerpc/platforms/chrp/Makefile
index 4b3bfadc70fa..dc3465cc8bc6 100644
--- a/arch/powerpc/platforms/chrp/Makefile
+++ b/arch/powerpc/platforms/chrp/Makefile
@@ -1,3 +1,3 @@
1obj-y += setup.o time.o pegasos_eth.o pci.o 1obj-y += setup.o time.o pegasos_eth.o pci.o
2obj-$(CONFIG_SMP) += smp.o 2obj-$(CONFIG_SMP) += smp.o
3obj-$(CONFIG_NVRAM) += nvram.o 3obj-$(CONFIG_NVRAM:m=y) += nvram.o
diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c
index 791b86398e1d..37ac20ccbb19 100644
--- a/arch/powerpc/platforms/chrp/nvram.c
+++ b/arch/powerpc/platforms/chrp/nvram.c
@@ -24,7 +24,7 @@ static unsigned int nvram_size;
24static unsigned char nvram_buf[4]; 24static unsigned char nvram_buf[4];
25static DEFINE_SPINLOCK(nvram_lock); 25static DEFINE_SPINLOCK(nvram_lock);
26 26
27static unsigned char chrp_nvram_read(int addr) 27static unsigned char chrp_nvram_read_val(int addr)
28{ 28{
29 unsigned int done; 29 unsigned int done;
30 unsigned long flags; 30 unsigned long flags;
@@ -46,7 +46,7 @@ static unsigned char chrp_nvram_read(int addr)
46 return ret; 46 return ret;
47} 47}
48 48
49static void chrp_nvram_write(int addr, unsigned char val) 49static void chrp_nvram_write_val(int addr, unsigned char val)
50{ 50{
51 unsigned int done; 51 unsigned int done;
52 unsigned long flags; 52 unsigned long flags;
@@ -64,6 +64,11 @@ static void chrp_nvram_write(int addr, unsigned char val)
64 spin_unlock_irqrestore(&nvram_lock, flags); 64 spin_unlock_irqrestore(&nvram_lock, flags);
65} 65}
66 66
67static ssize_t chrp_nvram_size(void)
68{
69 return nvram_size;
70}
71
67void __init chrp_nvram_init(void) 72void __init chrp_nvram_init(void)
68{ 73{
69 struct device_node *nvram; 74 struct device_node *nvram;
@@ -85,8 +90,9 @@ void __init chrp_nvram_init(void)
85 printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size); 90 printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size);
86 of_node_put(nvram); 91 of_node_put(nvram);
87 92
88 ppc_md.nvram_read_val = chrp_nvram_read; 93 ppc_md.nvram_read_val = chrp_nvram_read_val;
89 ppc_md.nvram_write_val = chrp_nvram_write; 94 ppc_md.nvram_write_val = chrp_nvram_write_val;
95 ppc_md.nvram_size = chrp_nvram_size;
90 96
91 return; 97 return;
92} 98}
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 9438fa0fc355..fcf6f2342ef4 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -549,7 +549,7 @@ static void __init chrp_init_IRQ(void)
549static void __init 549static void __init
550chrp_init2(void) 550chrp_init2(void)
551{ 551{
552#ifdef CONFIG_NVRAM 552#if IS_ENABLED(CONFIG_NVRAM)
553 chrp_nvram_init(); 553 chrp_nvram_init();
554#endif 554#endif
555 555
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index 923bfb340433..20ebf35d7913 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -15,7 +15,5 @@ obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
15# need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really 15# need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really
16# CONFIG_NVRAM=y 16# CONFIG_NVRAM=y
17obj-$(CONFIG_NVRAM:m=y) += nvram.o 17obj-$(CONFIG_NVRAM:m=y) += nvram.o
18# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff
19obj-$(CONFIG_PPC64) += nvram.o
20obj-$(CONFIG_PPC32) += bootx_init.o 18obj-$(CONFIG_PPC32) += bootx_init.o
21obj-$(CONFIG_SMP) += smp.o 19obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index ae54d7fe68f3..9360cdc408c1 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -147,6 +147,11 @@ static ssize_t core99_nvram_size(void)
147static volatile unsigned char __iomem *nvram_addr; 147static volatile unsigned char __iomem *nvram_addr;
148static int nvram_mult; 148static int nvram_mult;
149 149
150static ssize_t ppc32_nvram_size(void)
151{
152 return NVRAM_SIZE;
153}
154
150static unsigned char direct_nvram_read_byte(int addr) 155static unsigned char direct_nvram_read_byte(int addr)
151{ 156{
152 return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]); 157 return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
@@ -590,21 +595,25 @@ int __init pmac_nvram_init(void)
590 nvram_mult = 1; 595 nvram_mult = 1;
591 ppc_md.nvram_read_val = direct_nvram_read_byte; 596 ppc_md.nvram_read_val = direct_nvram_read_byte;
592 ppc_md.nvram_write_val = direct_nvram_write_byte; 597 ppc_md.nvram_write_val = direct_nvram_write_byte;
598 ppc_md.nvram_size = ppc32_nvram_size;
593 } else if (nvram_naddrs == 1) { 599 } else if (nvram_naddrs == 1) {
594 nvram_data = ioremap(r1.start, s1); 600 nvram_data = ioremap(r1.start, s1);
595 nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE; 601 nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE;
596 ppc_md.nvram_read_val = direct_nvram_read_byte; 602 ppc_md.nvram_read_val = direct_nvram_read_byte;
597 ppc_md.nvram_write_val = direct_nvram_write_byte; 603 ppc_md.nvram_write_val = direct_nvram_write_byte;
604 ppc_md.nvram_size = ppc32_nvram_size;
598 } else if (nvram_naddrs == 2) { 605 } else if (nvram_naddrs == 2) {
599 nvram_addr = ioremap(r1.start, s1); 606 nvram_addr = ioremap(r1.start, s1);
600 nvram_data = ioremap(r2.start, s2); 607 nvram_data = ioremap(r2.start, s2);
601 ppc_md.nvram_read_val = indirect_nvram_read_byte; 608 ppc_md.nvram_read_val = indirect_nvram_read_byte;
602 ppc_md.nvram_write_val = indirect_nvram_write_byte; 609 ppc_md.nvram_write_val = indirect_nvram_write_byte;
610 ppc_md.nvram_size = ppc32_nvram_size;
603 } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) { 611 } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
604#ifdef CONFIG_ADB_PMU 612#ifdef CONFIG_ADB_PMU
605 nvram_naddrs = -1; 613 nvram_naddrs = -1;
606 ppc_md.nvram_read_val = pmu_nvram_read_byte; 614 ppc_md.nvram_read_val = pmu_nvram_read_byte;
607 ppc_md.nvram_write_val = pmu_nvram_write_byte; 615 ppc_md.nvram_write_val = pmu_nvram_write_byte;
616 ppc_md.nvram_size = ppc32_nvram_size;
608#endif /* CONFIG_ADB_PMU */ 617#endif /* CONFIG_ADB_PMU */
609 } else { 618 } else {
610 printk(KERN_ERR "Incompatible type of NVRAM\n"); 619 printk(KERN_ERR "Incompatible type of NVRAM\n");
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 2e8221e20ee8..b7efcf336589 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -316,8 +316,7 @@ static void __init pmac_setup_arch(void)
316 find_via_pmu(); 316 find_via_pmu();
317 smu_init(); 317 smu_init();
318 318
319#if defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) || \ 319#if IS_ENABLED(CONFIG_NVRAM)
320 defined(CONFIG_PPC64)
321 pmac_nvram_init(); 320 pmac_nvram_init();
322#endif 321#endif
323#ifdef CONFIG_PPC32 322#ifdef CONFIG_PPC32
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index f157e3d071f2..b36ddee17c87 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -68,7 +68,7 @@
68long __init pmac_time_init(void) 68long __init pmac_time_init(void)
69{ 69{
70 s32 delta = 0; 70 s32 delta = 0;
71#ifdef CONFIG_NVRAM 71#if defined(CONFIG_NVRAM) && defined(CONFIG_PPC32)
72 int dst; 72 int dst;
73 73
74 delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16; 74 delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 69cedc1b3b8a..1136a38ff039 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -7,8 +7,6 @@
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 * 8 *
9 * /dev/nvram driver for PPC64 9 * /dev/nvram driver for PPC64
10 *
11 * This perhaps should live in drivers/char
12 */ 10 */
13 11
14 12
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 4f9f99057ff8..45f9decb9848 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -228,4 +228,6 @@ source "drivers/siox/Kconfig"
228 228
229source "drivers/slimbus/Kconfig" 229source "drivers/slimbus/Kconfig"
230 230
231source "drivers/interconnect/Kconfig"
232
231endmenu 233endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index e1ce029d28fd..bb15b9d0e793 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -186,3 +186,4 @@ obj-$(CONFIG_MULTIPLEXER) += mux/
186obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/ 186obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
187obj-$(CONFIG_SIOX) += siox/ 187obj-$(CONFIG_SIOX) += siox/
188obj-$(CONFIG_GNSS) += gnss/ 188obj-$(CONFIG_GNSS) += gnss/
189obj-$(CONFIG_INTERCONNECT) += interconnect/
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 4c190f8d1f4c..6fdf2abe4598 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -10,7 +10,7 @@ if ANDROID
10 10
11config ANDROID_BINDER_IPC 11config ANDROID_BINDER_IPC
12 bool "Android Binder IPC Driver" 12 bool "Android Binder IPC Driver"
13 depends on MMU && !CPU_CACHE_VIVT 13 depends on MMU
14 default n 14 default n
15 ---help--- 15 ---help---
16 Binder is used in Android for both communication between processes, 16 Binder is used in Android for both communication between processes,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 4d2b2ad1ee0e..8685882da64c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -329,6 +329,8 @@ struct binder_error {
329 * (invariant after initialized) 329 * (invariant after initialized)
330 * @min_priority: minimum scheduling priority 330 * @min_priority: minimum scheduling priority
331 * (invariant after initialized) 331 * (invariant after initialized)
332 * @txn_security_ctx: require sender's security context
333 * (invariant after initialized)
332 * @async_todo: list of async work items 334 * @async_todo: list of async work items
333 * (protected by @proc->inner_lock) 335 * (protected by @proc->inner_lock)
334 * 336 *
@@ -365,6 +367,7 @@ struct binder_node {
365 * invariant after initialization 367 * invariant after initialization
366 */ 368 */
367 u8 accept_fds:1; 369 u8 accept_fds:1;
370 u8 txn_security_ctx:1;
368 u8 min_priority; 371 u8 min_priority;
369 }; 372 };
370 bool has_async_transaction; 373 bool has_async_transaction;
@@ -615,6 +618,7 @@ struct binder_transaction {
615 long saved_priority; 618 long saved_priority;
616 kuid_t sender_euid; 619 kuid_t sender_euid;
617 struct list_head fd_fixups; 620 struct list_head fd_fixups;
621 binder_uintptr_t security_ctx;
618 /** 622 /**
619 * @lock: protects @from, @to_proc, and @to_thread 623 * @lock: protects @from, @to_proc, and @to_thread
620 * 624 *
@@ -625,6 +629,26 @@ struct binder_transaction {
625}; 629};
626 630
627/** 631/**
632 * struct binder_object - union of flat binder object types
633 * @hdr: generic object header
634 * @fbo: binder object (nodes and refs)
635 * @fdo: file descriptor object
636 * @bbo: binder buffer pointer
637 * @fdao: file descriptor array
638 *
639 * Used for type-independent object copies
640 */
641struct binder_object {
642 union {
643 struct binder_object_header hdr;
644 struct flat_binder_object fbo;
645 struct binder_fd_object fdo;
646 struct binder_buffer_object bbo;
647 struct binder_fd_array_object fdao;
648 };
649};
650
651/**
628 * binder_proc_lock() - Acquire outer lock for given binder_proc 652 * binder_proc_lock() - Acquire outer lock for given binder_proc
629 * @proc: struct binder_proc to acquire 653 * @proc: struct binder_proc to acquire
630 * 654 *
@@ -1152,6 +1176,7 @@ static struct binder_node *binder_init_node_ilocked(
1152 node->work.type = BINDER_WORK_NODE; 1176 node->work.type = BINDER_WORK_NODE;
1153 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; 1177 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1154 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); 1178 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1179 node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
1155 spin_lock_init(&node->lock); 1180 spin_lock_init(&node->lock);
1156 INIT_LIST_HEAD(&node->work.entry); 1181 INIT_LIST_HEAD(&node->work.entry);
1157 INIT_LIST_HEAD(&node->async_todo); 1182 INIT_LIST_HEAD(&node->async_todo);
@@ -2012,26 +2037,33 @@ static void binder_cleanup_transaction(struct binder_transaction *t,
2012} 2037}
2013 2038
2014/** 2039/**
2015 * binder_validate_object() - checks for a valid metadata object in a buffer. 2040 * binder_get_object() - gets object and checks for valid metadata
2041 * @proc: binder_proc owning the buffer
2016 * @buffer: binder_buffer that we're parsing. 2042 * @buffer: binder_buffer that we're parsing.
2017 * @offset: offset in the buffer at which to validate an object. 2043 * @offset: offset in the @buffer at which to validate an object.
2044 * @object: struct binder_object to read into
2018 * 2045 *
2019 * Return: If there's a valid metadata object at @offset in @buffer, the 2046 * Return: If there's a valid metadata object at @offset in @buffer, the
2020 * size of that object. Otherwise, it returns zero. 2047 * size of that object. Otherwise, it returns zero. The object
2048 * is read into the struct binder_object pointed to by @object.
2021 */ 2049 */
2022static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) 2050static size_t binder_get_object(struct binder_proc *proc,
2051 struct binder_buffer *buffer,
2052 unsigned long offset,
2053 struct binder_object *object)
2023{ 2054{
2024 /* Check if we can read a header first */ 2055 size_t read_size;
2025 struct binder_object_header *hdr; 2056 struct binder_object_header *hdr;
2026 size_t object_size = 0; 2057 size_t object_size = 0;
2027 2058
2028 if (buffer->data_size < sizeof(*hdr) || 2059 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2029 offset > buffer->data_size - sizeof(*hdr) || 2060 if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
2030 !IS_ALIGNED(offset, sizeof(u32)))
2031 return 0; 2061 return 0;
2062 binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2063 offset, read_size);
2032 2064
2033 /* Ok, now see if we can read a complete object. */ 2065 /* Ok, now see if we read a complete object. */
2034 hdr = (struct binder_object_header *)(buffer->data + offset); 2066 hdr = &object->hdr;
2035 switch (hdr->type) { 2067 switch (hdr->type) {
2036 case BINDER_TYPE_BINDER: 2068 case BINDER_TYPE_BINDER:
2037 case BINDER_TYPE_WEAK_BINDER: 2069 case BINDER_TYPE_WEAK_BINDER:
@@ -2060,10 +2092,13 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2060 2092
2061/** 2093/**
2062 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. 2094 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2095 * @proc: binder_proc owning the buffer
2063 * @b: binder_buffer containing the object 2096 * @b: binder_buffer containing the object
2097 * @object: struct binder_object to read into
2064 * @index: index in offset array at which the binder_buffer_object is 2098 * @index: index in offset array at which the binder_buffer_object is
2065 * located 2099 * located
2066 * @start: points to the start of the offset array 2100 * @start_offset: points to the start of the offset array
2101 * @object_offsetp: offset of @object read from @b
2067 * @num_valid: the number of valid offsets in the offset array 2102 * @num_valid: the number of valid offsets in the offset array
2068 * 2103 *
2069 * Return: If @index is within the valid range of the offset array 2104 * Return: If @index is within the valid range of the offset array
@@ -2074,34 +2109,46 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2074 * Note that the offset found in index @index itself is not 2109 * Note that the offset found in index @index itself is not
2075 * verified; this function assumes that @num_valid elements 2110 * verified; this function assumes that @num_valid elements
2076 * from @start were previously verified to have valid offsets. 2111 * from @start were previously verified to have valid offsets.
2112 * If @object_offsetp is non-NULL, then the offset within
2113 * @b is written to it.
2077 */ 2114 */
2078static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b, 2115static struct binder_buffer_object *binder_validate_ptr(
2079 binder_size_t index, 2116 struct binder_proc *proc,
2080 binder_size_t *start, 2117 struct binder_buffer *b,
2081 binder_size_t num_valid) 2118 struct binder_object *object,
2119 binder_size_t index,
2120 binder_size_t start_offset,
2121 binder_size_t *object_offsetp,
2122 binder_size_t num_valid)
2082{ 2123{
2083 struct binder_buffer_object *buffer_obj; 2124 size_t object_size;
2084 binder_size_t *offp; 2125 binder_size_t object_offset;
2126 unsigned long buffer_offset;
2085 2127
2086 if (index >= num_valid) 2128 if (index >= num_valid)
2087 return NULL; 2129 return NULL;
2088 2130
2089 offp = start + index; 2131 buffer_offset = start_offset + sizeof(binder_size_t) * index;
2090 buffer_obj = (struct binder_buffer_object *)(b->data + *offp); 2132 binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2091 if (buffer_obj->hdr.type != BINDER_TYPE_PTR) 2133 b, buffer_offset, sizeof(object_offset));
2134 object_size = binder_get_object(proc, b, object_offset, object);
2135 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2092 return NULL; 2136 return NULL;
2137 if (object_offsetp)
2138 *object_offsetp = object_offset;
2093 2139
2094 return buffer_obj; 2140 return &object->bbo;
2095} 2141}
2096 2142
2097/** 2143/**
2098 * binder_validate_fixup() - validates pointer/fd fixups happen in order. 2144 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2145 * @proc: binder_proc owning the buffer
2099 * @b: transaction buffer 2146 * @b: transaction buffer
2100 * @objects_start start of objects buffer 2147 * @objects_start_offset: offset to start of objects buffer
2101 * @buffer: binder_buffer_object in which to fix up 2148 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
2102 * @offset: start offset in @buffer to fix up 2149 * @fixup_offset: start offset in @buffer to fix up
2103 * @last_obj: last binder_buffer_object that we fixed up in 2150 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2104 * @last_min_offset: minimum fixup offset in @last_obj 2151 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
2105 * 2152 *
2106 * Return: %true if a fixup in buffer @buffer at offset @offset is 2153 * Return: %true if a fixup in buffer @buffer at offset @offset is
2107 * allowed. 2154 * allowed.
@@ -2132,28 +2179,41 @@ static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2132 * C (parent = A, offset = 16) 2179 * C (parent = A, offset = 16)
2133 * D (parent = B, offset = 0) // B is not A or any of A's parents 2180 * D (parent = B, offset = 0) // B is not A or any of A's parents
2134 */ 2181 */
2135static bool binder_validate_fixup(struct binder_buffer *b, 2182static bool binder_validate_fixup(struct binder_proc *proc,
2136 binder_size_t *objects_start, 2183 struct binder_buffer *b,
2137 struct binder_buffer_object *buffer, 2184 binder_size_t objects_start_offset,
2185 binder_size_t buffer_obj_offset,
2138 binder_size_t fixup_offset, 2186 binder_size_t fixup_offset,
2139 struct binder_buffer_object *last_obj, 2187 binder_size_t last_obj_offset,
2140 binder_size_t last_min_offset) 2188 binder_size_t last_min_offset)
2141{ 2189{
2142 if (!last_obj) { 2190 if (!last_obj_offset) {
2143 /* Nothing to fix up in */ 2191 /* Nothing to fix up in */
2144 return false; 2192 return false;
2145 } 2193 }
2146 2194
2147 while (last_obj != buffer) { 2195 while (last_obj_offset != buffer_obj_offset) {
2196 unsigned long buffer_offset;
2197 struct binder_object last_object;
2198 struct binder_buffer_object *last_bbo;
2199 size_t object_size = binder_get_object(proc, b, last_obj_offset,
2200 &last_object);
2201 if (object_size != sizeof(*last_bbo))
2202 return false;
2203
2204 last_bbo = &last_object.bbo;
2148 /* 2205 /*
2149 * Safe to retrieve the parent of last_obj, since it 2206 * Safe to retrieve the parent of last_obj, since it
2150 * was already previously verified by the driver. 2207 * was already previously verified by the driver.
2151 */ 2208 */
2152 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) 2209 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2153 return false; 2210 return false;
2154 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t); 2211 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2155 last_obj = (struct binder_buffer_object *) 2212 buffer_offset = objects_start_offset +
 2156 (b->data + *(objects_start + last_obj->parent)); 2213 sizeof(binder_size_t) * last_bbo->parent;
2214 binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
2215 b, buffer_offset,
2216 sizeof(last_obj_offset));
2157 } 2217 }
2158 return (fixup_offset >= last_min_offset); 2218 return (fixup_offset >= last_min_offset);
2159} 2219}
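
Editorial note: once the parent chain has been walked as above, the ordering rule documented in the example tree reduces to a single comparison. A hedged standalone sketch of that final check:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: the new fixup must land at or beyond the offset at which
 * the previously fixed-up child was attached into this buffer object, plus
 * the size of the pointer that was patched there. */
static bool fixup_in_order(uint64_t fixup_offset, uint64_t last_parent_offset)
{
	uint64_t last_min_offset = last_parent_offset + sizeof(uintptr_t);

	return fixup_offset >= last_min_offset;
}
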
@@ -2218,35 +2278,42 @@ static void binder_deferred_fd_close(int fd)
2218 2278
2219static void binder_transaction_buffer_release(struct binder_proc *proc, 2279static void binder_transaction_buffer_release(struct binder_proc *proc,
2220 struct binder_buffer *buffer, 2280 struct binder_buffer *buffer,
2221 binder_size_t *failed_at) 2281 binder_size_t failed_at,
2282 bool is_failure)
2222{ 2283{
2223 binder_size_t *offp, *off_start, *off_end;
2224 int debug_id = buffer->debug_id; 2284 int debug_id = buffer->debug_id;
2285 binder_size_t off_start_offset, buffer_offset, off_end_offset;
2225 2286
2226 binder_debug(BINDER_DEBUG_TRANSACTION, 2287 binder_debug(BINDER_DEBUG_TRANSACTION,
2227 "%d buffer release %d, size %zd-%zd, failed at %pK\n", 2288 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2228 proc->pid, buffer->debug_id, 2289 proc->pid, buffer->debug_id,
2229 buffer->data_size, buffer->offsets_size, failed_at); 2290 buffer->data_size, buffer->offsets_size,
2291 (unsigned long long)failed_at);
2230 2292
2231 if (buffer->target_node) 2293 if (buffer->target_node)
2232 binder_dec_node(buffer->target_node, 1, 0); 2294 binder_dec_node(buffer->target_node, 1, 0);
2233 2295
2234 off_start = (binder_size_t *)(buffer->data + 2296 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2235 ALIGN(buffer->data_size, sizeof(void *))); 2297 off_end_offset = is_failure ? failed_at :
2236 if (failed_at) 2298 off_start_offset + buffer->offsets_size;
2237 off_end = failed_at; 2299 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2238 else 2300 buffer_offset += sizeof(binder_size_t)) {
2239 off_end = (void *)off_start + buffer->offsets_size;
2240 for (offp = off_start; offp < off_end; offp++) {
2241 struct binder_object_header *hdr; 2301 struct binder_object_header *hdr;
2242 size_t object_size = binder_validate_object(buffer, *offp); 2302 size_t object_size;
2243 2303 struct binder_object object;
2304 binder_size_t object_offset;
2305
2306 binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2307 buffer, buffer_offset,
2308 sizeof(object_offset));
2309 object_size = binder_get_object(proc, buffer,
2310 object_offset, &object);
2244 if (object_size == 0) { 2311 if (object_size == 0) {
2245 pr_err("transaction release %d bad object at offset %lld, size %zd\n", 2312 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2246 debug_id, (u64)*offp, buffer->data_size); 2313 debug_id, (u64)object_offset, buffer->data_size);
2247 continue; 2314 continue;
2248 } 2315 }
2249 hdr = (struct binder_object_header *)(buffer->data + *offp); 2316 hdr = &object.hdr;
2250 switch (hdr->type) { 2317 switch (hdr->type) {
2251 case BINDER_TYPE_BINDER: 2318 case BINDER_TYPE_BINDER:
2252 case BINDER_TYPE_WEAK_BINDER: { 2319 case BINDER_TYPE_WEAK_BINDER: {
@@ -2309,10 +2376,11 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
2309 case BINDER_TYPE_FDA: { 2376 case BINDER_TYPE_FDA: {
2310 struct binder_fd_array_object *fda; 2377 struct binder_fd_array_object *fda;
2311 struct binder_buffer_object *parent; 2378 struct binder_buffer_object *parent;
2312 uintptr_t parent_buffer; 2379 struct binder_object ptr_object;
2313 u32 *fd_array; 2380 binder_size_t fda_offset;
2314 size_t fd_index; 2381 size_t fd_index;
2315 binder_size_t fd_buf_size; 2382 binder_size_t fd_buf_size;
2383 binder_size_t num_valid;
2316 2384
2317 if (proc->tsk != current->group_leader) { 2385 if (proc->tsk != current->group_leader) {
2318 /* 2386 /*
@@ -2323,23 +2391,19 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
2323 continue; 2391 continue;
2324 } 2392 }
2325 2393
2394 num_valid = (buffer_offset - off_start_offset) /
2395 sizeof(binder_size_t);
2326 fda = to_binder_fd_array_object(hdr); 2396 fda = to_binder_fd_array_object(hdr);
2327 parent = binder_validate_ptr(buffer, fda->parent, 2397 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2328 off_start, 2398 fda->parent,
2329 offp - off_start); 2399 off_start_offset,
2400 NULL,
2401 num_valid);
2330 if (!parent) { 2402 if (!parent) {
2331 pr_err("transaction release %d bad parent offset\n", 2403 pr_err("transaction release %d bad parent offset\n",
2332 debug_id); 2404 debug_id);
2333 continue; 2405 continue;
2334 } 2406 }
2335 /*
2336 * Since the parent was already fixed up, convert it
2337 * back to kernel address space to access it
2338 */
2339 parent_buffer = parent->buffer -
2340 binder_alloc_get_user_buffer_offset(
2341 &proc->alloc);
2342
2343 fd_buf_size = sizeof(u32) * fda->num_fds; 2407 fd_buf_size = sizeof(u32) * fda->num_fds;
2344 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 2408 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2345 pr_err("transaction release %d invalid number of fds (%lld)\n", 2409 pr_err("transaction release %d invalid number of fds (%lld)\n",
@@ -2353,9 +2417,29 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
2353 debug_id, (u64)fda->num_fds); 2417 debug_id, (u64)fda->num_fds);
2354 continue; 2418 continue;
2355 } 2419 }
2356 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); 2420 /*
2357 for (fd_index = 0; fd_index < fda->num_fds; fd_index++) 2421 * the source data for binder_buffer_object is visible
2358 binder_deferred_fd_close(fd_array[fd_index]); 2422 * to user-space and the @buffer element is the user
2423 * pointer to the buffer_object containing the fd_array.
2424 * Convert the address to an offset relative to
2425 * the base of the transaction buffer.
2426 */
2427 fda_offset =
2428 (parent->buffer - (uintptr_t)buffer->user_data) +
2429 fda->parent_offset;
2430 for (fd_index = 0; fd_index < fda->num_fds;
2431 fd_index++) {
2432 u32 fd;
2433 binder_size_t offset = fda_offset +
2434 fd_index * sizeof(fd);
2435
2436 binder_alloc_copy_from_buffer(&proc->alloc,
2437 &fd,
2438 buffer,
2439 offset,
2440 sizeof(fd));
2441 binder_deferred_fd_close(fd);
2442 }
2359 } break; 2443 } break;
2360 default: 2444 default:
2361 pr_err("transaction release %d bad object type %x\n", 2445 pr_err("transaction release %d bad object type %x\n",
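
Editorial note: the release path above and the translate path further down rely on the same pointer-to-offset conversion described in the comment. A minimal sketch of that arithmetic, with names local to this example rather than the driver's:

#include <stddef.h>
#include <stdint.h>

typedef uint64_t binder_size_t;   /* matches the 64-bit uapi offset type */

/*
 * parent_user_ptr  : parent->buffer, the sender-visible address of the buffer
 *                    object that contains the fd array
 * buffer_user_base : buffer->user_data, the user address at which the whole
 *                    transaction buffer is mapped
 * parent_offset    : fda->parent_offset, where the array starts inside the
 *                    parent object
 * fd_index         : which fd within the array
 */
static binder_size_t fd_entry_offset(uintptr_t parent_user_ptr,
				     uintptr_t buffer_user_base,
				     binder_size_t parent_offset,
				     binder_size_t fd_index)
{
	binder_size_t fda_offset =
		(parent_user_ptr - buffer_user_base) + parent_offset;

	return fda_offset + fd_index * sizeof(uint32_t);
}

Each fd is then read (or rewritten) with the binder_alloc copy helpers at the returned offset, instead of through a kernel mapping of the buffer.
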
@@ -2491,7 +2575,7 @@ done:
2491 return ret; 2575 return ret;
2492} 2576}
2493 2577
2494static int binder_translate_fd(u32 *fdp, 2578static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2495 struct binder_transaction *t, 2579 struct binder_transaction *t,
2496 struct binder_thread *thread, 2580 struct binder_thread *thread,
2497 struct binder_transaction *in_reply_to) 2581 struct binder_transaction *in_reply_to)
@@ -2502,7 +2586,6 @@ static int binder_translate_fd(u32 *fdp,
2502 struct file *file; 2586 struct file *file;
2503 int ret = 0; 2587 int ret = 0;
2504 bool target_allows_fd; 2588 bool target_allows_fd;
2505 int fd = *fdp;
2506 2589
2507 if (in_reply_to) 2590 if (in_reply_to)
2508 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 2591 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
@@ -2541,7 +2624,7 @@ static int binder_translate_fd(u32 *fdp,
2541 goto err_alloc; 2624 goto err_alloc;
2542 } 2625 }
2543 fixup->file = file; 2626 fixup->file = file;
2544 fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data; 2627 fixup->offset = fd_offset;
2545 trace_binder_transaction_fd_send(t, fd, fixup->offset); 2628 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2546 list_add_tail(&fixup->fixup_entry, &t->fd_fixups); 2629 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2547 2630
@@ -2562,8 +2645,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2562 struct binder_transaction *in_reply_to) 2645 struct binder_transaction *in_reply_to)
2563{ 2646{
2564 binder_size_t fdi, fd_buf_size; 2647 binder_size_t fdi, fd_buf_size;
2565 uintptr_t parent_buffer; 2648 binder_size_t fda_offset;
2566 u32 *fd_array;
2567 struct binder_proc *proc = thread->proc; 2649 struct binder_proc *proc = thread->proc;
2568 struct binder_proc *target_proc = t->to_proc; 2650 struct binder_proc *target_proc = t->to_proc;
2569 2651
@@ -2581,20 +2663,29 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2581 return -EINVAL; 2663 return -EINVAL;
2582 } 2664 }
2583 /* 2665 /*
2584 * Since the parent was already fixed up, convert it 2666 * the source data for binder_buffer_object is visible
2585 * back to the kernel address space to access it 2667 * to user-space and the @buffer element is the user
2668 * pointer to the buffer_object containing the fd_array.
2669 * Convert the address to an offset relative to
2670 * the base of the transaction buffer.
2586 */ 2671 */
2587 parent_buffer = parent->buffer - 2672 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2588 binder_alloc_get_user_buffer_offset(&target_proc->alloc); 2673 fda->parent_offset;
2589 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); 2674 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2590 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2591 binder_user_error("%d:%d parent offset not aligned correctly.\n", 2675 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2592 proc->pid, thread->pid); 2676 proc->pid, thread->pid);
2593 return -EINVAL; 2677 return -EINVAL;
2594 } 2678 }
2595 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2679 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2596 int ret = binder_translate_fd(&fd_array[fdi], t, thread, 2680 u32 fd;
2597 in_reply_to); 2681 int ret;
2682 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2683
2684 binder_alloc_copy_from_buffer(&target_proc->alloc,
2685 &fd, t->buffer,
2686 offset, sizeof(fd));
2687 ret = binder_translate_fd(fd, offset, t, thread,
2688 in_reply_to);
2598 if (ret < 0) 2689 if (ret < 0)
2599 return ret; 2690 return ret;
2600 } 2691 }
@@ -2604,30 +2695,34 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2604static int binder_fixup_parent(struct binder_transaction *t, 2695static int binder_fixup_parent(struct binder_transaction *t,
2605 struct binder_thread *thread, 2696 struct binder_thread *thread,
2606 struct binder_buffer_object *bp, 2697 struct binder_buffer_object *bp,
2607 binder_size_t *off_start, 2698 binder_size_t off_start_offset,
2608 binder_size_t num_valid, 2699 binder_size_t num_valid,
2609 struct binder_buffer_object *last_fixup_obj, 2700 binder_size_t last_fixup_obj_off,
2610 binder_size_t last_fixup_min_off) 2701 binder_size_t last_fixup_min_off)
2611{ 2702{
2612 struct binder_buffer_object *parent; 2703 struct binder_buffer_object *parent;
2613 u8 *parent_buffer;
2614 struct binder_buffer *b = t->buffer; 2704 struct binder_buffer *b = t->buffer;
2615 struct binder_proc *proc = thread->proc; 2705 struct binder_proc *proc = thread->proc;
2616 struct binder_proc *target_proc = t->to_proc; 2706 struct binder_proc *target_proc = t->to_proc;
2707 struct binder_object object;
2708 binder_size_t buffer_offset;
2709 binder_size_t parent_offset;
2617 2710
2618 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) 2711 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2619 return 0; 2712 return 0;
2620 2713
2621 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid); 2714 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2715 off_start_offset, &parent_offset,
2716 num_valid);
2622 if (!parent) { 2717 if (!parent) {
2623 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2718 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2624 proc->pid, thread->pid); 2719 proc->pid, thread->pid);
2625 return -EINVAL; 2720 return -EINVAL;
2626 } 2721 }
2627 2722
2628 if (!binder_validate_fixup(b, off_start, 2723 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2629 parent, bp->parent_offset, 2724 parent_offset, bp->parent_offset,
2630 last_fixup_obj, 2725 last_fixup_obj_off,
2631 last_fixup_min_off)) { 2726 last_fixup_min_off)) {
2632 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 2727 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2633 proc->pid, thread->pid); 2728 proc->pid, thread->pid);
@@ -2641,10 +2736,10 @@ static int binder_fixup_parent(struct binder_transaction *t,
2641 proc->pid, thread->pid); 2736 proc->pid, thread->pid);
2642 return -EINVAL; 2737 return -EINVAL;
2643 } 2738 }
2644 parent_buffer = (u8 *)((uintptr_t)parent->buffer - 2739 buffer_offset = bp->parent_offset +
2645 binder_alloc_get_user_buffer_offset( 2740 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2646 &target_proc->alloc)); 2741 binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2647 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; 2742 &bp->buffer, sizeof(bp->buffer));
2648 2743
2649 return 0; 2744 return 0;
2650} 2745}
@@ -2763,9 +2858,10 @@ static void binder_transaction(struct binder_proc *proc,
2763 struct binder_transaction *t; 2858 struct binder_transaction *t;
2764 struct binder_work *w; 2859 struct binder_work *w;
2765 struct binder_work *tcomplete; 2860 struct binder_work *tcomplete;
2766 binder_size_t *offp, *off_end, *off_start; 2861 binder_size_t buffer_offset = 0;
2862 binder_size_t off_start_offset, off_end_offset;
2767 binder_size_t off_min; 2863 binder_size_t off_min;
2768 u8 *sg_bufp, *sg_buf_end; 2864 binder_size_t sg_buf_offset, sg_buf_end_offset;
2769 struct binder_proc *target_proc = NULL; 2865 struct binder_proc *target_proc = NULL;
2770 struct binder_thread *target_thread = NULL; 2866 struct binder_thread *target_thread = NULL;
2771 struct binder_node *target_node = NULL; 2867 struct binder_node *target_node = NULL;
@@ -2774,10 +2870,12 @@ static void binder_transaction(struct binder_proc *proc,
2774 uint32_t return_error = 0; 2870 uint32_t return_error = 0;
2775 uint32_t return_error_param = 0; 2871 uint32_t return_error_param = 0;
2776 uint32_t return_error_line = 0; 2872 uint32_t return_error_line = 0;
2777 struct binder_buffer_object *last_fixup_obj = NULL; 2873 binder_size_t last_fixup_obj_off = 0;
2778 binder_size_t last_fixup_min_off = 0; 2874 binder_size_t last_fixup_min_off = 0;
2779 struct binder_context *context = proc->context; 2875 struct binder_context *context = proc->context;
2780 int t_debug_id = atomic_inc_return(&binder_last_id); 2876 int t_debug_id = atomic_inc_return(&binder_last_id);
2877 char *secctx = NULL;
2878 u32 secctx_sz = 0;
2781 2879
2782 e = binder_transaction_log_add(&binder_transaction_log); 2880 e = binder_transaction_log_add(&binder_transaction_log);
2783 e->debug_id = t_debug_id; 2881 e->debug_id = t_debug_id;
@@ -3020,6 +3118,20 @@ static void binder_transaction(struct binder_proc *proc,
3020 t->flags = tr->flags; 3118 t->flags = tr->flags;
3021 t->priority = task_nice(current); 3119 t->priority = task_nice(current);
3022 3120
3121 if (target_node && target_node->txn_security_ctx) {
3122 u32 secid;
3123
3124 security_task_getsecid(proc->tsk, &secid);
3125 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3126 if (ret) {
3127 return_error = BR_FAILED_REPLY;
3128 return_error_param = ret;
3129 return_error_line = __LINE__;
3130 goto err_get_secctx_failed;
3131 }
3132 extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
3133 }
3134
3023 trace_binder_transaction(reply, t, target_node); 3135 trace_binder_transaction(reply, t, target_node);
3024 3136
3025 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, 3137 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
@@ -3036,16 +3148,30 @@ static void binder_transaction(struct binder_proc *proc,
3036 t->buffer = NULL; 3148 t->buffer = NULL;
3037 goto err_binder_alloc_buf_failed; 3149 goto err_binder_alloc_buf_failed;
3038 } 3150 }
3151 if (secctx) {
3152 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3153 ALIGN(tr->offsets_size, sizeof(void *)) +
3154 ALIGN(extra_buffers_size, sizeof(void *)) -
3155 ALIGN(secctx_sz, sizeof(u64));
3156
3157 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3158 binder_alloc_copy_to_buffer(&target_proc->alloc,
3159 t->buffer, buf_offset,
3160 secctx, secctx_sz);
3161 security_release_secctx(secctx, secctx_sz);
3162 secctx = NULL;
3163 }
3039 t->buffer->debug_id = t->debug_id; 3164 t->buffer->debug_id = t->debug_id;
3040 t->buffer->transaction = t; 3165 t->buffer->transaction = t;
3041 t->buffer->target_node = target_node; 3166 t->buffer->target_node = target_node;
3042 trace_binder_transaction_alloc_buf(t->buffer); 3167 trace_binder_transaction_alloc_buf(t->buffer);
3043 off_start = (binder_size_t *)(t->buffer->data +
3044 ALIGN(tr->data_size, sizeof(void *)));
3045 offp = off_start;
3046 3168
3047 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) 3169 if (binder_alloc_copy_user_to_buffer(
3048 tr->data.ptr.buffer, tr->data_size)) { 3170 &target_proc->alloc,
3171 t->buffer, 0,
3172 (const void __user *)
3173 (uintptr_t)tr->data.ptr.buffer,
3174 tr->data_size)) {
3049 binder_user_error("%d:%d got transaction with invalid data ptr\n", 3175 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3050 proc->pid, thread->pid); 3176 proc->pid, thread->pid);
3051 return_error = BR_FAILED_REPLY; 3177 return_error = BR_FAILED_REPLY;
@@ -3053,8 +3179,13 @@ static void binder_transaction(struct binder_proc *proc,
3053 return_error_line = __LINE__; 3179 return_error_line = __LINE__;
3054 goto err_copy_data_failed; 3180 goto err_copy_data_failed;
3055 } 3181 }
3056 if (copy_from_user(offp, (const void __user *)(uintptr_t) 3182 if (binder_alloc_copy_user_to_buffer(
3057 tr->data.ptr.offsets, tr->offsets_size)) { 3183 &target_proc->alloc,
3184 t->buffer,
3185 ALIGN(tr->data_size, sizeof(void *)),
3186 (const void __user *)
3187 (uintptr_t)tr->data.ptr.offsets,
3188 tr->offsets_size)) {
3058 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3189 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3059 proc->pid, thread->pid); 3190 proc->pid, thread->pid);
3060 return_error = BR_FAILED_REPLY; 3191 return_error = BR_FAILED_REPLY;
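
Editorial note: because extra_buffers_size was already grown by ALIGN(secctx_sz, sizeof(u64)) before the buffer was allocated, the subtraction above places the security context at the very end of the buffer, after the data, offsets and scatter-gather sections. A small standalone sketch of that layout arithmetic:

#include <stddef.h>

/* Illustrative only; mirrors the ALIGN() arithmetic in the hunk above. */
#define ALIGN_UP(x, a) (((x) + (size_t)(a) - 1) & ~((size_t)(a) - 1))

static size_t secctx_offset(size_t data_size, size_t offsets_size,
			    size_t extra_buffers_size, size_t secctx_sz)
{
	/* extra_buffers_size already includes ALIGN_UP(secctx_sz, 8) */
	return ALIGN_UP(data_size, sizeof(void *)) +
	       ALIGN_UP(offsets_size, sizeof(void *)) +
	       ALIGN_UP(extra_buffers_size, sizeof(void *)) -
	       ALIGN_UP(secctx_sz, sizeof(unsigned long long));
}
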
@@ -3079,17 +3210,30 @@ static void binder_transaction(struct binder_proc *proc,
3079 return_error_line = __LINE__; 3210 return_error_line = __LINE__;
3080 goto err_bad_offset; 3211 goto err_bad_offset;
3081 } 3212 }
3082 off_end = (void *)off_start + tr->offsets_size; 3213 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3083 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *))); 3214 buffer_offset = off_start_offset;
3084 sg_buf_end = sg_bufp + extra_buffers_size; 3215 off_end_offset = off_start_offset + tr->offsets_size;
3216 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3217 sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
3085 off_min = 0; 3218 off_min = 0;
3086 for (; offp < off_end; offp++) { 3219 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3220 buffer_offset += sizeof(binder_size_t)) {
3087 struct binder_object_header *hdr; 3221 struct binder_object_header *hdr;
3088 size_t object_size = binder_validate_object(t->buffer, *offp); 3222 size_t object_size;
3089 3223 struct binder_object object;
3090 if (object_size == 0 || *offp < off_min) { 3224 binder_size_t object_offset;
3225
3226 binder_alloc_copy_from_buffer(&target_proc->alloc,
3227 &object_offset,
3228 t->buffer,
3229 buffer_offset,
3230 sizeof(object_offset));
3231 object_size = binder_get_object(target_proc, t->buffer,
3232 object_offset, &object);
3233 if (object_size == 0 || object_offset < off_min) {
3091 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 3234 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3092 proc->pid, thread->pid, (u64)*offp, 3235 proc->pid, thread->pid,
3236 (u64)object_offset,
3093 (u64)off_min, 3237 (u64)off_min,
3094 (u64)t->buffer->data_size); 3238 (u64)t->buffer->data_size);
3095 return_error = BR_FAILED_REPLY; 3239 return_error = BR_FAILED_REPLY;
@@ -3098,8 +3242,8 @@ static void binder_transaction(struct binder_proc *proc,
3098 goto err_bad_offset; 3242 goto err_bad_offset;
3099 } 3243 }
3100 3244
3101 hdr = (struct binder_object_header *)(t->buffer->data + *offp); 3245 hdr = &object.hdr;
3102 off_min = *offp + object_size; 3246 off_min = object_offset + object_size;
3103 switch (hdr->type) { 3247 switch (hdr->type) {
3104 case BINDER_TYPE_BINDER: 3248 case BINDER_TYPE_BINDER:
3105 case BINDER_TYPE_WEAK_BINDER: { 3249 case BINDER_TYPE_WEAK_BINDER: {
@@ -3113,6 +3257,9 @@ static void binder_transaction(struct binder_proc *proc,
3113 return_error_line = __LINE__; 3257 return_error_line = __LINE__;
3114 goto err_translate_failed; 3258 goto err_translate_failed;
3115 } 3259 }
3260 binder_alloc_copy_to_buffer(&target_proc->alloc,
3261 t->buffer, object_offset,
3262 fp, sizeof(*fp));
3116 } break; 3263 } break;
3117 case BINDER_TYPE_HANDLE: 3264 case BINDER_TYPE_HANDLE:
3118 case BINDER_TYPE_WEAK_HANDLE: { 3265 case BINDER_TYPE_WEAK_HANDLE: {
@@ -3126,12 +3273,17 @@ static void binder_transaction(struct binder_proc *proc,
3126 return_error_line = __LINE__; 3273 return_error_line = __LINE__;
3127 goto err_translate_failed; 3274 goto err_translate_failed;
3128 } 3275 }
3276 binder_alloc_copy_to_buffer(&target_proc->alloc,
3277 t->buffer, object_offset,
3278 fp, sizeof(*fp));
3129 } break; 3279 } break;
3130 3280
3131 case BINDER_TYPE_FD: { 3281 case BINDER_TYPE_FD: {
3132 struct binder_fd_object *fp = to_binder_fd_object(hdr); 3282 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3133 int ret = binder_translate_fd(&fp->fd, t, thread, 3283 binder_size_t fd_offset = object_offset +
3134 in_reply_to); 3284 (uintptr_t)&fp->fd - (uintptr_t)fp;
3285 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3286 thread, in_reply_to);
3135 3287
3136 if (ret < 0) { 3288 if (ret < 0) {
3137 return_error = BR_FAILED_REPLY; 3289 return_error = BR_FAILED_REPLY;
@@ -3140,14 +3292,23 @@ static void binder_transaction(struct binder_proc *proc,
3140 goto err_translate_failed; 3292 goto err_translate_failed;
3141 } 3293 }
3142 fp->pad_binder = 0; 3294 fp->pad_binder = 0;
3295 binder_alloc_copy_to_buffer(&target_proc->alloc,
3296 t->buffer, object_offset,
3297 fp, sizeof(*fp));
3143 } break; 3298 } break;
3144 case BINDER_TYPE_FDA: { 3299 case BINDER_TYPE_FDA: {
3300 struct binder_object ptr_object;
3301 binder_size_t parent_offset;
3145 struct binder_fd_array_object *fda = 3302 struct binder_fd_array_object *fda =
3146 to_binder_fd_array_object(hdr); 3303 to_binder_fd_array_object(hdr);
 3304 size_t num_valid = (buffer_offset - off_start_offset) /
3305 sizeof(binder_size_t);
3147 struct binder_buffer_object *parent = 3306 struct binder_buffer_object *parent =
3148 binder_validate_ptr(t->buffer, fda->parent, 3307 binder_validate_ptr(target_proc, t->buffer,
3149 off_start, 3308 &ptr_object, fda->parent,
3150 offp - off_start); 3309 off_start_offset,
3310 &parent_offset,
3311 num_valid);
3151 if (!parent) { 3312 if (!parent) {
3152 binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 3313 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3153 proc->pid, thread->pid); 3314 proc->pid, thread->pid);
@@ -3156,9 +3317,11 @@ static void binder_transaction(struct binder_proc *proc,
3156 return_error_line = __LINE__; 3317 return_error_line = __LINE__;
3157 goto err_bad_parent; 3318 goto err_bad_parent;
3158 } 3319 }
3159 if (!binder_validate_fixup(t->buffer, off_start, 3320 if (!binder_validate_fixup(target_proc, t->buffer,
3160 parent, fda->parent_offset, 3321 off_start_offset,
3161 last_fixup_obj, 3322 parent_offset,
3323 fda->parent_offset,
3324 last_fixup_obj_off,
3162 last_fixup_min_off)) { 3325 last_fixup_min_off)) {
3163 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 3326 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3164 proc->pid, thread->pid); 3327 proc->pid, thread->pid);
@@ -3175,14 +3338,15 @@ static void binder_transaction(struct binder_proc *proc,
3175 return_error_line = __LINE__; 3338 return_error_line = __LINE__;
3176 goto err_translate_failed; 3339 goto err_translate_failed;
3177 } 3340 }
3178 last_fixup_obj = parent; 3341 last_fixup_obj_off = parent_offset;
3179 last_fixup_min_off = 3342 last_fixup_min_off =
3180 fda->parent_offset + sizeof(u32) * fda->num_fds; 3343 fda->parent_offset + sizeof(u32) * fda->num_fds;
3181 } break; 3344 } break;
3182 case BINDER_TYPE_PTR: { 3345 case BINDER_TYPE_PTR: {
3183 struct binder_buffer_object *bp = 3346 struct binder_buffer_object *bp =
3184 to_binder_buffer_object(hdr); 3347 to_binder_buffer_object(hdr);
3185 size_t buf_left = sg_buf_end - sg_bufp; 3348 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3349 size_t num_valid;
3186 3350
3187 if (bp->length > buf_left) { 3351 if (bp->length > buf_left) {
3188 binder_user_error("%d:%d got transaction with too large buffer\n", 3352 binder_user_error("%d:%d got transaction with too large buffer\n",
@@ -3192,9 +3356,13 @@ static void binder_transaction(struct binder_proc *proc,
3192 return_error_line = __LINE__; 3356 return_error_line = __LINE__;
3193 goto err_bad_offset; 3357 goto err_bad_offset;
3194 } 3358 }
3195 if (copy_from_user(sg_bufp, 3359 if (binder_alloc_copy_user_to_buffer(
3196 (const void __user *)(uintptr_t) 3360 &target_proc->alloc,
3197 bp->buffer, bp->length)) { 3361 t->buffer,
3362 sg_buf_offset,
3363 (const void __user *)
3364 (uintptr_t)bp->buffer,
3365 bp->length)) {
3198 binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 3366 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3199 proc->pid, thread->pid); 3367 proc->pid, thread->pid);
3200 return_error_param = -EFAULT; 3368 return_error_param = -EFAULT;
@@ -3203,14 +3371,16 @@ static void binder_transaction(struct binder_proc *proc,
3203 goto err_copy_data_failed; 3371 goto err_copy_data_failed;
3204 } 3372 }
3205 /* Fixup buffer pointer to target proc address space */ 3373 /* Fixup buffer pointer to target proc address space */
3206 bp->buffer = (uintptr_t)sg_bufp + 3374 bp->buffer = (uintptr_t)
3207 binder_alloc_get_user_buffer_offset( 3375 t->buffer->user_data + sg_buf_offset;
3208 &target_proc->alloc); 3376 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3209 sg_bufp += ALIGN(bp->length, sizeof(u64)); 3377
 3210 3378 num_valid = (buffer_offset - off_start_offset) /
3211 ret = binder_fixup_parent(t, thread, bp, off_start, 3379 sizeof(binder_size_t);
3212 offp - off_start, 3380 ret = binder_fixup_parent(t, thread, bp,
3213 last_fixup_obj, 3381 off_start_offset,
3382 num_valid,
3383 last_fixup_obj_off,
3214 last_fixup_min_off); 3384 last_fixup_min_off);
3215 if (ret < 0) { 3385 if (ret < 0) {
3216 return_error = BR_FAILED_REPLY; 3386 return_error = BR_FAILED_REPLY;
@@ -3218,7 +3388,10 @@ static void binder_transaction(struct binder_proc *proc,
3218 return_error_line = __LINE__; 3388 return_error_line = __LINE__;
3219 goto err_translate_failed; 3389 goto err_translate_failed;
3220 } 3390 }
3221 last_fixup_obj = bp; 3391 binder_alloc_copy_to_buffer(&target_proc->alloc,
3392 t->buffer, object_offset,
3393 bp, sizeof(*bp));
3394 last_fixup_obj_off = object_offset;
3222 last_fixup_min_off = 0; 3395 last_fixup_min_off = 0;
3223 } break; 3396 } break;
3224 default: 3397 default:
@@ -3298,13 +3471,17 @@ err_bad_parent:
3298err_copy_data_failed: 3471err_copy_data_failed:
3299 binder_free_txn_fixups(t); 3472 binder_free_txn_fixups(t);
3300 trace_binder_transaction_failed_buffer_release(t->buffer); 3473 trace_binder_transaction_failed_buffer_release(t->buffer);
3301 binder_transaction_buffer_release(target_proc, t->buffer, offp); 3474 binder_transaction_buffer_release(target_proc, t->buffer,
3475 buffer_offset, true);
3302 if (target_node) 3476 if (target_node)
3303 binder_dec_node_tmpref(target_node); 3477 binder_dec_node_tmpref(target_node);
3304 target_node = NULL; 3478 target_node = NULL;
3305 t->buffer->transaction = NULL; 3479 t->buffer->transaction = NULL;
3306 binder_alloc_free_buf(&target_proc->alloc, t->buffer); 3480 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3307err_binder_alloc_buf_failed: 3481err_binder_alloc_buf_failed:
3482 if (secctx)
3483 security_release_secctx(secctx, secctx_sz);
3484err_get_secctx_failed:
3308 kfree(tcomplete); 3485 kfree(tcomplete);
3309 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3486 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3310err_alloc_tcomplete_failed: 3487err_alloc_tcomplete_failed:
@@ -3396,7 +3573,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3396 binder_node_inner_unlock(buf_node); 3573 binder_node_inner_unlock(buf_node);
3397 } 3574 }
3398 trace_binder_transaction_buffer_release(buffer); 3575 trace_binder_transaction_buffer_release(buffer);
3399 binder_transaction_buffer_release(proc, buffer, NULL); 3576 binder_transaction_buffer_release(proc, buffer, 0, false);
3400 binder_alloc_free_buf(&proc->alloc, buffer); 3577 binder_alloc_free_buf(&proc->alloc, buffer);
3401} 3578}
3402 3579
@@ -3915,6 +4092,7 @@ static int binder_wait_for_work(struct binder_thread *thread,
3915 4092
3916/** 4093/**
3917 * binder_apply_fd_fixups() - finish fd translation 4094 * binder_apply_fd_fixups() - finish fd translation
4095 * @proc: binder_proc associated @t->buffer
3918 * @t: binder transaction with list of fd fixups 4096 * @t: binder transaction with list of fd fixups
3919 * 4097 *
3920 * Now that we are in the context of the transaction target 4098 * Now that we are in the context of the transaction target
@@ -3926,14 +4104,14 @@ static int binder_wait_for_work(struct binder_thread *thread,
3926 * fput'ing files that have not been processed and ksys_close'ing 4104 * fput'ing files that have not been processed and ksys_close'ing
3927 * any fds that have already been allocated. 4105 * any fds that have already been allocated.
3928 */ 4106 */
3929static int binder_apply_fd_fixups(struct binder_transaction *t) 4107static int binder_apply_fd_fixups(struct binder_proc *proc,
4108 struct binder_transaction *t)
3930{ 4109{
3931 struct binder_txn_fd_fixup *fixup, *tmp; 4110 struct binder_txn_fd_fixup *fixup, *tmp;
3932 int ret = 0; 4111 int ret = 0;
3933 4112
3934 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) { 4113 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3935 int fd = get_unused_fd_flags(O_CLOEXEC); 4114 int fd = get_unused_fd_flags(O_CLOEXEC);
3936 u32 *fdp;
3937 4115
3938 if (fd < 0) { 4116 if (fd < 0) {
3939 binder_debug(BINDER_DEBUG_TRANSACTION, 4117 binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -3948,33 +4126,20 @@ static int binder_apply_fd_fixups(struct binder_transaction *t)
3948 trace_binder_transaction_fd_recv(t, fd, fixup->offset); 4126 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3949 fd_install(fd, fixup->file); 4127 fd_install(fd, fixup->file);
3950 fixup->file = NULL; 4128 fixup->file = NULL;
3951 fdp = (u32 *)(t->buffer->data + fixup->offset); 4129 binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
3952 /* 4130 fixup->offset, &fd,
3953 * This store can cause problems for CPUs with a 4131 sizeof(u32));
3954 * VIVT cache (eg ARMv5) since the cache cannot
3955 * detect virtual aliases to the same physical cacheline.
3956 * To support VIVT, this address and the user-space VA
3957 * would both need to be flushed. Since this kernel
3958 * VA is not constructed via page_to_virt(), we can't
3959 * use flush_dcache_page() on it, so we'd have to use
3960 * an internal function. If devices with VIVT ever
3961 * need to run Android, we'll either need to go back
3962 * to patching the translated fd from the sender side
3963 * (using the non-standard kernel functions), or rework
3964 * how the kernel uses the buffer to use page_to_virt()
3965 * addresses instead of allocating in our own vm area.
3966 *
3967 * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
3968 */
3969 *fdp = fd;
3970 } 4132 }
3971 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { 4133 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3972 if (fixup->file) { 4134 if (fixup->file) {
3973 fput(fixup->file); 4135 fput(fixup->file);
3974 } else if (ret) { 4136 } else if (ret) {
3975 u32 *fdp = (u32 *)(t->buffer->data + fixup->offset); 4137 u32 fd;
3976 4138
3977 binder_deferred_fd_close(*fdp); 4139 binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4140 t->buffer, fixup->offset,
4141 sizeof(fd));
4142 binder_deferred_fd_close(fd);
3978 } 4143 }
3979 list_del(&fixup->fixup_entry); 4144 list_del(&fixup->fixup_entry);
3980 kfree(fixup); 4145 kfree(fixup);
@@ -4036,11 +4201,13 @@ retry:
4036 4201
4037 while (1) { 4202 while (1) {
4038 uint32_t cmd; 4203 uint32_t cmd;
4039 struct binder_transaction_data tr; 4204 struct binder_transaction_data_secctx tr;
4205 struct binder_transaction_data *trd = &tr.transaction_data;
4040 struct binder_work *w = NULL; 4206 struct binder_work *w = NULL;
4041 struct list_head *list = NULL; 4207 struct list_head *list = NULL;
4042 struct binder_transaction *t = NULL; 4208 struct binder_transaction *t = NULL;
4043 struct binder_thread *t_from; 4209 struct binder_thread *t_from;
4210 size_t trsize = sizeof(*trd);
4044 4211
4045 binder_inner_proc_lock(proc); 4212 binder_inner_proc_lock(proc);
4046 if (!binder_worklist_empty_ilocked(&thread->todo)) 4213 if (!binder_worklist_empty_ilocked(&thread->todo))
@@ -4240,8 +4407,8 @@ retry:
4240 if (t->buffer->target_node) { 4407 if (t->buffer->target_node) {
4241 struct binder_node *target_node = t->buffer->target_node; 4408 struct binder_node *target_node = t->buffer->target_node;
4242 4409
4243 tr.target.ptr = target_node->ptr; 4410 trd->target.ptr = target_node->ptr;
4244 tr.cookie = target_node->cookie; 4411 trd->cookie = target_node->cookie;
4245 t->saved_priority = task_nice(current); 4412 t->saved_priority = task_nice(current);
4246 if (t->priority < target_node->min_priority && 4413 if (t->priority < target_node->min_priority &&
4247 !(t->flags & TF_ONE_WAY)) 4414 !(t->flags & TF_ONE_WAY))
@@ -4251,25 +4418,26 @@ retry:
4251 binder_set_nice(target_node->min_priority); 4418 binder_set_nice(target_node->min_priority);
4252 cmd = BR_TRANSACTION; 4419 cmd = BR_TRANSACTION;
4253 } else { 4420 } else {
4254 tr.target.ptr = 0; 4421 trd->target.ptr = 0;
4255 tr.cookie = 0; 4422 trd->cookie = 0;
4256 cmd = BR_REPLY; 4423 cmd = BR_REPLY;
4257 } 4424 }
4258 tr.code = t->code; 4425 trd->code = t->code;
4259 tr.flags = t->flags; 4426 trd->flags = t->flags;
4260 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); 4427 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4261 4428
4262 t_from = binder_get_txn_from(t); 4429 t_from = binder_get_txn_from(t);
4263 if (t_from) { 4430 if (t_from) {
4264 struct task_struct *sender = t_from->proc->tsk; 4431 struct task_struct *sender = t_from->proc->tsk;
4265 4432
4266 tr.sender_pid = task_tgid_nr_ns(sender, 4433 trd->sender_pid =
4267 task_active_pid_ns(current)); 4434 task_tgid_nr_ns(sender,
4435 task_active_pid_ns(current));
4268 } else { 4436 } else {
4269 tr.sender_pid = 0; 4437 trd->sender_pid = 0;
4270 } 4438 }
4271 4439
4272 ret = binder_apply_fd_fixups(t); 4440 ret = binder_apply_fd_fixups(proc, t);
4273 if (ret) { 4441 if (ret) {
4274 struct binder_buffer *buffer = t->buffer; 4442 struct binder_buffer *buffer = t->buffer;
4275 bool oneway = !!(t->flags & TF_ONE_WAY); 4443 bool oneway = !!(t->flags & TF_ONE_WAY);
@@ -4297,15 +4465,18 @@ retry:
4297 } 4465 }
4298 continue; 4466 continue;
4299 } 4467 }
4300 tr.data_size = t->buffer->data_size; 4468 trd->data_size = t->buffer->data_size;
4301 tr.offsets_size = t->buffer->offsets_size; 4469 trd->offsets_size = t->buffer->offsets_size;
4302 tr.data.ptr.buffer = (binder_uintptr_t) 4470 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4303 ((uintptr_t)t->buffer->data + 4471 trd->data.ptr.offsets = trd->data.ptr.buffer +
4304 binder_alloc_get_user_buffer_offset(&proc->alloc));
4305 tr.data.ptr.offsets = tr.data.ptr.buffer +
4306 ALIGN(t->buffer->data_size, 4472 ALIGN(t->buffer->data_size,
4307 sizeof(void *)); 4473 sizeof(void *));
4308 4474
4475 tr.secctx = t->security_ctx;
4476 if (t->security_ctx) {
4477 cmd = BR_TRANSACTION_SEC_CTX;
4478 trsize = sizeof(tr);
4479 }
4309 if (put_user(cmd, (uint32_t __user *)ptr)) { 4480 if (put_user(cmd, (uint32_t __user *)ptr)) {
4310 if (t_from) 4481 if (t_from)
4311 binder_thread_dec_tmpref(t_from); 4482 binder_thread_dec_tmpref(t_from);
@@ -4316,7 +4487,7 @@ retry:
4316 return -EFAULT; 4487 return -EFAULT;
4317 } 4488 }
4318 ptr += sizeof(uint32_t); 4489 ptr += sizeof(uint32_t);
4319 if (copy_to_user(ptr, &tr, sizeof(tr))) { 4490 if (copy_to_user(ptr, &tr, trsize)) {
4320 if (t_from) 4491 if (t_from)
4321 binder_thread_dec_tmpref(t_from); 4492 binder_thread_dec_tmpref(t_from);
4322 4493
@@ -4325,7 +4496,7 @@ retry:
4325 4496
4326 return -EFAULT; 4497 return -EFAULT;
4327 } 4498 }
4328 ptr += sizeof(tr); 4499 ptr += trsize;
4329 4500
4330 trace_binder_transaction_received(t); 4501 trace_binder_transaction_received(t);
4331 binder_stat_br(proc, thread, cmd); 4502 binder_stat_br(proc, thread, cmd);
@@ -4333,16 +4504,18 @@ retry:
4333 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 4504 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4334 proc->pid, thread->pid, 4505 proc->pid, thread->pid,
4335 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 4506 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4336 "BR_REPLY", 4507 (cmd == BR_TRANSACTION_SEC_CTX) ?
4508 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4337 t->debug_id, t_from ? t_from->proc->pid : 0, 4509 t->debug_id, t_from ? t_from->proc->pid : 0,
4338 t_from ? t_from->pid : 0, cmd, 4510 t_from ? t_from->pid : 0, cmd,
4339 t->buffer->data_size, t->buffer->offsets_size, 4511 t->buffer->data_size, t->buffer->offsets_size,
4340 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); 4512 (u64)trd->data.ptr.buffer,
4513 (u64)trd->data.ptr.offsets);
4341 4514
4342 if (t_from) 4515 if (t_from)
4343 binder_thread_dec_tmpref(t_from); 4516 binder_thread_dec_tmpref(t_from);
4344 t->buffer->allow_user_free = 1; 4517 t->buffer->allow_user_free = 1;
4345 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { 4518 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4346 binder_inner_proc_lock(thread->proc); 4519 binder_inner_proc_lock(thread->proc);
4347 t->to_parent = thread->transaction_stack; 4520 t->to_parent = thread->transaction_stack;
4348 t->to_thread = thread; 4521 t->to_thread = thread;
@@ -4690,7 +4863,8 @@ out:
4690 return ret; 4863 return ret;
4691} 4864}
4692 4865
4693static int binder_ioctl_set_ctx_mgr(struct file *filp) 4866static int binder_ioctl_set_ctx_mgr(struct file *filp,
4867 struct flat_binder_object *fbo)
4694{ 4868{
4695 int ret = 0; 4869 int ret = 0;
4696 struct binder_proc *proc = filp->private_data; 4870 struct binder_proc *proc = filp->private_data;
@@ -4719,7 +4893,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
4719 } else { 4893 } else {
4720 context->binder_context_mgr_uid = curr_euid; 4894 context->binder_context_mgr_uid = curr_euid;
4721 } 4895 }
4722 new_node = binder_new_node(proc, NULL); 4896 new_node = binder_new_node(proc, fbo);
4723 if (!new_node) { 4897 if (!new_node) {
4724 ret = -ENOMEM; 4898 ret = -ENOMEM;
4725 goto out; 4899 goto out;
@@ -4842,8 +5016,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4842 binder_inner_proc_unlock(proc); 5016 binder_inner_proc_unlock(proc);
4843 break; 5017 break;
4844 } 5018 }
5019 case BINDER_SET_CONTEXT_MGR_EXT: {
5020 struct flat_binder_object fbo;
5021
5022 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5023 ret = -EINVAL;
5024 goto err;
5025 }
5026 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5027 if (ret)
5028 goto err;
5029 break;
5030 }
4845 case BINDER_SET_CONTEXT_MGR: 5031 case BINDER_SET_CONTEXT_MGR:
4846 ret = binder_ioctl_set_ctx_mgr(filp); 5032 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
4847 if (ret) 5033 if (ret)
4848 goto err; 5034 goto err;
4849 break; 5035 break;
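
Editorial note: the new ioctl takes a flat_binder_object so a context manager can opt in to receiving sender security contexts. A hypothetical userspace sketch; the FLAT_BINDER_FLAG_TXN_SECURITY_CTX flag name comes from the uapi header added by this series, not from this hunk, so treat it as an assumption of the example:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Register @binder_fd as context manager and request that incoming
 * transactions carry the sender's security context. */
static int become_context_manager_ext(int binder_fd)
{
	struct flat_binder_object fbo;

	memset(&fbo, 0, sizeof(fbo));
	fbo.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX;  /* assumed flag */

	return ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &fbo);
}
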
@@ -5319,7 +5505,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
5319 seq_printf(m, " node %d", buffer->target_node->debug_id); 5505 seq_printf(m, " node %d", buffer->target_node->debug_id);
5320 seq_printf(m, " size %zd:%zd data %pK\n", 5506 seq_printf(m, " size %zd:%zd data %pK\n",
5321 buffer->data_size, buffer->offsets_size, 5507 buffer->data_size, buffer->offsets_size,
5322 buffer->data); 5508 buffer->user_data);
5323} 5509}
5324 5510
5325static void print_binder_work_ilocked(struct seq_file *m, 5511static void print_binder_work_ilocked(struct seq_file *m,
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 022cd80e80cc..6389467670a0 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -29,6 +29,8 @@
29#include <linux/list_lru.h> 29#include <linux/list_lru.h>
30#include <linux/ratelimit.h> 30#include <linux/ratelimit.h>
31#include <asm/cacheflush.h> 31#include <asm/cacheflush.h>
32#include <linux/uaccess.h>
33#include <linux/highmem.h>
32#include "binder_alloc.h" 34#include "binder_alloc.h"
33#include "binder_trace.h" 35#include "binder_trace.h"
34 36
@@ -67,9 +69,8 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
67 struct binder_buffer *buffer) 69 struct binder_buffer *buffer)
68{ 70{
69 if (list_is_last(&buffer->entry, &alloc->buffers)) 71 if (list_is_last(&buffer->entry, &alloc->buffers))
70 return (u8 *)alloc->buffer + 72 return alloc->buffer + alloc->buffer_size - buffer->user_data;
71 alloc->buffer_size - (u8 *)buffer->data; 73 return binder_buffer_next(buffer)->user_data - buffer->user_data;
72 return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
73} 74}
74 75
75static void binder_insert_free_buffer(struct binder_alloc *alloc, 76static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -119,9 +120,9 @@ static void binder_insert_allocated_buffer_locked(
119 buffer = rb_entry(parent, struct binder_buffer, rb_node); 120 buffer = rb_entry(parent, struct binder_buffer, rb_node);
120 BUG_ON(buffer->free); 121 BUG_ON(buffer->free);
121 122
122 if (new_buffer->data < buffer->data) 123 if (new_buffer->user_data < buffer->user_data)
123 p = &parent->rb_left; 124 p = &parent->rb_left;
124 else if (new_buffer->data > buffer->data) 125 else if (new_buffer->user_data > buffer->user_data)
125 p = &parent->rb_right; 126 p = &parent->rb_right;
126 else 127 else
127 BUG(); 128 BUG();
@@ -136,17 +137,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
136{ 137{
137 struct rb_node *n = alloc->allocated_buffers.rb_node; 138 struct rb_node *n = alloc->allocated_buffers.rb_node;
138 struct binder_buffer *buffer; 139 struct binder_buffer *buffer;
139 void *kern_ptr; 140 void __user *uptr;
140 141
141 kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset); 142 uptr = (void __user *)user_ptr;
142 143
143 while (n) { 144 while (n) {
144 buffer = rb_entry(n, struct binder_buffer, rb_node); 145 buffer = rb_entry(n, struct binder_buffer, rb_node);
145 BUG_ON(buffer->free); 146 BUG_ON(buffer->free);
146 147
147 if (kern_ptr < buffer->data) 148 if (uptr < buffer->user_data)
148 n = n->rb_left; 149 n = n->rb_left;
149 else if (kern_ptr > buffer->data) 150 else if (uptr > buffer->user_data)
150 n = n->rb_right; 151 n = n->rb_right;
151 else { 152 else {
152 /* 153 /*
@@ -186,9 +187,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
186} 187}
187 188
188static int binder_update_page_range(struct binder_alloc *alloc, int allocate, 189static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
189 void *start, void *end) 190 void __user *start, void __user *end)
190{ 191{
191 void *page_addr; 192 void __user *page_addr;
192 unsigned long user_page_addr; 193 unsigned long user_page_addr;
193 struct binder_lru_page *page; 194 struct binder_lru_page *page;
194 struct vm_area_struct *vma = NULL; 195 struct vm_area_struct *vma = NULL;
@@ -263,18 +264,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
263 page->alloc = alloc; 264 page->alloc = alloc;
264 INIT_LIST_HEAD(&page->lru); 265 INIT_LIST_HEAD(&page->lru);
265 266
266 ret = map_kernel_range_noflush((unsigned long)page_addr, 267 user_page_addr = (uintptr_t)page_addr;
267 PAGE_SIZE, PAGE_KERNEL,
268 &page->page_ptr);
269 flush_cache_vmap((unsigned long)page_addr,
270 (unsigned long)page_addr + PAGE_SIZE);
271 if (ret != 1) {
272 pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
273 alloc->pid, page_addr);
274 goto err_map_kernel_failed;
275 }
276 user_page_addr =
277 (uintptr_t)page_addr + alloc->user_buffer_offset;
278 ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr); 268 ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
279 if (ret) { 269 if (ret) {
280 pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", 270 pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
@@ -312,8 +302,6 @@ free_range:
312 continue; 302 continue;
313 303
314err_vm_insert_page_failed: 304err_vm_insert_page_failed:
315 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
316err_map_kernel_failed:
317 __free_page(page->page_ptr); 305 __free_page(page->page_ptr);
318 page->page_ptr = NULL; 306 page->page_ptr = NULL;
319err_alloc_page_failed: 307err_alloc_page_failed:
@@ -368,8 +356,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
368 struct binder_buffer *buffer; 356 struct binder_buffer *buffer;
369 size_t buffer_size; 357 size_t buffer_size;
370 struct rb_node *best_fit = NULL; 358 struct rb_node *best_fit = NULL;
371 void *has_page_addr; 359 void __user *has_page_addr;
372 void *end_page_addr; 360 void __user *end_page_addr;
373 size_t size, data_offsets_size; 361 size_t size, data_offsets_size;
374 int ret; 362 int ret;
375 363
@@ -467,15 +455,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
467 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", 455 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
468 alloc->pid, size, buffer, buffer_size); 456 alloc->pid, size, buffer, buffer_size);
469 457
470 has_page_addr = 458 has_page_addr = (void __user *)
471 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); 459 (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
472 WARN_ON(n && buffer_size != size); 460 WARN_ON(n && buffer_size != size);
473 end_page_addr = 461 end_page_addr =
474 (void *)PAGE_ALIGN((uintptr_t)buffer->data + size); 462 (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
475 if (end_page_addr > has_page_addr) 463 if (end_page_addr > has_page_addr)
476 end_page_addr = has_page_addr; 464 end_page_addr = has_page_addr;
477 ret = binder_update_page_range(alloc, 1, 465 ret = binder_update_page_range(alloc, 1, (void __user *)
478 (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr); 466 PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
479 if (ret) 467 if (ret)
480 return ERR_PTR(ret); 468 return ERR_PTR(ret);
481 469
@@ -488,7 +476,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
488 __func__, alloc->pid); 476 __func__, alloc->pid);
489 goto err_alloc_buf_struct_failed; 477 goto err_alloc_buf_struct_failed;
490 } 478 }
491 new_buffer->data = (u8 *)buffer->data + size; 479 new_buffer->user_data = (u8 __user *)buffer->user_data + size;
492 list_add(&new_buffer->entry, &buffer->entry); 480 list_add(&new_buffer->entry, &buffer->entry);
493 new_buffer->free = 1; 481 new_buffer->free = 1;
494 binder_insert_free_buffer(alloc, new_buffer); 482 binder_insert_free_buffer(alloc, new_buffer);
@@ -514,8 +502,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
514 return buffer; 502 return buffer;
515 503
516err_alloc_buf_struct_failed: 504err_alloc_buf_struct_failed:
517 binder_update_page_range(alloc, 0, 505 binder_update_page_range(alloc, 0, (void __user *)
518 (void *)PAGE_ALIGN((uintptr_t)buffer->data), 506 PAGE_ALIGN((uintptr_t)buffer->user_data),
519 end_page_addr); 507 end_page_addr);
520 return ERR_PTR(-ENOMEM); 508 return ERR_PTR(-ENOMEM);
521} 509}
@@ -550,14 +538,15 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
550 return buffer; 538 return buffer;
551} 539}
552 540
553static void *buffer_start_page(struct binder_buffer *buffer) 541static void __user *buffer_start_page(struct binder_buffer *buffer)
554{ 542{
555 return (void *)((uintptr_t)buffer->data & PAGE_MASK); 543 return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
556} 544}
557 545
558static void *prev_buffer_end_page(struct binder_buffer *buffer) 546static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
559{ 547{
560 return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK); 548 return (void __user *)
549 (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
561} 550}
562 551
563static void binder_delete_free_buffer(struct binder_alloc *alloc, 552static void binder_delete_free_buffer(struct binder_alloc *alloc,
@@ -572,7 +561,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
572 to_free = false; 561 to_free = false;
573 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 562 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
574 "%d: merge free, buffer %pK share page with %pK\n", 563 "%d: merge free, buffer %pK share page with %pK\n",
575 alloc->pid, buffer->data, prev->data); 564 alloc->pid, buffer->user_data,
565 prev->user_data);
576 } 566 }
577 567
578 if (!list_is_last(&buffer->entry, &alloc->buffers)) { 568 if (!list_is_last(&buffer->entry, &alloc->buffers)) {
@@ -582,23 +572,24 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
582 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 572 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
583 "%d: merge free, buffer %pK share page with %pK\n", 573 "%d: merge free, buffer %pK share page with %pK\n",
584 alloc->pid, 574 alloc->pid,
585 buffer->data, 575 buffer->user_data,
586 next->data); 576 next->user_data);
587 } 577 }
588 } 578 }
589 579
590 if (PAGE_ALIGNED(buffer->data)) { 580 if (PAGE_ALIGNED(buffer->user_data)) {
591 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 581 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
592 "%d: merge free, buffer start %pK is page aligned\n", 582 "%d: merge free, buffer start %pK is page aligned\n",
593 alloc->pid, buffer->data); 583 alloc->pid, buffer->user_data);
594 to_free = false; 584 to_free = false;
595 } 585 }
596 586
597 if (to_free) { 587 if (to_free) {
598 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 588 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
599 "%d: merge free, buffer %pK do not share page with %pK or %pK\n", 589 "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
600 alloc->pid, buffer->data, 590 alloc->pid, buffer->user_data,
601 prev->data, next ? next->data : NULL); 591 prev->user_data,
592 next ? next->user_data : NULL);
602 binder_update_page_range(alloc, 0, buffer_start_page(buffer), 593 binder_update_page_range(alloc, 0, buffer_start_page(buffer),
603 buffer_start_page(buffer) + PAGE_SIZE); 594 buffer_start_page(buffer) + PAGE_SIZE);
604 } 595 }
@@ -624,8 +615,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
624 BUG_ON(buffer->free); 615 BUG_ON(buffer->free);
625 BUG_ON(size > buffer_size); 616 BUG_ON(size > buffer_size);
626 BUG_ON(buffer->transaction != NULL); 617 BUG_ON(buffer->transaction != NULL);
627 BUG_ON(buffer->data < alloc->buffer); 618 BUG_ON(buffer->user_data < alloc->buffer);
628 BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size); 619 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
629 620
630 if (buffer->async_transaction) { 621 if (buffer->async_transaction) {
631 alloc->free_async_space += size + sizeof(struct binder_buffer); 622 alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -636,8 +627,9 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
636 } 627 }
637 628
638 binder_update_page_range(alloc, 0, 629 binder_update_page_range(alloc, 0,
639 (void *)PAGE_ALIGN((uintptr_t)buffer->data), 630 (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
640 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK)); 631 (void __user *)(((uintptr_t)
632 buffer->user_data + buffer_size) & PAGE_MASK));
641 633
642 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); 634 rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
643 buffer->free = 1; 635 buffer->free = 1;
@@ -693,7 +685,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
693 struct vm_area_struct *vma) 685 struct vm_area_struct *vma)
694{ 686{
695 int ret; 687 int ret;
696 struct vm_struct *area;
697 const char *failure_string; 688 const char *failure_string;
698 struct binder_buffer *buffer; 689 struct binder_buffer *buffer;
699 690
@@ -704,28 +695,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
704 goto err_already_mapped; 695 goto err_already_mapped;
705 } 696 }
706 697
707 area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC); 698 alloc->buffer = (void __user *)vma->vm_start;
708 if (area == NULL) {
709 ret = -ENOMEM;
710 failure_string = "get_vm_area";
711 goto err_get_vm_area_failed;
712 }
713 alloc->buffer = area->addr;
714 alloc->user_buffer_offset =
715 vma->vm_start - (uintptr_t)alloc->buffer;
716 mutex_unlock(&binder_alloc_mmap_lock); 699 mutex_unlock(&binder_alloc_mmap_lock);
717 700
718#ifdef CONFIG_CPU_CACHE_VIPT
719 if (cache_is_vipt_aliasing()) {
720 while (CACHE_COLOUR(
721 (vma->vm_start ^ (uint32_t)alloc->buffer))) {
722 pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
723 __func__, alloc->pid, vma->vm_start,
724 vma->vm_end, alloc->buffer);
725 vma->vm_start += PAGE_SIZE;
726 }
727 }
728#endif
729 alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE, 701 alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
730 sizeof(alloc->pages[0]), 702 sizeof(alloc->pages[0]),
731 GFP_KERNEL); 703 GFP_KERNEL);
@@ -743,7 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
743 goto err_alloc_buf_struct_failed; 715 goto err_alloc_buf_struct_failed;
744 } 716 }
745 717
746 buffer->data = alloc->buffer; 718 buffer->user_data = alloc->buffer;
747 list_add(&buffer->entry, &alloc->buffers); 719 list_add(&buffer->entry, &alloc->buffers);
748 buffer->free = 1; 720 buffer->free = 1;
749 binder_insert_free_buffer(alloc, buffer); 721 binder_insert_free_buffer(alloc, buffer);
@@ -758,9 +730,7 @@ err_alloc_buf_struct_failed:
758 alloc->pages = NULL; 730 alloc->pages = NULL;
759err_alloc_pages_failed: 731err_alloc_pages_failed:
760 mutex_lock(&binder_alloc_mmap_lock); 732 mutex_lock(&binder_alloc_mmap_lock);
761 vfree(alloc->buffer);
762 alloc->buffer = NULL; 733 alloc->buffer = NULL;
763err_get_vm_area_failed:
764err_already_mapped: 734err_already_mapped:
765 mutex_unlock(&binder_alloc_mmap_lock); 735 mutex_unlock(&binder_alloc_mmap_lock);
766 binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 736 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
@@ -806,7 +776,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
806 int i; 776 int i;
807 777
808 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { 778 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
809 void *page_addr; 779 void __user *page_addr;
810 bool on_lru; 780 bool on_lru;
811 781
812 if (!alloc->pages[i].page_ptr) 782 if (!alloc->pages[i].page_ptr)
@@ -819,12 +789,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
819 "%s: %d: page %d at %pK %s\n", 789 "%s: %d: page %d at %pK %s\n",
820 __func__, alloc->pid, i, page_addr, 790 __func__, alloc->pid, i, page_addr,
821 on_lru ? "on lru" : "active"); 791 on_lru ? "on lru" : "active");
822 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
823 __free_page(alloc->pages[i].page_ptr); 792 __free_page(alloc->pages[i].page_ptr);
824 page_count++; 793 page_count++;
825 } 794 }
826 kfree(alloc->pages); 795 kfree(alloc->pages);
827 vfree(alloc->buffer);
828 } 796 }
829 mutex_unlock(&alloc->mutex); 797 mutex_unlock(&alloc->mutex);
830 if (alloc->vma_vm_mm) 798 if (alloc->vma_vm_mm)
@@ -839,7 +807,7 @@ static void print_binder_buffer(struct seq_file *m, const char *prefix,
839 struct binder_buffer *buffer) 807 struct binder_buffer *buffer)
840{ 808{
841 seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n", 809 seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
842 prefix, buffer->debug_id, buffer->data, 810 prefix, buffer->debug_id, buffer->user_data,
843 buffer->data_size, buffer->offsets_size, 811 buffer->data_size, buffer->offsets_size,
844 buffer->extra_buffers_size, 812 buffer->extra_buffers_size,
845 buffer->transaction ? "active" : "delivered"); 813 buffer->transaction ? "active" : "delivered");
@@ -964,7 +932,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
964 if (!mmget_not_zero(alloc->vma_vm_mm)) 932 if (!mmget_not_zero(alloc->vma_vm_mm))
965 goto err_mmget; 933 goto err_mmget;
966 mm = alloc->vma_vm_mm; 934 mm = alloc->vma_vm_mm;
967 if (!down_write_trylock(&mm->mmap_sem)) 935 if (!down_read_trylock(&mm->mmap_sem))
968 goto err_down_write_mmap_sem_failed; 936 goto err_down_write_mmap_sem_failed;
969 } 937 }
970 938
@@ -974,19 +942,16 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
974 if (vma) { 942 if (vma) {
975 trace_binder_unmap_user_start(alloc, index); 943 trace_binder_unmap_user_start(alloc, index);
976 944
977 zap_page_range(vma, 945 zap_page_range(vma, page_addr, PAGE_SIZE);
978 page_addr + alloc->user_buffer_offset,
979 PAGE_SIZE);
980 946
981 trace_binder_unmap_user_end(alloc, index); 947 trace_binder_unmap_user_end(alloc, index);
982 948
983 up_write(&mm->mmap_sem); 949 up_read(&mm->mmap_sem);
984 mmput(mm); 950 mmput(mm);
985 } 951 }
986 952
987 trace_binder_unmap_kernel_start(alloc, index); 953 trace_binder_unmap_kernel_start(alloc, index);
988 954
989 unmap_kernel_range(page_addr, PAGE_SIZE);
990 __free_page(page->page_ptr); 955 __free_page(page->page_ptr);
991 page->page_ptr = NULL; 956 page->page_ptr = NULL;
992 957
@@ -1053,3 +1018,173 @@ int binder_alloc_shrinker_init(void)
1053 } 1018 }
1054 return ret; 1019 return ret;
1055} 1020}
1021
1022/**
1023 * check_buffer() - verify that buffer/offset is safe to access
1024 * @alloc: binder_alloc for this proc
1025 * @buffer: binder buffer to be accessed
1026 * @offset: offset into @buffer data
1027 * @bytes: bytes to access from offset
1028 *
1029 * Check that the @offset/@bytes are within the size of the given
1030 * @buffer and that the buffer is currently active and not freeable.
1031 * Offsets must also be multiples of sizeof(u32). The kernel is
1032 * allowed to touch the buffer in two cases:
1033 *
1034 * 1) when the buffer is being created:
1035 * (buffer->free == 0 && buffer->allow_user_free == 0)
1036 * 2) when the buffer is being torn down:
1037 * (buffer->free == 0 && buffer->transaction == NULL).
1038 *
1039 * Return: true if the buffer is safe to access
1040 */
1041static inline bool check_buffer(struct binder_alloc *alloc,
1042 struct binder_buffer *buffer,
1043 binder_size_t offset, size_t bytes)
1044{
1045 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
1046
1047 return buffer_size >= bytes &&
1048 offset <= buffer_size - bytes &&
1049 IS_ALIGNED(offset, sizeof(u32)) &&
1050 !buffer->free &&
1051 (!buffer->allow_user_free || !buffer->transaction);
1052}
1053
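
One detail worth noting: the helper checks offset <= buffer_size - bytes only after establishing buffer_size >= bytes, which avoids the unsigned wrap that the naive offset + bytes <= buffer_size form can hit. A small runnable illustration, with made-up sizes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Overflow-safe range check in the style of check_buffer() above. */
static bool range_ok(size_t buffer_size, size_t offset, size_t bytes)
{
	return buffer_size >= bytes && offset <= buffer_size - bytes;
}

int main(void)
{
	size_t buffer_size = 128;
	size_t offset = 64, bytes = SIZE_MAX - 10;

	/* Naive form: offset + bytes wraps around and wrongly "passes". */
	printf("naive: %d\n", offset + bytes <= buffer_size);		/* prints 1 */
	printf("safe:  %d\n", range_ok(buffer_size, offset, bytes));	/* prints 0 */
	return 0;
}
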

1054/**
 1055 * binder_alloc_get_page() - get the struct page for a given buffer offset
1056 * @alloc: binder_alloc for this proc
1057 * @buffer: binder buffer to be accessed
1058 * @buffer_offset: offset into @buffer data
1059 * @pgoffp: address to copy final page offset to
1060 *
1061 * Lookup the struct page corresponding to the address
1062 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
1063 * NULL, the byte-offset into the page is written there.
1064 *
 1065 * The caller is responsible for ensuring that the offset points
1066 * to a valid address within the @buffer and that @buffer is
1067 * not freeable by the user. Since it can't be freed, we are
1068 * guaranteed that the corresponding elements of @alloc->pages[]
1069 * cannot change.
1070 *
1071 * Return: struct page
1072 */
1073static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
1074 struct binder_buffer *buffer,
1075 binder_size_t buffer_offset,
1076 pgoff_t *pgoffp)
1077{
1078 binder_size_t buffer_space_offset = buffer_offset +
1079 (buffer->user_data - alloc->buffer);
1080 pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
1081 size_t index = buffer_space_offset >> PAGE_SHIFT;
1082 struct binder_lru_page *lru_page;
1083
1084 lru_page = &alloc->pages[index];
1085 *pgoffp = pgoff;
1086 return lru_page->page_ptr;
1087}
1088
1089/**
1090 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
1091 * @alloc: binder_alloc for this proc
1092 * @buffer: binder buffer to be accessed
1093 * @buffer_offset: offset into @buffer data
1094 * @from: userspace pointer to source buffer
1095 * @bytes: bytes to copy
1096 *
1097 * Copy bytes from source userspace to target buffer.
1098 *
1099 * Return: bytes remaining to be copied
1100 */
1101unsigned long
1102binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
1103 struct binder_buffer *buffer,
1104 binder_size_t buffer_offset,
1105 const void __user *from,
1106 size_t bytes)
1107{
1108 if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1109 return bytes;
1110
1111 while (bytes) {
1112 unsigned long size;
1113 unsigned long ret;
1114 struct page *page;
1115 pgoff_t pgoff;
1116 void *kptr;
1117
1118 page = binder_alloc_get_page(alloc, buffer,
1119 buffer_offset, &pgoff);
1120 size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1121 kptr = kmap(page) + pgoff;
1122 ret = copy_from_user(kptr, from, size);
1123 kunmap(page);
1124 if (ret)
1125 return bytes - size + ret;
1126 bytes -= size;
1127 from += size;
1128 buffer_offset += size;
1129 }
1130 return 0;
1131}
1132
1133static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
1134 bool to_buffer,
1135 struct binder_buffer *buffer,
1136 binder_size_t buffer_offset,
1137 void *ptr,
1138 size_t bytes)
1139{
1140 /* All copies must be 32-bit aligned and 32-bit size */
1141 BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));
1142
1143 while (bytes) {
1144 unsigned long size;
1145 struct page *page;
1146 pgoff_t pgoff;
1147 void *tmpptr;
1148 void *base_ptr;
1149
1150 page = binder_alloc_get_page(alloc, buffer,
1151 buffer_offset, &pgoff);
1152 size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1153 base_ptr = kmap_atomic(page);
1154 tmpptr = base_ptr + pgoff;
1155 if (to_buffer)
1156 memcpy(tmpptr, ptr, size);
1157 else
1158 memcpy(ptr, tmpptr, size);
1159 /*
1160 * kunmap_atomic() takes care of flushing the cache
1161 * if this device has VIVT cache arch
1162 */
1163 kunmap_atomic(base_ptr);
1164 bytes -= size;
1165 pgoff = 0;
1166 ptr = ptr + size;
1167 buffer_offset += size;
1168 }
1169}
1170
1171void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
1172 struct binder_buffer *buffer,
1173 binder_size_t buffer_offset,
1174 void *src,
1175 size_t bytes)
1176{
1177 binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
1178 src, bytes);
1179}
1180
1181void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
1182 void *dest,
1183 struct binder_buffer *buffer,
1184 binder_size_t buffer_offset,
1185 size_t bytes)
1186{
1187 binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
1188 dest, bytes);
1189}
1190
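
With these helpers, callers in binder.c are expected to go through the per-page kmap copies rather than dereferencing a buffer pointer directly. A hedged sketch of typical call sites; the stage_user_data()/peek_object_type() names and the 0 offset are illustrative, not taken from the driver:

#include <linux/errno.h>
#include <linux/uaccess.h>
#include "binder_alloc.h"

static int stage_user_data(struct binder_alloc *alloc,
			   struct binder_buffer *buf,
			   const void __user *ubuf, size_t len)
{
	/* binder_alloc_copy_user_to_buffer() returns the bytes it could not copy. */
	if (binder_alloc_copy_user_to_buffer(alloc, buf, 0, ubuf, len))
		return -EFAULT;
	return 0;
}

static u32 peek_object_type(struct binder_alloc *alloc,
			    struct binder_buffer *buf, binder_size_t off)
{
	u32 type;

	binder_alloc_copy_from_buffer(alloc, &type, buf, off, sizeof(type));
	return type;
}
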
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index c0aadbbf7f19..b60d161b7a7a 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -22,6 +22,7 @@
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/list_lru.h> 24#include <linux/list_lru.h>
25#include <uapi/linux/android/binder.h>
25 26
26extern struct list_lru binder_alloc_lru; 27extern struct list_lru binder_alloc_lru;
27struct binder_transaction; 28struct binder_transaction;
@@ -39,7 +40,7 @@ struct binder_transaction;
39 * @data_size: size of @transaction data 40 * @data_size: size of @transaction data
40 * @offsets_size: size of array of offsets 41 * @offsets_size: size of array of offsets
41 * @extra_buffers_size: size of space for other objects (like sg lists) 42 * @extra_buffers_size: size of space for other objects (like sg lists)
42 * @data: pointer to base of buffer space 43 * @user_data: user pointer to base of buffer space
43 * 44 *
44 * Bookkeeping structure for binder transaction buffers 45 * Bookkeeping structure for binder transaction buffers
45 */ 46 */
@@ -58,7 +59,7 @@ struct binder_buffer {
58 size_t data_size; 59 size_t data_size;
59 size_t offsets_size; 60 size_t offsets_size;
60 size_t extra_buffers_size; 61 size_t extra_buffers_size;
61 void *data; 62 void __user *user_data;
62}; 63};
63 64
64/** 65/**
@@ -81,7 +82,6 @@ struct binder_lru_page {
81 * (invariant after init) 82 * (invariant after init)
82 * @vma_vm_mm: copy of vma->vm_mm (invarient after mmap) 83 * @vma_vm_mm: copy of vma->vm_mm (invarient after mmap)
83 * @buffer: base of per-proc address space mapped via mmap 84 * @buffer: base of per-proc address space mapped via mmap
84 * @user_buffer_offset: offset between user and kernel VAs for buffer
85 * @buffers: list of all buffers for this proc 85 * @buffers: list of all buffers for this proc
86 * @free_buffers: rb tree of buffers available for allocation 86 * @free_buffers: rb tree of buffers available for allocation
87 * sorted by size 87 * sorted by size
@@ -102,8 +102,7 @@ struct binder_alloc {
102 struct mutex mutex; 102 struct mutex mutex;
103 struct vm_area_struct *vma; 103 struct vm_area_struct *vma;
104 struct mm_struct *vma_vm_mm; 104 struct mm_struct *vma_vm_mm;
105 void *buffer; 105 void __user *buffer;
106 ptrdiff_t user_buffer_offset;
107 struct list_head buffers; 106 struct list_head buffers;
108 struct rb_root free_buffers; 107 struct rb_root free_buffers;
109 struct rb_root allocated_buffers; 108 struct rb_root allocated_buffers;
@@ -162,26 +161,24 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc)
162 return free_async_space; 161 return free_async_space;
163} 162}
164 163
165/** 164unsigned long
166 * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs 165binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
167 * @alloc: binder_alloc for this proc 166 struct binder_buffer *buffer,
168 * 167 binder_size_t buffer_offset,
169 * Return: the offset between kernel and user-space addresses to use for 168 const void __user *from,
170 * virtual address conversion 169 size_t bytes);
171 */ 170
172static inline ptrdiff_t 171void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
173binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc) 172 struct binder_buffer *buffer,
174{ 173 binder_size_t buffer_offset,
175 /* 174 void *src,
176 * user_buffer_offset is constant if vma is set and 175 size_t bytes);
177 * undefined if vma is not set. It is possible to 176
178 * get here with !alloc->vma if the target process 177void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
179 * is dying while a transaction is being initiated. 178 void *dest,
180 * Returning the old value is ok in this case and 179 struct binder_buffer *buffer,
181 * the transaction will fail. 180 binder_size_t buffer_offset,
182 */ 181 size_t bytes);
183 return alloc->user_buffer_offset;
184}
185 182
186#endif /* _LINUX_BINDER_ALLOC_H */ 183#endif /* _LINUX_BINDER_ALLOC_H */
187 184
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 8bd7bcef967d..b72708918b06 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -102,11 +102,12 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
102 struct binder_buffer *buffer, 102 struct binder_buffer *buffer,
103 size_t size) 103 size_t size)
104{ 104{
105 void *page_addr, *end; 105 void __user *page_addr;
106 void __user *end;
106 int page_index; 107 int page_index;
107 108
108 end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size); 109 end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
109 page_addr = buffer->data; 110 page_addr = buffer->user_data;
110 for (; page_addr < end; page_addr += PAGE_SIZE) { 111 for (; page_addr < end; page_addr += PAGE_SIZE) {
111 page_index = (page_addr - alloc->buffer) / PAGE_SIZE; 112 page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
112 if (!alloc->pages[page_index].page_ptr || 113 if (!alloc->pages[page_index].page_ptr ||
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 14de7ac57a34..83cc254d2335 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -293,7 +293,7 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
293 293
294TRACE_EVENT(binder_update_page_range, 294TRACE_EVENT(binder_update_page_range,
295 TP_PROTO(struct binder_alloc *alloc, bool allocate, 295 TP_PROTO(struct binder_alloc *alloc, bool allocate,
296 void *start, void *end), 296 void __user *start, void __user *end),
297 TP_ARGS(alloc, allocate, start, end), 297 TP_ARGS(alloc, allocate, start, end),
298 TP_STRUCT__entry( 298 TP_STRUCT__entry(
299 __field(int, proc) 299 __field(int, proc)
diff --git a/drivers/base/component.c b/drivers/base/component.c
index ddcea8739c12..7dbc41cccd58 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -16,11 +16,38 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/debugfs.h> 17#include <linux/debugfs.h>
18 18
19/**
20 * DOC: overview
21 *
22 * The component helper allows drivers to collect a pile of sub-devices,
23 * including their bound drivers, into an aggregate driver. Various subsystems
24 * already provide functions to get hold of such components, e.g.
25 * of_clk_get_by_name(). The component helper can be used when such a
26 * subsystem-specific way to find a device is not available: The component
27 * helper fills the niche of aggregate drivers for specific hardware, where
28 * further standardization into a subsystem would not be practical. The common
29 * example is when a logical device (e.g. a DRM display driver) is spread around
30 * the SoC on various component (scanout engines, blending blocks, transcoders
31 * for various outputs and so on).
32 *
33 * The component helper also doesn't solve runtime dependencies, e.g. for system
34 * suspend and resume operations. See also :ref:`device links<device_link>`.
35 *
36 * Components are registered using component_add() and unregistered with
37 * component_del(), usually from the driver's probe and disconnect functions.
38 *
39 * Aggregate drivers first assemble a component match list of what they need
40 * using component_match_add(). This is then registered as an aggregate driver
41 * using component_master_add_with_match(), and unregistered using
42 * component_master_del().
43 */
44
19struct component; 45struct component;
20 46
21struct component_match_array { 47struct component_match_array {
22 void *data; 48 void *data;
23 int (*compare)(struct device *, void *); 49 int (*compare)(struct device *, void *);
50 int (*compare_typed)(struct device *, int, void *);
24 void (*release)(struct device *, void *); 51 void (*release)(struct device *, void *);
25 struct component *component; 52 struct component *component;
26 bool duplicate; 53 bool duplicate;
@@ -48,6 +75,7 @@ struct component {
48 bool bound; 75 bool bound;
49 76
50 const struct component_ops *ops; 77 const struct component_ops *ops;
78 int subcomponent;
51 struct device *dev; 79 struct device *dev;
52}; 80};
53 81
@@ -132,7 +160,7 @@ static struct master *__master_find(struct device *dev,
132} 160}
133 161
134static struct component *find_component(struct master *master, 162static struct component *find_component(struct master *master,
135 int (*compare)(struct device *, void *), void *compare_data) 163 struct component_match_array *mc)
136{ 164{
137 struct component *c; 165 struct component *c;
138 166
@@ -140,7 +168,11 @@ static struct component *find_component(struct master *master,
140 if (c->master && c->master != master) 168 if (c->master && c->master != master)
141 continue; 169 continue;
142 170
143 if (compare(c->dev, compare_data)) 171 if (mc->compare && mc->compare(c->dev, mc->data))
172 return c;
173
174 if (mc->compare_typed &&
175 mc->compare_typed(c->dev, c->subcomponent, mc->data))
144 return c; 176 return c;
145 } 177 }
146 178
@@ -166,7 +198,7 @@ static int find_components(struct master *master)
166 if (match->compare[i].component) 198 if (match->compare[i].component)
167 continue; 199 continue;
168 200
169 c = find_component(master, mc->compare, mc->data); 201 c = find_component(master, mc);
170 if (!c) { 202 if (!c) {
171 ret = -ENXIO; 203 ret = -ENXIO;
172 break; 204 break;
@@ -301,15 +333,12 @@ static int component_match_realloc(struct device *dev,
301 return 0; 333 return 0;
302} 334}
303 335
304/* 336static void __component_match_add(struct device *master,
305 * Add a component to be matched, with a release function.
306 *
307 * The match array is first created or extended if necessary.
308 */
309void component_match_add_release(struct device *master,
310 struct component_match **matchptr, 337 struct component_match **matchptr,
311 void (*release)(struct device *, void *), 338 void (*release)(struct device *, void *),
312 int (*compare)(struct device *, void *), void *compare_data) 339 int (*compare)(struct device *, void *),
340 int (*compare_typed)(struct device *, int, void *),
341 void *compare_data)
313{ 342{
314 struct component_match *match = *matchptr; 343 struct component_match *match = *matchptr;
315 344
@@ -341,13 +370,69 @@ void component_match_add_release(struct device *master,
341 } 370 }
342 371
343 match->compare[match->num].compare = compare; 372 match->compare[match->num].compare = compare;
373 match->compare[match->num].compare_typed = compare_typed;
344 match->compare[match->num].release = release; 374 match->compare[match->num].release = release;
345 match->compare[match->num].data = compare_data; 375 match->compare[match->num].data = compare_data;
346 match->compare[match->num].component = NULL; 376 match->compare[match->num].component = NULL;
347 match->num++; 377 match->num++;
348} 378}
379
380/**
381 * component_match_add_release - add a component match with release callback
382 * @master: device with the aggregate driver
383 * @matchptr: pointer to the list of component matches
384 * @release: release function for @compare_data
385 * @compare: compare function to match against all components
386 * @compare_data: opaque pointer passed to the @compare function
387 *
388 * Adds a new component match to the list stored in @matchptr, which the @master
389 * aggregate driver needs to function. The list of component matches pointed to
390 * by @matchptr must be initialized to NULL before adding the first match. This
391 * only matches against components added with component_add().
392 *
393 * The allocated match list in @matchptr is automatically released using devm
 394 * actions, whereupon @release will be called to free any references held by
395 * @compare_data, e.g. when @compare_data is a &device_node that must be
396 * released with of_node_put().
397 *
398 * See also component_match_add() and component_match_add_typed().
399 */
400void component_match_add_release(struct device *master,
401 struct component_match **matchptr,
402 void (*release)(struct device *, void *),
403 int (*compare)(struct device *, void *), void *compare_data)
404{
405 __component_match_add(master, matchptr, release, compare, NULL,
406 compare_data);
407}
349EXPORT_SYMBOL(component_match_add_release); 408EXPORT_SYMBOL(component_match_add_release);
350 409
410/**
 411 * component_match_add_typed - add a component match for a typed component
412 * @master: device with the aggregate driver
413 * @matchptr: pointer to the list of component matches
414 * @compare_typed: compare function to match against all typed components
415 * @compare_data: opaque pointer passed to the @compare function
416 *
417 * Adds a new component match to the list stored in @matchptr, which the @master
418 * aggregate driver needs to function. The list of component matches pointed to
419 * by @matchptr must be initialized to NULL before adding the first match. This
420 * only matches against components added with component_add_typed().
421 *
422 * The allocated match list in @matchptr is automatically released using devm
423 * actions.
424 *
 425 * See also component_match_add() and component_match_add_release().
426 */
427void component_match_add_typed(struct device *master,
428 struct component_match **matchptr,
429 int (*compare_typed)(struct device *, int, void *), void *compare_data)
430{
431 __component_match_add(master, matchptr, NULL, NULL, compare_typed,
432 compare_data);
433}
434EXPORT_SYMBOL(component_match_add_typed);
435
351static void free_master(struct master *master) 436static void free_master(struct master *master)
352{ 437{
353 struct component_match *match = master->match; 438 struct component_match *match = master->match;
@@ -367,6 +452,18 @@ static void free_master(struct master *master)
367 kfree(master); 452 kfree(master);
368} 453}
369 454
455/**
456 * component_master_add_with_match - register an aggregate driver
457 * @dev: device with the aggregate driver
458 * @ops: callbacks for the aggregate driver
459 * @match: component match list for the aggregate driver
460 *
461 * Registers a new aggregate driver consisting of the components added to @match
462 * by calling one of the component_match_add() functions. Once all components in
463 * @match are available, it will be assembled by calling
464 * &component_master_ops.bind from @ops. Must be unregistered by calling
465 * component_master_del().
466 */
370int component_master_add_with_match(struct device *dev, 467int component_master_add_with_match(struct device *dev,
371 const struct component_master_ops *ops, 468 const struct component_master_ops *ops,
372 struct component_match *match) 469 struct component_match *match)
@@ -403,6 +500,15 @@ int component_master_add_with_match(struct device *dev,
403} 500}
404EXPORT_SYMBOL_GPL(component_master_add_with_match); 501EXPORT_SYMBOL_GPL(component_master_add_with_match);
405 502
503/**
504 * component_master_del - unregister an aggregate driver
505 * @dev: device with the aggregate driver
506 * @ops: callbacks for the aggregate driver
507 *
508 * Unregisters an aggregate driver registered with
509 * component_master_add_with_match(). If necessary the aggregate driver is first
510 * disassembled by calling &component_master_ops.unbind from @ops.
511 */
406void component_master_del(struct device *dev, 512void component_master_del(struct device *dev,
407 const struct component_master_ops *ops) 513 const struct component_master_ops *ops)
408{ 514{
@@ -430,6 +536,15 @@ static void component_unbind(struct component *component,
430 devres_release_group(component->dev, component); 536 devres_release_group(component->dev, component);
431} 537}
432 538
539/**
 540 * component_unbind_all - unbind all components of an aggregate driver
541 * @master_dev: device with the aggregate driver
542 * @data: opaque pointer, passed to all components
543 *
 544 * Unbinds all components from the aggregate @master_dev by passing @data to their
545 * &component_ops.unbind functions. Should be called from
546 * &component_master_ops.unbind.
547 */
433void component_unbind_all(struct device *master_dev, void *data) 548void component_unbind_all(struct device *master_dev, void *data)
434{ 549{
435 struct master *master; 550 struct master *master;
@@ -503,6 +618,15 @@ static int component_bind(struct component *component, struct master *master,
503 return ret; 618 return ret;
504} 619}
505 620
621/**
 622 * component_bind_all - bind all components to an aggregate driver
623 * @master_dev: device with the aggregate driver
624 * @data: opaque pointer, passed to all components
625 *
 626 * Binds all components to the aggregate @master_dev by passing @data to their
627 * &component_ops.bind functions. Should be called from
628 * &component_master_ops.bind.
629 */
506int component_bind_all(struct device *master_dev, void *data) 630int component_bind_all(struct device *master_dev, void *data)
507{ 631{
508 struct master *master; 632 struct master *master;
@@ -537,7 +661,8 @@ int component_bind_all(struct device *master_dev, void *data)
537} 661}
538EXPORT_SYMBOL_GPL(component_bind_all); 662EXPORT_SYMBOL_GPL(component_bind_all);
539 663
540int component_add(struct device *dev, const struct component_ops *ops) 664static int __component_add(struct device *dev, const struct component_ops *ops,
665 int subcomponent)
541{ 666{
542 struct component *component; 667 struct component *component;
543 int ret; 668 int ret;
@@ -548,6 +673,7 @@ int component_add(struct device *dev, const struct component_ops *ops)
548 673
549 component->ops = ops; 674 component->ops = ops;
550 component->dev = dev; 675 component->dev = dev;
676 component->subcomponent = subcomponent;
551 677
552 dev_dbg(dev, "adding component (ops %ps)\n", ops); 678 dev_dbg(dev, "adding component (ops %ps)\n", ops);
553 679
@@ -566,8 +692,66 @@ int component_add(struct device *dev, const struct component_ops *ops)
566 692
567 return ret < 0 ? ret : 0; 693 return ret < 0 ? ret : 0;
568} 694}
695
696/**
697 * component_add_typed - register a component
698 * @dev: component device
699 * @ops: component callbacks
700 * @subcomponent: nonzero identifier for subcomponents
701 *
 702 * Register a new component for @dev. Functions in @ops will be called when the
703 * aggregate driver is ready to bind the overall driver by calling
704 * component_bind_all(). See also &struct component_ops.
705 *
706 * @subcomponent must be nonzero and is used to differentiate between multiple
707 * components registerd on the same device @dev. These components are match
708 * using component_match_add_typed().
709 *
710 * The component needs to be unregistered at driver unload/disconnect by
711 * calling component_del().
712 *
713 * See also component_add().
714 */
715int component_add_typed(struct device *dev, const struct component_ops *ops,
716 int subcomponent)
717{
718 if (WARN_ON(subcomponent == 0))
719 return -EINVAL;
720
721 return __component_add(dev, ops, subcomponent);
722}
723EXPORT_SYMBOL_GPL(component_add_typed);
724
725/**
726 * component_add - register a component
727 * @dev: component device
728 * @ops: component callbacks
729 *
730 * Register a new component for @dev. Functions in @ops will be called when the
731 * aggregate driver is ready to bind the overall driver by calling
732 * component_bind_all(). See also &struct component_ops.
733 *
734 * The component needs to be unregistered at driver unload/disconnect by
735 * calling component_del().
736 *
 737 * See also component_add_typed() for a variant that allows multiple different
738 * components on the same device.
739 */
740int component_add(struct device *dev, const struct component_ops *ops)
741{
742 return __component_add(dev, ops, 0);
743}
569EXPORT_SYMBOL_GPL(component_add); 744EXPORT_SYMBOL_GPL(component_add);
570 745
746/**
747 * component_del - unregister a component
748 * @dev: component device
749 * @ops: component callbacks
750 *
751 * Unregister a component added with component_add(). If the component is bound
752 * into an aggregate driver, this will force the entire aggregate driver, including
753 * all its components, to be unbound.
754 */
571void component_del(struct device *dev, const struct component_ops *ops) 755void component_del(struct device *dev, const struct component_ops *ops)
572{ 756{
573 struct component *c, *component = NULL; 757 struct component *c, *component = NULL;
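
The kerneldoc added above describes the full typed-component flow; here is a hedged sketch of how a pair of drivers might use it. Every foo_* name and the FOO_ENCODER id are hypothetical stand-ins.

#include <linux/component.h>
#include <linux/device.h>
#include <linux/errno.h>

#define FOO_ENCODER 1	/* subcomponent ids must be nonzero */

/* Aggregate driver side. */
static int foo_match(struct device *dev, int subcomponent, void *data)
{
	return dev == data && subcomponent == FOO_ENCODER;
}

static int foo_master_bind(struct device *dev)
{
	return component_bind_all(dev, NULL);
}

static void foo_master_unbind(struct device *dev)
{
	component_unbind_all(dev, NULL);
}

static const struct component_master_ops foo_master_ops = {
	.bind   = foo_master_bind,
	.unbind = foo_master_unbind,
};

static int foo_register_aggregate(struct device *dev, struct device *encoder_dev)
{
	struct component_match *match = NULL;

	component_match_add_typed(dev, &match, foo_match, encoder_dev);
	return component_master_add_with_match(dev, &foo_master_ops, match);
}

/* Sub-device driver side: register the typed component from probe(). */
static int foo_encoder_bind(struct device *dev, struct device *master, void *data)
{
	return 0;
}

static void foo_encoder_unbind(struct device *dev, struct device *master, void *data)
{
}

static const struct component_ops foo_encoder_ops = {
	.bind   = foo_encoder_bind,
	.unbind = foo_encoder_unbind,
};

static int foo_encoder_probe(struct device *dev)
{
	return component_add_typed(dev, &foo_encoder_ops, FOO_ENCODER);
}
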
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 2e2ffe7010aa..72866a004f07 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -244,26 +244,23 @@ source "drivers/char/hw_random/Kconfig"
244 244
245config NVRAM 245config NVRAM
246 tristate "/dev/nvram support" 246 tristate "/dev/nvram support"
247 depends on ATARI || X86 || GENERIC_NVRAM 247 depends on X86 || HAVE_ARCH_NVRAM_OPS
248 default M68K || PPC
248 ---help--- 249 ---help---
249 If you say Y here and create a character special file /dev/nvram 250 If you say Y here and create a character special file /dev/nvram
250 with major number 10 and minor number 144 using mknod ("man mknod"), 251 with major number 10 and minor number 144 using mknod ("man mknod"),
251 you get read and write access to the extra bytes of non-volatile 252 you get read and write access to the non-volatile memory.
252 memory in the real time clock (RTC), which is contained in every PC 253
253 and most Ataris. The actual number of bytes varies, depending on the 254 /dev/nvram may be used to view settings in NVRAM or to change them
254 nvram in the system, but is usually 114 (128-14 for the RTC). 255 (with some utility). It could also be used to frequently
255
256 This memory is conventionally called "CMOS RAM" on PCs and "NVRAM"
257 on Ataris. /dev/nvram may be used to view settings there, or to
258 change them (with some utility). It could also be used to frequently
259 save a few bits of very important data that may not be lost over 256 save a few bits of very important data that may not be lost over
260 power-off and for which writing to disk is too insecure. Note 257 power-off and for which writing to disk is too insecure. Note
261 however that most NVRAM space in a PC belongs to the BIOS and you 258 however that most NVRAM space in a PC belongs to the BIOS and you
262 should NEVER idly tamper with it. See Ralf Brown's interrupt list 259 should NEVER idly tamper with it. See Ralf Brown's interrupt list
263 for a guide to the use of CMOS bytes by your BIOS. 260 for a guide to the use of CMOS bytes by your BIOS.
264 261
265 On Atari machines, /dev/nvram is always configured and does not need 262 This memory is conventionally called "NVRAM" on PowerPC machines,
266 to be selected. 263 "CMOS RAM" on PCs, "NVRAM" on Ataris and "PRAM" on Macintoshes.
267 264
268 To compile this driver as a module, choose M here: the 265 To compile this driver as a module, choose M here: the
269 module will be called nvram. 266 module will be called nvram.
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index b8d42b4e979b..fbea7dd12932 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -26,11 +26,7 @@ obj-$(CONFIG_RTC) += rtc.o
26obj-$(CONFIG_HPET) += hpet.o 26obj-$(CONFIG_HPET) += hpet.o
27obj-$(CONFIG_EFI_RTC) += efirtc.o 27obj-$(CONFIG_EFI_RTC) += efirtc.o
28obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/ 28obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/
29ifeq ($(CONFIG_GENERIC_NVRAM),y) 29obj-$(CONFIG_NVRAM) += nvram.o
30 obj-$(CONFIG_NVRAM) += generic_nvram.o
31else
32 obj-$(CONFIG_NVRAM) += nvram.o
33endif
34obj-$(CONFIG_TOSHIBA) += toshiba.o 30obj-$(CONFIG_TOSHIBA) += toshiba.o
35obj-$(CONFIG_DS1620) += ds1620.o 31obj-$(CONFIG_DS1620) += ds1620.o
36obj-$(CONFIG_HW_RANDOM) += hw_random/ 32obj-$(CONFIG_HW_RANDOM) += hw_random/
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index c0a5b1f3a986..4ccc39e00ced 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -32,6 +32,7 @@
32#include <linux/wait.h> 32#include <linux/wait.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/fs.h> 34#include <linux/fs.h>
35#include <linux/nospec.h>
35 36
36#include <asm/io.h> 37#include <asm/io.h>
37#include <linux/uaccess.h> 38#include <linux/uaccess.h>
@@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count,
386 TicCard = st_loc.tic_des_from_pc; /* tic number to send */ 387 TicCard = st_loc.tic_des_from_pc; /* tic number to send */
387 IndexCard = NumCard - 1; 388 IndexCard = NumCard - 1;
388 389
389 if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO) 390 if (IndexCard >= MAX_BOARD)
391 return -EINVAL;
392 IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
393
394 if (!apbs[IndexCard].RamIO)
390 return -EINVAL; 395 return -EINVAL;
391 396
392#ifdef DEBUG 397#ifdef DEBUG
@@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
697 unsigned char IndexCard; 702 unsigned char IndexCard;
698 void __iomem *pmem; 703 void __iomem *pmem;
699 int ret = 0; 704 int ret = 0;
705 static int warncount = 10;
700 volatile unsigned char byte_reset_it; 706 volatile unsigned char byte_reset_it;
701 struct st_ram_io *adgl; 707 struct st_ram_io *adgl;
702 void __user *argp = (void __user *)arg; 708 void __user *argp = (void __user *)arg;
@@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
711 mutex_lock(&ac_mutex); 717 mutex_lock(&ac_mutex);
712 IndexCard = adgl->num_card-1; 718 IndexCard = adgl->num_card-1;
713 719
714 if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) { 720 if (cmd != 6 && IndexCard >= MAX_BOARD)
715 static int warncount = 10; 721 goto err;
716 if (warncount) { 722 IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
717 printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1); 723
718 warncount--; 724 if (cmd != 6 && !apbs[IndexCard].RamIO)
719 } 725 goto err;
720 kfree(adgl);
721 mutex_unlock(&ac_mutex);
722 return -EINVAL;
723 }
724 726
725 switch (cmd) { 727 switch (cmd) {
726 728
@@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
838 kfree(adgl); 840 kfree(adgl);
839 mutex_unlock(&ac_mutex); 841 mutex_unlock(&ac_mutex);
840 return 0; 842 return 0;
843
844err:
845 if (warncount) {
846 pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
847 (int)IndexCard + 1);
848 warncount--;
849 }
850 kfree(adgl);
851 mutex_unlock(&ac_mutex);
852 return -EINVAL;
853
841} 854}
842 855
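
The applicom fix follows the standard Spectre-v1 pattern: a bounds check followed by array_index_nospec() before the user-controlled index is used. A hedged stand-alone sketch; the ex_boards table is a stand-in, not the driver's real apbs[] array:

#include <linux/nospec.h>

#define EX_MAX_BOARD 8

struct ex_board {
	void *ram_io;
};

static struct ex_board ex_boards[EX_MAX_BOARD];

static struct ex_board *ex_lookup_board(unsigned int num_card)
{
	unsigned int idx = num_card - 1;	/* num_card comes from userspace */

	if (idx >= EX_MAX_BOARD)
		return NULL;
	/* Clamp the index under speculation so a mispredicted bounds check
	 * cannot be used to read outside the array. */
	idx = array_index_nospec(idx, EX_MAX_BOARD);
	return &ex_boards[idx];
}
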
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index d9aab643997e..11781ebffbf7 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -255,35 +255,12 @@ static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
255} 255}
256 256
257/* 257/*
258 * We enforce only one user at a time here with the open/close.
259 * Also clear the previous interrupt data on an open, and clean
260 * up things on a close.
261 */
262
263static int efi_rtc_open(struct inode *inode, struct file *file)
264{
265 /*
266 * nothing special to do here
267 * We do accept multiple open files at the same time as we
268 * synchronize on the per call operation.
269 */
270 return 0;
271}
272
273static int efi_rtc_close(struct inode *inode, struct file *file)
274{
275 return 0;
276}
277
278/*
279 * The various file operations we support. 258 * The various file operations we support.
280 */ 259 */
281 260
282static const struct file_operations efi_rtc_fops = { 261static const struct file_operations efi_rtc_fops = {
283 .owner = THIS_MODULE, 262 .owner = THIS_MODULE,
284 .unlocked_ioctl = efi_rtc_ioctl, 263 .unlocked_ioctl = efi_rtc_ioctl,
285 .open = efi_rtc_open,
286 .release = efi_rtc_close,
287 .llseek = no_llseek, 264 .llseek = no_llseek,
288}; 265};
289 266
diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c
deleted file mode 100644
index ff5394f47587..000000000000
--- a/drivers/char/generic_nvram.c
+++ /dev/null
@@ -1,159 +0,0 @@
1/*
2 * Generic /dev/nvram driver for architectures providing some
3 * "generic" hooks, that is :
4 *
5 * nvram_read_byte, nvram_write_byte, nvram_sync, nvram_get_size
6 *
7 * Note that an additional hook is supported for PowerMac only
8 * for getting the nvram "partition" informations
9 *
10 */
11
12#define NVRAM_VERSION "1.1"
13
14#include <linux/module.h>
15
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/fs.h>
19#include <linux/miscdevice.h>
20#include <linux/fcntl.h>
21#include <linux/init.h>
22#include <linux/mutex.h>
23#include <linux/pagemap.h>
24#include <linux/uaccess.h>
25#include <asm/nvram.h>
26#ifdef CONFIG_PPC_PMAC
27#include <asm/machdep.h>
28#endif
29
30#define NVRAM_SIZE 8192
31
32static DEFINE_MUTEX(nvram_mutex);
33static ssize_t nvram_len;
34
35static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
36{
37 return generic_file_llseek_size(file, offset, origin,
38 MAX_LFS_FILESIZE, nvram_len);
39}
40
41static ssize_t read_nvram(struct file *file, char __user *buf,
42 size_t count, loff_t *ppos)
43{
44 unsigned int i;
45 char __user *p = buf;
46
47 if (!access_ok(buf, count))
48 return -EFAULT;
49 if (*ppos >= nvram_len)
50 return 0;
51 for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count)
52 if (__put_user(nvram_read_byte(i), p))
53 return -EFAULT;
54 *ppos = i;
55 return p - buf;
56}
57
58static ssize_t write_nvram(struct file *file, const char __user *buf,
59 size_t count, loff_t *ppos)
60{
61 unsigned int i;
62 const char __user *p = buf;
63 char c;
64
65 if (!access_ok(buf, count))
66 return -EFAULT;
67 if (*ppos >= nvram_len)
68 return 0;
69 for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count) {
70 if (__get_user(c, p))
71 return -EFAULT;
72 nvram_write_byte(c, i);
73 }
74 *ppos = i;
75 return p - buf;
76}
77
78static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
79{
80 switch(cmd) {
81#ifdef CONFIG_PPC_PMAC
82 case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
83 printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
84 case IOC_NVRAM_GET_OFFSET: {
85 int part, offset;
86
87 if (!machine_is(powermac))
88 return -EINVAL;
89 if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
90 return -EFAULT;
91 if (part < pmac_nvram_OF || part > pmac_nvram_NR)
92 return -EINVAL;
93 offset = pmac_get_partition(part);
94 if (copy_to_user((void __user*)arg, &offset, sizeof(offset)) != 0)
95 return -EFAULT;
96 break;
97 }
98#endif /* CONFIG_PPC_PMAC */
99 case IOC_NVRAM_SYNC:
100 nvram_sync();
101 break;
102 default:
103 return -EINVAL;
104 }
105
106 return 0;
107}
108
109static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
110{
111 int ret;
112
113 mutex_lock(&nvram_mutex);
114 ret = nvram_ioctl(file, cmd, arg);
115 mutex_unlock(&nvram_mutex);
116
117 return ret;
118}
119
120const struct file_operations nvram_fops = {
121 .owner = THIS_MODULE,
122 .llseek = nvram_llseek,
123 .read = read_nvram,
124 .write = write_nvram,
125 .unlocked_ioctl = nvram_unlocked_ioctl,
126};
127
128static struct miscdevice nvram_dev = {
129 NVRAM_MINOR,
130 "nvram",
131 &nvram_fops
132};
133
134int __init nvram_init(void)
135{
136 int ret = 0;
137
138 printk(KERN_INFO "Generic non-volatile memory driver v%s\n",
139 NVRAM_VERSION);
140 ret = misc_register(&nvram_dev);
141 if (ret != 0)
142 goto out;
143
144 nvram_len = nvram_get_size();
145 if (nvram_len < 0)
146 nvram_len = NVRAM_SIZE;
147
148out:
149 return ret;
150}
151
152void __exit nvram_cleanup(void)
153{
154 misc_deregister( &nvram_dev );
155}
156
157module_init(nvram_init);
158module_exit(nvram_cleanup);
159MODULE_LICENSE("GPL");
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 4a22b4b41aef..d0ad85900b79 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -377,7 +377,7 @@ static __init int hpet_mmap_enable(char *str)
377 pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled"); 377 pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
378 return 1; 378 return 1;
379} 379}
380__setup("hpet_mmap", hpet_mmap_enable); 380__setup("hpet_mmap=", hpet_mmap_enable);
381 381
382static int hpet_mmap(struct file *file, struct vm_area_struct *vma) 382static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
383{ 383{
@@ -842,7 +842,6 @@ int hpet_alloc(struct hpet_data *hdp)
842 struct hpet_dev *devp; 842 struct hpet_dev *devp;
843 u32 i, ntimer; 843 u32 i, ntimer;
844 struct hpets *hpetp; 844 struct hpets *hpetp;
845 size_t siz;
846 struct hpet __iomem *hpet; 845 struct hpet __iomem *hpet;
847 static struct hpets *last; 846 static struct hpets *last;
848 unsigned long period; 847 unsigned long period;
@@ -860,10 +859,8 @@ int hpet_alloc(struct hpet_data *hdp)
860 return 0; 859 return 0;
861 } 860 }
862 861
863 siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) * 862 hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs - 1),
864 sizeof(struct hpet_dev)); 863 GFP_KERNEL);
865
866 hpetp = kzalloc(siz, GFP_KERNEL);
867 864
868 if (!hpetp) 865 if (!hpetp)
869 return -ENOMEM; 866 return -ENOMEM;
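
The hpet change replaces open-coded size arithmetic with struct_size(), which saturates instead of overflowing; it passes hd_nirqs - 1 presumably because struct hpets ends in a one-element array. The generic idiom with a flexible array member looks like the following hedged sketch, where the sensor structures are hypothetical:

#include <linux/overflow.h>
#include <linux/slab.h>

struct sensor {
	int id;
};

struct sensor_bank {
	unsigned int nr;
	struct sensor sensors[];	/* flexible array member */
};

static struct sensor_bank *sensor_bank_alloc(unsigned int nr)
{
	/* struct_size() == sizeof(*bank) + nr * sizeof(bank->sensors[0]),
	 * saturating to SIZE_MAX if the multiplication would overflow. */
	struct sensor_bank *bank = kzalloc(struct_size(bank, sensors, nr),
					   GFP_KERNEL);

	if (bank)
		bank->nr = nr;
	return bank;
}
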
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 5c8d780637bd..3406852f67ff 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -729,7 +729,7 @@ static long lp_ioctl(struct file *file, unsigned int cmd,
729 ret = lp_set_timeout32(minor, (void __user *)arg); 729 ret = lp_set_timeout32(minor, (void __user *)arg);
730 break; 730 break;
731 } 731 }
732 /* fallthrough for 64-bit */ 732 /* fall through - for 64-bit */
733 case LPSETTIMEOUT_NEW: 733 case LPSETTIMEOUT_NEW:
734 ret = lp_set_timeout64(minor, (void __user *)arg); 734 ret = lp_set_timeout64(minor, (void __user *)arg);
735 break; 735 break;
@@ -757,7 +757,7 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd,
757 ret = lp_set_timeout32(minor, (void __user *)arg); 757 ret = lp_set_timeout32(minor, (void __user *)arg);
758 break; 758 break;
759 } 759 }
760 /* fallthrough for x32 mode */ 760 /* fall through - for x32 mode */
761 case LPSETTIMEOUT_NEW: 761 case LPSETTIMEOUT_NEW:
762 ret = lp_set_timeout64(minor, (void __user *)arg); 762 ret = lp_set_timeout64(minor, (void __user *)arg);
763 break; 763 break;
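
The lp.c hunks only reword the annotations into the "fall through" comment form that GCC's -Wimplicit-fallthrough recognises. A tiny illustration of the convention; the cases and names below are made up:

#include <linux/errno.h>

static int ex_parse_cmd(unsigned int cmd, unsigned long arg, int compat)
{
	int ret;

	switch (cmd) {
	case 1:
		if (compat) {
			ret = -EINVAL;
			break;
		}
		/* fall through - native callers are handled like case 2 */
	case 2:
		ret = (int)arg;
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}
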
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 8c9216a0f62e..0a31b60bee7b 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -50,6 +50,7 @@ static LIST_HEAD(soft_list);
50 * file operations 50 * file operations
51 */ 51 */
52static const struct file_operations mbcs_ops = { 52static const struct file_operations mbcs_ops = {
53 .owner = THIS_MODULE,
53 .open = mbcs_open, 54 .open = mbcs_open,
54 .llseek = mbcs_sram_llseek, 55 .llseek = mbcs_sram_llseek,
55 .read = mbcs_sram_read, 56 .read = mbcs_sram_read,
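
The mbcs change restores the usual .owner = THIS_MODULE convention: the VFS takes a reference on fops->owner while the file is open, so the module cannot be unloaded underneath an open descriptor. A hedged sketch with placeholder foo_* handlers:

#include <linux/fs.h>
#include <linux/module.h>

static int foo_open(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations foo_fops = {
	.owner  = THIS_MODULE,
	.open   = foo_open,
	.llseek = no_llseek,
};
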
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 25264d65e716..eff1e3f1b3a2 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -21,13 +21,6 @@
21 * ioctl(NVRAM_SETCKS) (doesn't change contents, just makes checksum valid 21 * ioctl(NVRAM_SETCKS) (doesn't change contents, just makes checksum valid
22 * again; use with care!) 22 * again; use with care!)
23 * 23 *
24 * This file also provides some functions for other parts of the kernel that
25 * want to access the NVRAM: nvram_{read,write,check_checksum,set_checksum}.
26 * Obviously this can be used only if this driver is always configured into
27 * the kernel and is not a module. Since the functions are used by some Atari
28 * drivers, this is the case on the Atari.
29 *
30 *
31 * 1.1 Cesar Barros: SMP locking fixes 24 * 1.1 Cesar Barros: SMP locking fixes
32 * added changelog 25 * added changelog
33 * 1.2 Erik Gilling: Cobalt Networks support 26 * 1.2 Erik Gilling: Cobalt Networks support
@@ -39,64 +32,6 @@
39 32
40#include <linux/module.h> 33#include <linux/module.h>
41#include <linux/nvram.h> 34#include <linux/nvram.h>
42
43#define PC 1
44#define ATARI 2
45
46/* select machine configuration */
47#if defined(CONFIG_ATARI)
48# define MACH ATARI
49#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) /* and ?? */
50# define MACH PC
51#else
52# error Cannot build nvram driver for this machine configuration.
53#endif
54
55#if MACH == PC
56
57/* RTC in a PC */
58#define CHECK_DRIVER_INIT() 1
59
60/* On PCs, the checksum is built only over bytes 2..31 */
61#define PC_CKS_RANGE_START 2
62#define PC_CKS_RANGE_END 31
63#define PC_CKS_LOC 32
64#define NVRAM_BYTES (128-NVRAM_FIRST_BYTE)
65
66#define mach_check_checksum pc_check_checksum
67#define mach_set_checksum pc_set_checksum
68#define mach_proc_infos pc_proc_infos
69
70#endif
71
72#if MACH == ATARI
73
74/* Special parameters for RTC in Atari machines */
75#include <asm/atarihw.h>
76#include <asm/atariints.h>
77#define RTC_PORT(x) (TT_RTC_BAS + 2*(x))
78#define CHECK_DRIVER_INIT() (MACH_IS_ATARI && ATARIHW_PRESENT(TT_CLK))
79
80#define NVRAM_BYTES 50
81
82/* On Ataris, the checksum is over all bytes except the checksum bytes
83 * themselves; these are at the very end */
84#define ATARI_CKS_RANGE_START 0
85#define ATARI_CKS_RANGE_END 47
86#define ATARI_CKS_LOC 48
87
88#define mach_check_checksum atari_check_checksum
89#define mach_set_checksum atari_set_checksum
90#define mach_proc_infos atari_proc_infos
91
92#endif
93
94/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
95 * rtc_lock held. Due to the index-port/data-port design of the RTC, we
96 * don't want two different things trying to get to it at once. (e.g. the
97 * periodic 11 min sync from kernel/time/ntp.c vs. this driver.)
98 */
99
100#include <linux/types.h> 35#include <linux/types.h>
101#include <linux/errno.h> 36#include <linux/errno.h>
102#include <linux/miscdevice.h> 37#include <linux/miscdevice.h>
@@ -106,28 +41,26 @@
106#include <linux/init.h> 41#include <linux/init.h>
107#include <linux/proc_fs.h> 42#include <linux/proc_fs.h>
108#include <linux/seq_file.h> 43#include <linux/seq_file.h>
44#include <linux/slab.h>
109#include <linux/spinlock.h> 45#include <linux/spinlock.h>
110#include <linux/io.h> 46#include <linux/io.h>
111#include <linux/uaccess.h> 47#include <linux/uaccess.h>
112#include <linux/mutex.h> 48#include <linux/mutex.h>
113#include <linux/pagemap.h> 49#include <linux/pagemap.h>
114 50
51#ifdef CONFIG_PPC
52#include <asm/nvram.h>
53#endif
115 54
116static DEFINE_MUTEX(nvram_mutex); 55static DEFINE_MUTEX(nvram_mutex);
117static DEFINE_SPINLOCK(nvram_state_lock); 56static DEFINE_SPINLOCK(nvram_state_lock);
118static int nvram_open_cnt; /* #times opened */ 57static int nvram_open_cnt; /* #times opened */
119static int nvram_open_mode; /* special open modes */ 58static int nvram_open_mode; /* special open modes */
59static ssize_t nvram_size;
120#define NVRAM_WRITE 1 /* opened for writing (exclusive) */ 60#define NVRAM_WRITE 1 /* opened for writing (exclusive) */
121#define NVRAM_EXCL 2 /* opened with O_EXCL */ 61#define NVRAM_EXCL 2 /* opened with O_EXCL */
122 62
123static int mach_check_checksum(void); 63#ifdef CONFIG_X86
124static void mach_set_checksum(void);
125
126#ifdef CONFIG_PROC_FS
127static void mach_proc_infos(unsigned char *contents, struct seq_file *seq,
128 void *offset);
129#endif
130
131/* 64/*
132 * These functions are provided to be called internally or by other parts of 65 * These functions are provided to be called internally or by other parts of
133 * the kernel. It's up to the caller to ensure correct checksum before reading 66 * the kernel. It's up to the caller to ensure correct checksum before reading
@@ -139,13 +72,20 @@ static void mach_proc_infos(unsigned char *contents, struct seq_file *seq,
139 * know about the RTC cruft. 72 * know about the RTC cruft.
140 */ 73 */
141 74
142unsigned char __nvram_read_byte(int i) 75#define NVRAM_BYTES (128 - NVRAM_FIRST_BYTE)
76
77/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
78 * rtc_lock held. Due to the index-port/data-port design of the RTC, we
79 * don't want two different things trying to get to it at once. (e.g. the
80 * periodic 11 min sync from kernel/time/ntp.c vs. this driver.)
81 */
82
83static unsigned char __nvram_read_byte(int i)
143{ 84{
144 return CMOS_READ(NVRAM_FIRST_BYTE + i); 85 return CMOS_READ(NVRAM_FIRST_BYTE + i);
145} 86}
146EXPORT_SYMBOL(__nvram_read_byte);
147 87
148unsigned char nvram_read_byte(int i) 88static unsigned char pc_nvram_read_byte(int i)
149{ 89{
150 unsigned long flags; 90 unsigned long flags;
151 unsigned char c; 91 unsigned char c;
@@ -155,16 +95,14 @@ unsigned char nvram_read_byte(int i)
155 spin_unlock_irqrestore(&rtc_lock, flags); 95 spin_unlock_irqrestore(&rtc_lock, flags);
156 return c; 96 return c;
157} 97}
158EXPORT_SYMBOL(nvram_read_byte);
159 98
160/* This races nicely with trying to read with checksum checking (nvram_read) */ 99/* This races nicely with trying to read with checksum checking (nvram_read) */
161void __nvram_write_byte(unsigned char c, int i) 100static void __nvram_write_byte(unsigned char c, int i)
162{ 101{
163 CMOS_WRITE(c, NVRAM_FIRST_BYTE + i); 102 CMOS_WRITE(c, NVRAM_FIRST_BYTE + i);
164} 103}
165EXPORT_SYMBOL(__nvram_write_byte);
166 104
167void nvram_write_byte(unsigned char c, int i) 105static void pc_nvram_write_byte(unsigned char c, int i)
168{ 106{
169 unsigned long flags; 107 unsigned long flags;
170 108
@@ -172,172 +110,266 @@ void nvram_write_byte(unsigned char c, int i)
172 __nvram_write_byte(c, i); 110 __nvram_write_byte(c, i);
173 spin_unlock_irqrestore(&rtc_lock, flags); 111 spin_unlock_irqrestore(&rtc_lock, flags);
174} 112}
175EXPORT_SYMBOL(nvram_write_byte);
176 113
177int __nvram_check_checksum(void) 114/* On PCs, the checksum is built only over bytes 2..31 */
115#define PC_CKS_RANGE_START 2
116#define PC_CKS_RANGE_END 31
117#define PC_CKS_LOC 32
118
119static int __nvram_check_checksum(void)
178{ 120{
179 return mach_check_checksum(); 121 int i;
122 unsigned short sum = 0;
123 unsigned short expect;
124
125 for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
126 sum += __nvram_read_byte(i);
127 expect = __nvram_read_byte(PC_CKS_LOC)<<8 |
128 __nvram_read_byte(PC_CKS_LOC+1);
129 return (sum & 0xffff) == expect;
180} 130}
181EXPORT_SYMBOL(__nvram_check_checksum);
182 131
183int nvram_check_checksum(void) 132static void __nvram_set_checksum(void)
184{ 133{
185 unsigned long flags; 134 int i;
186 int rv; 135 unsigned short sum = 0;
187 136
188 spin_lock_irqsave(&rtc_lock, flags); 137 for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
189 rv = __nvram_check_checksum(); 138 sum += __nvram_read_byte(i);
190 spin_unlock_irqrestore(&rtc_lock, flags); 139 __nvram_write_byte(sum >> 8, PC_CKS_LOC);
191 return rv; 140 __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1);
192} 141}
193EXPORT_SYMBOL(nvram_check_checksum);
194 142
195static void __nvram_set_checksum(void) 143static long pc_nvram_set_checksum(void)
196{ 144{
197 mach_set_checksum(); 145 spin_lock_irq(&rtc_lock);
146 __nvram_set_checksum();
147 spin_unlock_irq(&rtc_lock);
148 return 0;
198} 149}
199 150
200#if 0 151static long pc_nvram_initialize(void)
201void nvram_set_checksum(void)
202{ 152{
203 unsigned long flags; 153 ssize_t i;
204 154
205 spin_lock_irqsave(&rtc_lock, flags); 155 spin_lock_irq(&rtc_lock);
156 for (i = 0; i < NVRAM_BYTES; ++i)
157 __nvram_write_byte(0, i);
206 __nvram_set_checksum(); 158 __nvram_set_checksum();
207 spin_unlock_irqrestore(&rtc_lock, flags); 159 spin_unlock_irq(&rtc_lock);
160 return 0;
208} 161}
209#endif /* 0 */
210
211/*
212 * The are the file operation function for user access to /dev/nvram
213 */
214 162
215static loff_t nvram_llseek(struct file *file, loff_t offset, int origin) 163static ssize_t pc_nvram_get_size(void)
216{ 164{
217 return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE, 165 return NVRAM_BYTES;
218 NVRAM_BYTES);
219} 166}
220 167
221static ssize_t nvram_read(struct file *file, char __user *buf, 168static ssize_t pc_nvram_read(char *buf, size_t count, loff_t *ppos)
222 size_t count, loff_t *ppos)
223{ 169{
224 unsigned char contents[NVRAM_BYTES]; 170 char *p = buf;
225 unsigned i = *ppos; 171 loff_t i;
226 unsigned char *tmp;
227 172
228 spin_lock_irq(&rtc_lock); 173 spin_lock_irq(&rtc_lock);
174 if (!__nvram_check_checksum()) {
175 spin_unlock_irq(&rtc_lock);
176 return -EIO;
177 }
178 for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
179 *p = __nvram_read_byte(i);
180 spin_unlock_irq(&rtc_lock);
229 181
230 if (!__nvram_check_checksum()) 182 *ppos = i;
231 goto checksum_err; 183 return p - buf;
184}
232 185
233 for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp) 186static ssize_t pc_nvram_write(char *buf, size_t count, loff_t *ppos)
234 *tmp = __nvram_read_byte(i); 187{
188 char *p = buf;
189 loff_t i;
235 190
191 spin_lock_irq(&rtc_lock);
192 if (!__nvram_check_checksum()) {
193 spin_unlock_irq(&rtc_lock);
194 return -EIO;
195 }
196 for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
197 __nvram_write_byte(*p, i);
198 __nvram_set_checksum();
236 spin_unlock_irq(&rtc_lock); 199 spin_unlock_irq(&rtc_lock);
237 200
238 if (copy_to_user(buf, contents, tmp - contents))
239 return -EFAULT;
240
241 *ppos = i; 201 *ppos = i;
202 return p - buf;
203}
242 204
243 return tmp - contents; 205const struct nvram_ops arch_nvram_ops = {
206 .read = pc_nvram_read,
207 .write = pc_nvram_write,
208 .read_byte = pc_nvram_read_byte,
209 .write_byte = pc_nvram_write_byte,
210 .get_size = pc_nvram_get_size,
211 .set_checksum = pc_nvram_set_checksum,
212 .initialize = pc_nvram_initialize,
213};
214EXPORT_SYMBOL(arch_nvram_ops);
215#endif /* CONFIG_X86 */
244 216
245checksum_err: 217/*
246 spin_unlock_irq(&rtc_lock); 218 * These are the file operation functions for user access to /dev/nvram
247 return -EIO; 219 */
248}
249 220
250static ssize_t nvram_write(struct file *file, const char __user *buf, 221static loff_t nvram_misc_llseek(struct file *file, loff_t offset, int origin)
251 size_t count, loff_t *ppos)
252{ 222{
253 unsigned char contents[NVRAM_BYTES]; 223 return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
254 unsigned i = *ppos; 224 nvram_size);
255 unsigned char *tmp; 225}
256 226
257 if (i >= NVRAM_BYTES) 227static ssize_t nvram_misc_read(struct file *file, char __user *buf,
258 return 0; /* Past EOF */ 228 size_t count, loff_t *ppos)
229{
230 char *tmp;
231 ssize_t ret;
259 232
260 if (count > NVRAM_BYTES - i)
261 count = NVRAM_BYTES - i;
262 if (count > NVRAM_BYTES)
263 return -EFAULT; /* Can't happen, but prove it to gcc */
264 233
265 if (copy_from_user(contents, buf, count)) 234 if (!access_ok(buf, count))
266 return -EFAULT; 235 return -EFAULT;
236 if (*ppos >= nvram_size)
237 return 0;
267 238
268 spin_lock_irq(&rtc_lock); 239 count = min_t(size_t, count, nvram_size - *ppos);
240 count = min_t(size_t, count, PAGE_SIZE);
269 241
270 if (!__nvram_check_checksum()) 242 tmp = kmalloc(count, GFP_KERNEL);
271 goto checksum_err; 243 if (!tmp)
244 return -ENOMEM;
272 245
273 for (tmp = contents; count--; ++i, ++tmp) 246 ret = nvram_read(tmp, count, ppos);
274 __nvram_write_byte(*tmp, i); 247 if (ret <= 0)
248 goto out;
275 249
276 __nvram_set_checksum(); 250 if (copy_to_user(buf, tmp, ret)) {
251 *ppos -= ret;
252 ret = -EFAULT;
253 }
277 254
278 spin_unlock_irq(&rtc_lock); 255out:
256 kfree(tmp);
257 return ret;
258}
279 259
280 *ppos = i; 260static ssize_t nvram_misc_write(struct file *file, const char __user *buf,
261 size_t count, loff_t *ppos)
262{
263 char *tmp;
264 ssize_t ret;
281 265
282 return tmp - contents; 266 if (!access_ok(buf, count))
267 return -EFAULT;
268 if (*ppos >= nvram_size)
269 return 0;
283 270
284checksum_err: 271 count = min_t(size_t, count, nvram_size - *ppos);
285 spin_unlock_irq(&rtc_lock); 272 count = min_t(size_t, count, PAGE_SIZE);
286 return -EIO; 273
274 tmp = memdup_user(buf, count);
275 if (IS_ERR(tmp))
276 return PTR_ERR(tmp);
277
278 ret = nvram_write(tmp, count, ppos);
279 kfree(tmp);
280 return ret;
287} 281}
288 282
289static long nvram_ioctl(struct file *file, unsigned int cmd, 283static long nvram_misc_ioctl(struct file *file, unsigned int cmd,
290 unsigned long arg) 284 unsigned long arg)
291{ 285{
292 int i; 286 long ret = -ENOTTY;
293 287
294 switch (cmd) { 288 switch (cmd) {
295 289#ifdef CONFIG_PPC
290 case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
291 pr_warn("nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
292 /* fall through */
293 case IOC_NVRAM_GET_OFFSET:
294 ret = -EINVAL;
295#ifdef CONFIG_PPC_PMAC
296 if (machine_is(powermac)) {
297 int part, offset;
298
299 if (copy_from_user(&part, (void __user *)arg,
300 sizeof(part)) != 0)
301 return -EFAULT;
302 if (part < pmac_nvram_OF || part > pmac_nvram_NR)
303 return -EINVAL;
304 offset = pmac_get_partition(part);
305 if (offset < 0)
306 return -EINVAL;
307 if (copy_to_user((void __user *)arg,
308 &offset, sizeof(offset)) != 0)
309 return -EFAULT;
310 ret = 0;
311 }
312#endif
313 break;
314#ifdef CONFIG_PPC32
315 case IOC_NVRAM_SYNC:
316 if (ppc_md.nvram_sync != NULL) {
317 mutex_lock(&nvram_mutex);
318 ppc_md.nvram_sync();
319 mutex_unlock(&nvram_mutex);
320 }
321 ret = 0;
322 break;
323#endif
324#elif defined(CONFIG_X86) || defined(CONFIG_M68K)
296 case NVRAM_INIT: 325 case NVRAM_INIT:
297 /* initialize NVRAM contents and checksum */ 326 /* initialize NVRAM contents and checksum */
298 if (!capable(CAP_SYS_ADMIN)) 327 if (!capable(CAP_SYS_ADMIN))
299 return -EACCES; 328 return -EACCES;
300 329
301 mutex_lock(&nvram_mutex); 330 if (arch_nvram_ops.initialize != NULL) {
302 spin_lock_irq(&rtc_lock); 331 mutex_lock(&nvram_mutex);
303 332 ret = arch_nvram_ops.initialize();
304 for (i = 0; i < NVRAM_BYTES; ++i) 333 mutex_unlock(&nvram_mutex);
305 __nvram_write_byte(0, i); 334 }
306 __nvram_set_checksum(); 335 break;
307
308 spin_unlock_irq(&rtc_lock);
309 mutex_unlock(&nvram_mutex);
310 return 0;
311
312 case NVRAM_SETCKS: 336 case NVRAM_SETCKS:
313 /* just set checksum, contents unchanged (maybe useful after 337 /* just set checksum, contents unchanged (maybe useful after
314 * checksum garbaged somehow...) */ 338 * checksum garbaged somehow...) */
315 if (!capable(CAP_SYS_ADMIN)) 339 if (!capable(CAP_SYS_ADMIN))
316 return -EACCES; 340 return -EACCES;
317 341
318 mutex_lock(&nvram_mutex); 342 if (arch_nvram_ops.set_checksum != NULL) {
319 spin_lock_irq(&rtc_lock); 343 mutex_lock(&nvram_mutex);
320 __nvram_set_checksum(); 344 ret = arch_nvram_ops.set_checksum();
321 spin_unlock_irq(&rtc_lock); 345 mutex_unlock(&nvram_mutex);
322 mutex_unlock(&nvram_mutex); 346 }
323 return 0; 347 break;
324 348#endif /* CONFIG_X86 || CONFIG_M68K */
325 default:
326 return -ENOTTY;
327 } 349 }
350 return ret;
328} 351}
329 352
330static int nvram_open(struct inode *inode, struct file *file) 353static int nvram_misc_open(struct inode *inode, struct file *file)
331{ 354{
332 spin_lock(&nvram_state_lock); 355 spin_lock(&nvram_state_lock);
333 356
357 /* Prevent multiple readers/writers if desired. */
334 if ((nvram_open_cnt && (file->f_flags & O_EXCL)) || 358 if ((nvram_open_cnt && (file->f_flags & O_EXCL)) ||
335 (nvram_open_mode & NVRAM_EXCL) || 359 (nvram_open_mode & NVRAM_EXCL)) {
336 ((file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE))) {
337 spin_unlock(&nvram_state_lock); 360 spin_unlock(&nvram_state_lock);
338 return -EBUSY; 361 return -EBUSY;
339 } 362 }
340 363
364#if defined(CONFIG_X86) || defined(CONFIG_M68K)
365 /* Prevent multiple writers if the set_checksum ioctl is implemented. */
366 if ((arch_nvram_ops.set_checksum != NULL) &&
367 (file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE)) {
368 spin_unlock(&nvram_state_lock);
369 return -EBUSY;
370 }
371#endif
372
341 if (file->f_flags & O_EXCL) 373 if (file->f_flags & O_EXCL)
342 nvram_open_mode |= NVRAM_EXCL; 374 nvram_open_mode |= NVRAM_EXCL;
343 if (file->f_mode & FMODE_WRITE) 375 if (file->f_mode & FMODE_WRITE)
@@ -349,7 +381,7 @@ static int nvram_open(struct inode *inode, struct file *file)
349 return 0; 381 return 0;
350} 382}
351 383
352static int nvram_release(struct inode *inode, struct file *file) 384static int nvram_misc_release(struct inode *inode, struct file *file)
353{ 385{
354 spin_lock(&nvram_state_lock); 386 spin_lock(&nvram_state_lock);
355 387
@@ -366,123 +398,7 @@ static int nvram_release(struct inode *inode, struct file *file)
366 return 0; 398 return 0;
367} 399}
368 400
369#ifndef CONFIG_PROC_FS 401#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
370static int nvram_add_proc_fs(void)
371{
372 return 0;
373}
374
375#else
376
377static int nvram_proc_read(struct seq_file *seq, void *offset)
378{
379 unsigned char contents[NVRAM_BYTES];
380 int i = 0;
381
382 spin_lock_irq(&rtc_lock);
383 for (i = 0; i < NVRAM_BYTES; ++i)
384 contents[i] = __nvram_read_byte(i);
385 spin_unlock_irq(&rtc_lock);
386
387 mach_proc_infos(contents, seq, offset);
388
389 return 0;
390}
391
392static int nvram_add_proc_fs(void)
393{
394 if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read))
395 return -ENOMEM;
396 return 0;
397}
398
399#endif /* CONFIG_PROC_FS */
400
401static const struct file_operations nvram_fops = {
402 .owner = THIS_MODULE,
403 .llseek = nvram_llseek,
404 .read = nvram_read,
405 .write = nvram_write,
406 .unlocked_ioctl = nvram_ioctl,
407 .open = nvram_open,
408 .release = nvram_release,
409};
410
411static struct miscdevice nvram_dev = {
412 NVRAM_MINOR,
413 "nvram",
414 &nvram_fops
415};
416
417static int __init nvram_init(void)
418{
419 int ret;
420
421 /* First test whether the driver should init at all */
422 if (!CHECK_DRIVER_INIT())
423 return -ENODEV;
424
425 ret = misc_register(&nvram_dev);
426 if (ret) {
427 printk(KERN_ERR "nvram: can't misc_register on minor=%d\n",
428 NVRAM_MINOR);
429 goto out;
430 }
431 ret = nvram_add_proc_fs();
432 if (ret) {
433 printk(KERN_ERR "nvram: can't create /proc/driver/nvram\n");
434 goto outmisc;
435 }
436 ret = 0;
437 printk(KERN_INFO "Non-volatile memory driver v" NVRAM_VERSION "\n");
438out:
439 return ret;
440outmisc:
441 misc_deregister(&nvram_dev);
442 goto out;
443}
444
445static void __exit nvram_cleanup_module(void)
446{
447 remove_proc_entry("driver/nvram", NULL);
448 misc_deregister(&nvram_dev);
449}
450
451module_init(nvram_init);
452module_exit(nvram_cleanup_module);
453
454/*
455 * Machine specific functions
456 */
457
458#if MACH == PC
459
460static int pc_check_checksum(void)
461{
462 int i;
463 unsigned short sum = 0;
464 unsigned short expect;
465
466 for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
467 sum += __nvram_read_byte(i);
468 expect = __nvram_read_byte(PC_CKS_LOC)<<8 |
469 __nvram_read_byte(PC_CKS_LOC+1);
470 return (sum & 0xffff) == expect;
471}
472
473static void pc_set_checksum(void)
474{
475 int i;
476 unsigned short sum = 0;
477
478 for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
479 sum += __nvram_read_byte(i);
480 __nvram_write_byte(sum >> 8, PC_CKS_LOC);
481 __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1);
482}
483
484#ifdef CONFIG_PROC_FS
485
486static const char * const floppy_types[] = { 402static const char * const floppy_types[] = {
487 "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M", 403 "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M",
488 "3.5'' 2.88M", "3.5'' 2.88M" 404 "3.5'' 2.88M", "3.5'' 2.88M"
@@ -495,8 +411,8 @@ static const char * const gfx_types[] = {
495 "monochrome", 411 "monochrome",
496}; 412};
497 413
498static void pc_proc_infos(unsigned char *nvram, struct seq_file *seq, 414static void pc_nvram_proc_read(unsigned char *nvram, struct seq_file *seq,
499 void *offset) 415 void *offset)
500{ 416{
501 int checksum; 417 int checksum;
502 int type; 418 int type;
@@ -557,143 +473,76 @@ static void pc_proc_infos(unsigned char *nvram, struct seq_file *seq,
557 473
558 return; 474 return;
559} 475}
560#endif
561 476
562#endif /* MACH == PC */ 477static int nvram_proc_read(struct seq_file *seq, void *offset)
563
564#if MACH == ATARI
565
566static int atari_check_checksum(void)
567{ 478{
568 int i; 479 unsigned char contents[NVRAM_BYTES];
569 unsigned char sum = 0; 480 int i = 0;
570 481
571 for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i) 482 spin_lock_irq(&rtc_lock);
572 sum += __nvram_read_byte(i); 483 for (i = 0; i < NVRAM_BYTES; ++i)
573 return (__nvram_read_byte(ATARI_CKS_LOC) == (~sum & 0xff)) && 484 contents[i] = __nvram_read_byte(i);
574 (__nvram_read_byte(ATARI_CKS_LOC + 1) == (sum & 0xff)); 485 spin_unlock_irq(&rtc_lock);
575}
576 486
577static void atari_set_checksum(void) 487 pc_nvram_proc_read(contents, seq, offset);
578{
579 int i;
580 unsigned char sum = 0;
581 488
582 for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i) 489 return 0;
583 sum += __nvram_read_byte(i);
584 __nvram_write_byte(~sum, ATARI_CKS_LOC);
585 __nvram_write_byte(sum, ATARI_CKS_LOC + 1);
586} 490}
491#endif /* CONFIG_X86 && CONFIG_PROC_FS */
587 492
588#ifdef CONFIG_PROC_FS 493static const struct file_operations nvram_misc_fops = {
589 494 .owner = THIS_MODULE,
590static struct { 495 .llseek = nvram_misc_llseek,
591 unsigned char val; 496 .read = nvram_misc_read,
592 const char *name; 497 .write = nvram_misc_write,
593} boot_prefs[] = { 498 .unlocked_ioctl = nvram_misc_ioctl,
594 { 0x80, "TOS" }, 499 .open = nvram_misc_open,
595 { 0x40, "ASV" }, 500 .release = nvram_misc_release,
596 { 0x20, "NetBSD (?)" },
597 { 0x10, "Linux" },
598 { 0x00, "unspecified" }
599};
600
601static const char * const languages[] = {
602 "English (US)",
603 "German",
604 "French",
605 "English (UK)",
606 "Spanish",
607 "Italian",
608 "6 (undefined)",
609 "Swiss (French)",
610 "Swiss (German)"
611};
612
613static const char * const dateformat[] = {
614 "MM%cDD%cYY",
615 "DD%cMM%cYY",
616 "YY%cMM%cDD",
617 "YY%cDD%cMM",
618 "4 (undefined)",
619 "5 (undefined)",
620 "6 (undefined)",
621 "7 (undefined)"
622}; 501};
623 502
624static const char * const colors[] = { 503static struct miscdevice nvram_misc = {
625 "2", "4", "16", "256", "65536", "??", "??", "??" 504 NVRAM_MINOR,
505 "nvram",
506 &nvram_misc_fops,
626}; 507};
627 508
628static void atari_proc_infos(unsigned char *nvram, struct seq_file *seq, 509static int __init nvram_module_init(void)
629 void *offset)
630{ 510{
631 int checksum = nvram_check_checksum(); 511 int ret;
632 int i;
633 unsigned vmode;
634 512
635 seq_printf(seq, "Checksum status : %svalid\n", checksum ? "" : "not "); 513 nvram_size = nvram_get_size();
514 if (nvram_size < 0)
515 return nvram_size;
636 516
637 seq_printf(seq, "Boot preference : "); 517 ret = misc_register(&nvram_misc);
638 for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i) { 518 if (ret) {
639 if (nvram[1] == boot_prefs[i].val) { 519 pr_err("nvram: can't misc_register on minor=%d\n", NVRAM_MINOR);
640 seq_printf(seq, "%s\n", boot_prefs[i].name); 520 return ret;
641 break;
642 }
643 } 521 }
644 if (i < 0)
645 seq_printf(seq, "0x%02x (undefined)\n", nvram[1]);
646
647 seq_printf(seq, "SCSI arbitration : %s\n",
648 (nvram[16] & 0x80) ? "on" : "off");
649 seq_printf(seq, "SCSI host ID : ");
650 if (nvram[16] & 0x80)
651 seq_printf(seq, "%d\n", nvram[16] & 7);
652 else
653 seq_printf(seq, "n/a\n");
654
655 /* the following entries are defined only for the Falcon */
656 if ((atari_mch_cookie >> 16) != ATARI_MCH_FALCON)
657 return;
658 522
659 seq_printf(seq, "OS language : "); 523#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
660 if (nvram[6] < ARRAY_SIZE(languages)) 524 if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read)) {
661 seq_printf(seq, "%s\n", languages[nvram[6]]); 525 pr_err("nvram: can't create /proc/driver/nvram\n");
662 else 526 misc_deregister(&nvram_misc);
663 seq_printf(seq, "%u (undefined)\n", nvram[6]); 527 return -ENOMEM;
664 seq_printf(seq, "Keyboard language: "); 528 }
665 if (nvram[7] < ARRAY_SIZE(languages)) 529#endif
666 seq_printf(seq, "%s\n", languages[nvram[7]]);
667 else
668 seq_printf(seq, "%u (undefined)\n", nvram[7]);
669 seq_printf(seq, "Date format : ");
670 seq_printf(seq, dateformat[nvram[8] & 7],
671 nvram[9] ? nvram[9] : '/', nvram[9] ? nvram[9] : '/');
672 seq_printf(seq, ", %dh clock\n", nvram[8] & 16 ? 24 : 12);
673 seq_printf(seq, "Boot delay : ");
674 if (nvram[10] == 0)
675 seq_printf(seq, "default");
676 else
677 seq_printf(seq, "%ds%s\n", nvram[10],
678 nvram[10] < 8 ? ", no memory test" : "");
679
680 vmode = (nvram[14] << 8) | nvram[15];
681 seq_printf(seq,
682 "Video mode : %s colors, %d columns, %s %s monitor\n",
683 colors[vmode & 7],
684 vmode & 8 ? 80 : 40,
685 vmode & 16 ? "VGA" : "TV", vmode & 32 ? "PAL" : "NTSC");
686 seq_printf(seq, " %soverscan, compat. mode %s%s\n",
687 vmode & 64 ? "" : "no ",
688 vmode & 128 ? "on" : "off",
689 vmode & 256 ?
690 (vmode & 16 ? ", line doubling" : ", half screen") : "");
691 530
692 return; 531 pr_info("Non-volatile memory driver v" NVRAM_VERSION "\n");
532 return 0;
693} 533}
534
535static void __exit nvram_module_exit(void)
536{
537#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
538 remove_proc_entry("driver/nvram", NULL);
694#endif 539#endif
540 misc_deregister(&nvram_misc);
541}
695 542
696#endif /* MACH == ATARI */ 543module_init(nvram_module_init);
544module_exit(nvram_module_exit);
697 545
698MODULE_LICENSE("GPL"); 546MODULE_LICENSE("GPL");
699MODULE_ALIAS_MISCDEV(NVRAM_MINOR); 547MODULE_ALIAS_MISCDEV(NVRAM_MINOR);
548MODULE_ALIAS("devname:nvram");
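
With this refactoring the architecture interface shrinks to the arch_nvram_ops table: the misc device and /proc code above only call hooks that exist, and the NVRAM_INIT/NVRAM_SETCKS ioctls explicitly check for initialize/set_checksum before using them. As a rough illustration (not part of this commit, and assuming the struct nvram_ops declaration this series adds to <linux/nvram.h>, whose generic nvram_read()/nvram_write() wrappers fall back to the byte accessors when no bulk hook is given), a port with simple byte-addressed battery-backed RAM could provide just three hooks; the foo_* names and FOO_NVRAM_BYTES below are hypothetical:

    /* Hypothetical arch glue for the new arch_nvram_ops interface. */
    #include <linux/export.h>
    #include <linux/nvram.h>

    #define FOO_NVRAM_BYTES 64    /* assumed size of the device */

    static unsigned char foo_nvram_read_byte(int addr)
    {
            /* Read one byte of battery-backed RAM at offset addr (stub). */
            return 0;
    }

    static void foo_nvram_write_byte(unsigned char val, int addr)
    {
            /* Write one byte of battery-backed RAM at offset addr (stub). */
    }

    static ssize_t foo_nvram_get_size(void)
    {
            return FOO_NVRAM_BYTES;
    }

    const struct nvram_ops arch_nvram_ops = {
            .read_byte  = foo_nvram_read_byte,
            .write_byte = foo_nvram_write_byte,
            .get_size   = foo_nvram_get_size,
    };
    EXPORT_SYMBOL(arch_nvram_ops);

Hooks left NULL are simply never called; the ioctl handler above, for instance, only invokes initialize and set_checksum when they are non-NULL.
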
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index de15bf55895b..8e17149655f0 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -114,6 +114,14 @@ config EXTCON_PALMAS
114 Say Y here to enable support for USB peripheral and USB host 114 Say Y here to enable support for USB peripheral and USB host
115 detection by palmas usb. 115 detection by palmas usb.
116 116
117config EXTCON_PTN5150
118 tristate "NXP PTN5150 CC LOGIC USB EXTCON support"
119 depends on I2C && GPIOLIB || COMPILE_TEST
120 select REGMAP_I2C
121 help
122 Say Y here to enable support for USB peripheral and USB host
123 detection by NXP PTN5150 CC (Configuration Channel) logic chip.
124
117config EXTCON_QCOM_SPMI_MISC 125config EXTCON_QCOM_SPMI_MISC
118 tristate "Qualcomm USB extcon support" 126 tristate "Qualcomm USB extcon support"
119 depends on ARCH_QCOM || COMPILE_TEST 127 depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 0888fdeded72..261ce4cfe209 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
17obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o 17obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o
18obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o 18obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
19obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o 19obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o
20obj-$(CONFIG_EXTCON_PTN5150) += extcon-ptn5150.o
20obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o 21obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o
21obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o 22obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o
22obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o 23obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o
diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c
new file mode 100644
index 000000000000..d1c997599390
--- /dev/null
+++ b/drivers/extcon/extcon-ptn5150.c
@@ -0,0 +1,339 @@
1// SPDX-License-Identifier: GPL-2.0+
2//
3// extcon-ptn5150.c - PTN5150 CC logic extcon driver to support USB detection
4//
5// Based on extcon-sm5502.c driver
6// Copyright (c) 2018-2019 by Vijai Kumar K
7// Author: Vijai Kumar K <vijaikumar.kanagarajan@gmail.com>
8
9#include <linux/err.h>
10#include <linux/i2c.h>
11#include <linux/interrupt.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/regmap.h>
15#include <linux/slab.h>
16#include <linux/extcon-provider.h>
17#include <linux/gpio/consumer.h>
18
19/* PTN5150 registers */
20enum ptn5150_reg {
21 PTN5150_REG_DEVICE_ID = 0x01,
22 PTN5150_REG_CONTROL,
23 PTN5150_REG_INT_STATUS,
24 PTN5150_REG_CC_STATUS,
25 PTN5150_REG_CON_DET = 0x09,
26 PTN5150_REG_VCONN_STATUS,
27 PTN5150_REG_RESET,
28 PTN5150_REG_INT_MASK = 0x18,
29 PTN5150_REG_INT_REG_STATUS,
30 PTN5150_REG_END,
31};
32
33#define PTN5150_DFP_ATTACHED 0x1
34#define PTN5150_UFP_ATTACHED 0x2
35
36/* Define PTN5150 MASK/SHIFT constant */
37#define PTN5150_REG_DEVICE_ID_VENDOR_SHIFT 0
38#define PTN5150_REG_DEVICE_ID_VENDOR_MASK \
39 (0x3 << PTN5150_REG_DEVICE_ID_VENDOR_SHIFT)
40
41#define PTN5150_REG_DEVICE_ID_VERSION_SHIFT 3
42#define PTN5150_REG_DEVICE_ID_VERSION_MASK \
43 (0x1f << PTN5150_REG_DEVICE_ID_VERSION_SHIFT)
44
45#define PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT 2
46#define PTN5150_REG_CC_PORT_ATTACHMENT_MASK \
47 (0x7 << PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT)
48
49#define PTN5150_REG_CC_VBUS_DETECTION_SHIFT 7
50#define PTN5150_REG_CC_VBUS_DETECTION_MASK \
51 (0x1 << PTN5150_REG_CC_VBUS_DETECTION_SHIFT)
52
53#define PTN5150_REG_INT_CABLE_ATTACH_SHIFT 0
54#define PTN5150_REG_INT_CABLE_ATTACH_MASK \
55 (0x1 << PTN5150_REG_INT_CABLE_ATTACH_SHIFT)
56
57#define PTN5150_REG_INT_CABLE_DETACH_SHIFT 1
58#define PTN5150_REG_INT_CABLE_DETACH_MASK \
 59 (0x1 << PTN5150_REG_INT_CABLE_DETACH_SHIFT)
60
61struct ptn5150_info {
62 struct device *dev;
63 struct extcon_dev *edev;
64 struct i2c_client *i2c;
65 struct regmap *regmap;
66 struct gpio_desc *int_gpiod;
67 struct gpio_desc *vbus_gpiod;
68 int irq;
69 struct work_struct irq_work;
70 struct mutex mutex;
71};
72
73/* List of detectable cables */
74static const unsigned int ptn5150_extcon_cable[] = {
75 EXTCON_USB,
76 EXTCON_USB_HOST,
77 EXTCON_NONE,
78};
79
80static const struct regmap_config ptn5150_regmap_config = {
81 .reg_bits = 8,
82 .val_bits = 8,
83 .max_register = PTN5150_REG_END,
84};
85
86static void ptn5150_irq_work(struct work_struct *work)
87{
88 struct ptn5150_info *info = container_of(work,
89 struct ptn5150_info, irq_work);
90 int ret = 0;
91 unsigned int reg_data;
92 unsigned int int_status;
93
94 if (!info->edev)
95 return;
96
97 mutex_lock(&info->mutex);
98
99 ret = regmap_read(info->regmap, PTN5150_REG_CC_STATUS, &reg_data);
100 if (ret) {
101 dev_err(info->dev, "failed to read CC STATUS %d\n", ret);
102 mutex_unlock(&info->mutex);
103 return;
104 }
105
106 /* Clear interrupt. Read would clear the register */
107 ret = regmap_read(info->regmap, PTN5150_REG_INT_STATUS, &int_status);
108 if (ret) {
109 dev_err(info->dev, "failed to read INT STATUS %d\n", ret);
110 mutex_unlock(&info->mutex);
111 return;
112 }
113
114 if (int_status) {
115 unsigned int cable_attach;
116
117 cable_attach = int_status & PTN5150_REG_INT_CABLE_ATTACH_MASK;
118 if (cable_attach) {
119 unsigned int port_status;
120 unsigned int vbus;
121
122 port_status = ((reg_data &
123 PTN5150_REG_CC_PORT_ATTACHMENT_MASK) >>
124 PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT);
125
126 switch (port_status) {
127 case PTN5150_DFP_ATTACHED:
128 extcon_set_state_sync(info->edev,
129 EXTCON_USB_HOST, false);
130 gpiod_set_value(info->vbus_gpiod, 0);
131 extcon_set_state_sync(info->edev, EXTCON_USB,
132 true);
133 break;
134 case PTN5150_UFP_ATTACHED:
135 extcon_set_state_sync(info->edev, EXTCON_USB,
136 false);
137 vbus = ((reg_data &
138 PTN5150_REG_CC_VBUS_DETECTION_MASK) >>
139 PTN5150_REG_CC_VBUS_DETECTION_SHIFT);
140 if (vbus)
141 gpiod_set_value(info->vbus_gpiod, 0);
142 else
143 gpiod_set_value(info->vbus_gpiod, 1);
144
145 extcon_set_state_sync(info->edev,
146 EXTCON_USB_HOST, true);
147 break;
148 default:
149 dev_err(info->dev,
150 "Unknown Port status : %x\n",
151 port_status);
152 break;
153 }
154 } else {
155 extcon_set_state_sync(info->edev,
156 EXTCON_USB_HOST, false);
157 extcon_set_state_sync(info->edev,
158 EXTCON_USB, false);
159 gpiod_set_value(info->vbus_gpiod, 0);
160 }
161 }
162
163 /* Clear interrupt. Read would clear the register */
164 ret = regmap_read(info->regmap, PTN5150_REG_INT_REG_STATUS,
165 &int_status);
166 if (ret) {
167 dev_err(info->dev,
168 "failed to read INT REG STATUS %d\n", ret);
169 mutex_unlock(&info->mutex);
170 return;
171 }
172
173 mutex_unlock(&info->mutex);
174}
175
176
177static irqreturn_t ptn5150_irq_handler(int irq, void *data)
178{
179 struct ptn5150_info *info = data;
180
181 schedule_work(&info->irq_work);
182
183 return IRQ_HANDLED;
184}
185
186static int ptn5150_init_dev_type(struct ptn5150_info *info)
187{
188 unsigned int reg_data, vendor_id, version_id;
189 int ret;
190
191 ret = regmap_read(info->regmap, PTN5150_REG_DEVICE_ID, &reg_data);
192 if (ret) {
193 dev_err(info->dev, "failed to read DEVICE_ID %d\n", ret);
194 return -EINVAL;
195 }
196
197 vendor_id = ((reg_data & PTN5150_REG_DEVICE_ID_VENDOR_MASK) >>
198 PTN5150_REG_DEVICE_ID_VENDOR_SHIFT);
199 version_id = ((reg_data & PTN5150_REG_DEVICE_ID_VERSION_MASK) >>
200 PTN5150_REG_DEVICE_ID_VERSION_SHIFT);
201
202 dev_info(info->dev, "Device type: version: 0x%x, vendor: 0x%x\n",
203 version_id, vendor_id);
204
205 /* Clear any existing interrupts */
206 ret = regmap_read(info->regmap, PTN5150_REG_INT_STATUS, &reg_data);
207 if (ret) {
208 dev_err(info->dev,
209 "failed to read PTN5150_REG_INT_STATUS %d\n",
210 ret);
211 return -EINVAL;
212 }
213
214 ret = regmap_read(info->regmap, PTN5150_REG_INT_REG_STATUS, &reg_data);
215 if (ret) {
216 dev_err(info->dev,
217 "failed to read PTN5150_REG_INT_REG_STATUS %d\n", ret);
218 return -EINVAL;
219 }
220
221 return 0;
222}
223
224static int ptn5150_i2c_probe(struct i2c_client *i2c,
225 const struct i2c_device_id *id)
226{
227 struct device *dev = &i2c->dev;
228 struct device_node *np = i2c->dev.of_node;
229 struct ptn5150_info *info;
230 int ret;
231
232 if (!np)
233 return -EINVAL;
234
235 info = devm_kzalloc(&i2c->dev, sizeof(*info), GFP_KERNEL);
236 if (!info)
237 return -ENOMEM;
238 i2c_set_clientdata(i2c, info);
239
240 info->dev = &i2c->dev;
241 info->i2c = i2c;
242 info->int_gpiod = devm_gpiod_get(&i2c->dev, "int", GPIOD_IN);
243 if (IS_ERR(info->int_gpiod)) {
244 dev_err(dev, "failed to get INT GPIO\n");
245 return PTR_ERR(info->int_gpiod);
246 }
247 info->vbus_gpiod = devm_gpiod_get(&i2c->dev, "vbus", GPIOD_IN);
248 if (IS_ERR(info->vbus_gpiod)) {
249 dev_err(dev, "failed to get VBUS GPIO\n");
250 return PTR_ERR(info->vbus_gpiod);
251 }
252 ret = gpiod_direction_output(info->vbus_gpiod, 0);
253 if (ret) {
254 dev_err(dev, "failed to set VBUS GPIO direction\n");
255 return -EINVAL;
256 }
257
258 mutex_init(&info->mutex);
259
260 INIT_WORK(&info->irq_work, ptn5150_irq_work);
261
262 info->regmap = devm_regmap_init_i2c(i2c, &ptn5150_regmap_config);
263 if (IS_ERR(info->regmap)) {
264 ret = PTR_ERR(info->regmap);
265 dev_err(info->dev, "failed to allocate register map: %d\n",
266 ret);
267 return ret;
268 }
269
270 if (info->int_gpiod) {
271 info->irq = gpiod_to_irq(info->int_gpiod);
272 if (info->irq < 0) {
273 dev_err(dev, "failed to get INTB IRQ\n");
274 return info->irq;
275 }
276
277 ret = devm_request_threaded_irq(dev, info->irq, NULL,
278 ptn5150_irq_handler,
279 IRQF_TRIGGER_FALLING |
280 IRQF_ONESHOT,
281 i2c->name, info);
282 if (ret < 0) {
283 dev_err(dev, "failed to request handler for INTB IRQ\n");
284 return ret;
285 }
286 }
287
288 /* Allocate extcon device */
289 info->edev = devm_extcon_dev_allocate(info->dev, ptn5150_extcon_cable);
290 if (IS_ERR(info->edev)) {
291 dev_err(info->dev, "failed to allocate memory for extcon\n");
292 return -ENOMEM;
293 }
294
295 /* Register extcon device */
296 ret = devm_extcon_dev_register(info->dev, info->edev);
297 if (ret) {
298 dev_err(info->dev, "failed to register extcon device\n");
299 return ret;
300 }
301
302 /* Initialize PTN5150 device and print vendor id and version id */
303 ret = ptn5150_init_dev_type(info);
304 if (ret)
305 return -EINVAL;
306
307 return 0;
308}
309
310static const struct of_device_id ptn5150_dt_match[] = {
311 { .compatible = "nxp,ptn5150" },
312 { },
313};
314MODULE_DEVICE_TABLE(of, ptn5150_dt_match);
315
316static const struct i2c_device_id ptn5150_i2c_id[] = {
317 { "ptn5150", 0 },
318 { }
319};
320MODULE_DEVICE_TABLE(i2c, ptn5150_i2c_id);
321
322static struct i2c_driver ptn5150_i2c_driver = {
323 .driver = {
324 .name = "ptn5150",
325 .of_match_table = ptn5150_dt_match,
326 },
327 .probe = ptn5150_i2c_probe,
328 .id_table = ptn5150_i2c_id,
329};
330
331static int __init ptn5150_i2c_init(void)
332{
333 return i2c_add_driver(&ptn5150_i2c_driver);
334}
335subsys_initcall(ptn5150_i2c_init);
336
337MODULE_DESCRIPTION("NXP PTN5150 CC logic Extcon driver");
338MODULE_AUTHOR("Vijai Kumar K <vijaikumar.kanagarajan@gmail.com>");
339MODULE_LICENSE("GPL v2");
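
The core of ptn5150_irq_work() is the decode of CC_STATUS: the port-attachment field selects between the DFP-attached case (report EXTCON_USB) and the UFP-attached case (report EXTCON_USB_HOST), while the VBUS detection bit only steers the external VBUS supply GPIO. A minimal, illustrative decode using the same mask/shift macros (not part of the driver) might look like this:

    /*
     * Illustrative helper: split a raw CC_STATUS value into the two facts
     * ptn5150_irq_work() acts on. Not part of the driver itself.
     */
    static void ptn5150_decode_cc_status(unsigned int cc_status,
                                         bool *ufp_attached, bool *vbus_present)
    {
            unsigned int port = (cc_status & PTN5150_REG_CC_PORT_ATTACHMENT_MASK) >>
                                PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT;

            /* UFP attached on the far end means this side acts as USB host. */
            *ufp_attached = (port == PTN5150_UFP_ATTACHED);
            *vbus_present = !!(cc_status & PTN5150_REG_CC_VBUS_DETECTION_MASK);
    }

The extcon state itself depends solely on the attachment field; the VBUS bit is only ever used to decide whether the board needs to supply VBUS via the vbus GPIO.
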
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 0bb7b5cd6cdc..c20445b867ae 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -104,7 +104,7 @@ config SOCFPGA_FPGA_BRIDGE
104 104
105config ALTERA_FREEZE_BRIDGE 105config ALTERA_FREEZE_BRIDGE
106 tristate "Altera FPGA Freeze Bridge" 106 tristate "Altera FPGA Freeze Bridge"
107 depends on ARCH_SOCFPGA && FPGA_BRIDGE 107 depends on FPGA_BRIDGE && HAS_IOMEM
108 help 108 help
109 Say Y to enable drivers for Altera FPGA Freeze bridges. A 109 Say Y to enable drivers for Altera FPGA Freeze bridges. A
110 freeze bridge is a bridge that exists in the FPGA fabric to 110 freeze bridge is a bridge that exists in the FPGA fabric to
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 8c18beec6b57..678d0115f840 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -205,7 +205,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
205 struct fpga_image_info *info) 205 struct fpga_image_info *info)
206{ 206{
207 struct altera_ps_conf *conf = mgr->priv; 207 struct altera_ps_conf *conf = mgr->priv;
208 const char dummy[] = {0}; 208 static const char dummy[] = {0};
209 int ret; 209 int ret;
210 210
211 if (gpiod_get_value_cansleep(conf->status)) { 211 if (gpiod_get_value_cansleep(conf->status)) {
diff --git a/drivers/gnss/Kconfig b/drivers/gnss/Kconfig
index 6abc88514512..6d8c8027e1cd 100644
--- a/drivers/gnss/Kconfig
+++ b/drivers/gnss/Kconfig
@@ -15,6 +15,19 @@ if GNSS
15config GNSS_SERIAL 15config GNSS_SERIAL
16 tristate 16 tristate
17 17
18config GNSS_MTK_SERIAL
19 tristate "Mediatek GNSS receiver support"
20 depends on SERIAL_DEV_BUS
21 select GNSS_SERIAL
22 help
23 Say Y here if you have a Mediatek-based GNSS receiver which uses a
24 serial interface.
25
26 To compile this driver as a module, choose M here: the module will
27 be called gnss-mtk.
28
29 If unsure, say N.
30
18config GNSS_SIRF_SERIAL 31config GNSS_SIRF_SERIAL
19 tristate "SiRFstar GNSS receiver support" 32 tristate "SiRFstar GNSS receiver support"
20 depends on SERIAL_DEV_BUS 33 depends on SERIAL_DEV_BUS
diff --git a/drivers/gnss/Makefile b/drivers/gnss/Makefile
index 5cf0ebe0330a..451f11401ecc 100644
--- a/drivers/gnss/Makefile
+++ b/drivers/gnss/Makefile
@@ -9,6 +9,9 @@ gnss-y := core.o
9obj-$(CONFIG_GNSS_SERIAL) += gnss-serial.o 9obj-$(CONFIG_GNSS_SERIAL) += gnss-serial.o
10gnss-serial-y := serial.o 10gnss-serial-y := serial.o
11 11
12obj-$(CONFIG_GNSS_MTK_SERIAL) += gnss-mtk.o
13gnss-mtk-y := mtk.o
14
12obj-$(CONFIG_GNSS_SIRF_SERIAL) += gnss-sirf.o 15obj-$(CONFIG_GNSS_SIRF_SERIAL) += gnss-sirf.o
13gnss-sirf-y := sirf.o 16gnss-sirf-y := sirf.o
14 17
diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c
index 4291a0dd22aa..320cfca80d5f 100644
--- a/drivers/gnss/core.c
+++ b/drivers/gnss/core.c
@@ -334,6 +334,7 @@ static const char * const gnss_type_names[GNSS_TYPE_COUNT] = {
334 [GNSS_TYPE_NMEA] = "NMEA", 334 [GNSS_TYPE_NMEA] = "NMEA",
335 [GNSS_TYPE_SIRF] = "SiRF", 335 [GNSS_TYPE_SIRF] = "SiRF",
336 [GNSS_TYPE_UBX] = "UBX", 336 [GNSS_TYPE_UBX] = "UBX",
337 [GNSS_TYPE_MTK] = "MTK",
337}; 338};
338 339
339static const char *gnss_type_name(struct gnss_device *gdev) 340static const char *gnss_type_name(struct gnss_device *gdev)
diff --git a/drivers/gnss/mtk.c b/drivers/gnss/mtk.c
new file mode 100644
index 000000000000..d1fc55560daf
--- /dev/null
+++ b/drivers/gnss/mtk.c
@@ -0,0 +1,152 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Mediatek GNSS receiver driver
4 *
5 * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
6 */
7
8#include <linux/errno.h>
9#include <linux/gnss.h>
10#include <linux/init.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/regulator/consumer.h>
15#include <linux/serdev.h>
16
17#include "serial.h"
18
19struct mtk_data {
20 struct regulator *vbackup;
21 struct regulator *vcc;
22};
23
24static int mtk_set_active(struct gnss_serial *gserial)
25{
26 struct mtk_data *data = gnss_serial_get_drvdata(gserial);
27 int ret;
28
29 ret = regulator_enable(data->vcc);
30 if (ret)
31 return ret;
32
33 return 0;
34}
35
36static int mtk_set_standby(struct gnss_serial *gserial)
37{
38 struct mtk_data *data = gnss_serial_get_drvdata(gserial);
39 int ret;
40
41 ret = regulator_disable(data->vcc);
42 if (ret)
43 return ret;
44
45 return 0;
46}
47
48static int mtk_set_power(struct gnss_serial *gserial,
49 enum gnss_serial_pm_state state)
50{
51 switch (state) {
52 case GNSS_SERIAL_ACTIVE:
53 return mtk_set_active(gserial);
54 case GNSS_SERIAL_OFF:
55 case GNSS_SERIAL_STANDBY:
56 return mtk_set_standby(gserial);
57 }
58
59 return -EINVAL;
60}
61
62static const struct gnss_serial_ops mtk_gserial_ops = {
63 .set_power = mtk_set_power,
64};
65
66static int mtk_probe(struct serdev_device *serdev)
67{
68 struct gnss_serial *gserial;
69 struct mtk_data *data;
70 int ret;
71
72 gserial = gnss_serial_allocate(serdev, sizeof(*data));
73 if (IS_ERR(gserial)) {
74 ret = PTR_ERR(gserial);
75 return ret;
76 }
77
78 gserial->ops = &mtk_gserial_ops;
79
80 gserial->gdev->type = GNSS_TYPE_MTK;
81
82 data = gnss_serial_get_drvdata(gserial);
83
84 data->vcc = devm_regulator_get(&serdev->dev, "vcc");
85 if (IS_ERR(data->vcc)) {
86 ret = PTR_ERR(data->vcc);
87 goto err_free_gserial;
88 }
89
90 data->vbackup = devm_regulator_get_optional(&serdev->dev, "vbackup");
91 if (IS_ERR(data->vbackup)) {
92 ret = PTR_ERR(data->vbackup);
93 if (ret == -ENODEV)
94 data->vbackup = NULL;
95 else
96 goto err_free_gserial;
97 }
98
99 if (data->vbackup) {
100 ret = regulator_enable(data->vbackup);
101 if (ret)
102 goto err_free_gserial;
103 }
104
105 ret = gnss_serial_register(gserial);
106 if (ret)
107 goto err_disable_vbackup;
108
109 return 0;
110
111err_disable_vbackup:
112 if (data->vbackup)
113 regulator_disable(data->vbackup);
114err_free_gserial:
115 gnss_serial_free(gserial);
116
117 return ret;
118}
119
120static void mtk_remove(struct serdev_device *serdev)
121{
122 struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
123 struct mtk_data *data = gnss_serial_get_drvdata(gserial);
124
125 gnss_serial_deregister(gserial);
126 if (data->vbackup)
127 regulator_disable(data->vbackup);
128 gnss_serial_free(gserial);
129};
130
131#ifdef CONFIG_OF
132static const struct of_device_id mtk_of_match[] = {
133 { .compatible = "globaltop,pa6h" },
134 {},
135};
136MODULE_DEVICE_TABLE(of, mtk_of_match);
137#endif
138
139static struct serdev_device_driver mtk_driver = {
140 .driver = {
141 .name = "gnss-mtk",
142 .of_match_table = of_match_ptr(mtk_of_match),
143 .pm = &gnss_serial_pm_ops,
144 },
145 .probe = mtk_probe,
146 .remove = mtk_remove,
147};
148module_serdev_device_driver(mtk_driver);
149
150MODULE_AUTHOR("Loys Ollivier <lollivier@baylibre.com>");
151MODULE_DESCRIPTION("Mediatek GNSS receiver driver");
152MODULE_LICENSE("GPL v2");
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 226f6e6fe01b..effed3a8d398 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -25,31 +25,83 @@
25#define SIRF_ON_OFF_PULSE_TIME 100 25#define SIRF_ON_OFF_PULSE_TIME 100
26#define SIRF_ACTIVATE_TIMEOUT 200 26#define SIRF_ACTIVATE_TIMEOUT 200
27#define SIRF_HIBERNATE_TIMEOUT 200 27#define SIRF_HIBERNATE_TIMEOUT 200
28/*
29 * If no data arrives for this time, we assume that the chip is off.
30 * REVISIT: The report cycle is configurable and can be several minutes long,
 31 * so this will only work reliably if the report cycle is set to a reasonably
 32 * low value. Also, power saving settings (like send data only on movement)
 33 * might make things work even worse.
 34 * A workaround might be to parse shutdown or bootup messages.
35 */
36#define SIRF_REPORT_CYCLE 2000
28 37
29struct sirf_data { 38struct sirf_data {
30 struct gnss_device *gdev; 39 struct gnss_device *gdev;
31 struct serdev_device *serdev; 40 struct serdev_device *serdev;
32 speed_t speed; 41 speed_t speed;
33 struct regulator *vcc; 42 struct regulator *vcc;
43 struct regulator *lna;
34 struct gpio_desc *on_off; 44 struct gpio_desc *on_off;
35 struct gpio_desc *wakeup; 45 struct gpio_desc *wakeup;
36 int irq; 46 int irq;
37 bool active; 47 bool active;
48
49 struct mutex gdev_mutex;
50 bool open;
51
52 struct mutex serdev_mutex;
53 int serdev_count;
54
38 wait_queue_head_t power_wait; 55 wait_queue_head_t power_wait;
39}; 56};
40 57
58static int sirf_serdev_open(struct sirf_data *data)
59{
60 int ret = 0;
61
62 mutex_lock(&data->serdev_mutex);
63 if (++data->serdev_count == 1) {
64 ret = serdev_device_open(data->serdev);
65 if (ret) {
66 data->serdev_count--;
67 goto out_unlock;
68 }
69
70 serdev_device_set_baudrate(data->serdev, data->speed);
71 serdev_device_set_flow_control(data->serdev, false);
72 }
73
74out_unlock:
75 mutex_unlock(&data->serdev_mutex);
76
77 return ret;
78}
79
80static void sirf_serdev_close(struct sirf_data *data)
81{
82 mutex_lock(&data->serdev_mutex);
83 if (--data->serdev_count == 0)
84 serdev_device_close(data->serdev);
85 mutex_unlock(&data->serdev_mutex);
86}
87
41static int sirf_open(struct gnss_device *gdev) 88static int sirf_open(struct gnss_device *gdev)
42{ 89{
43 struct sirf_data *data = gnss_get_drvdata(gdev); 90 struct sirf_data *data = gnss_get_drvdata(gdev);
44 struct serdev_device *serdev = data->serdev; 91 struct serdev_device *serdev = data->serdev;
45 int ret; 92 int ret;
46 93
47 ret = serdev_device_open(serdev); 94 mutex_lock(&data->gdev_mutex);
48 if (ret) 95 data->open = true;
49 return ret; 96 mutex_unlock(&data->gdev_mutex);
50 97
51 serdev_device_set_baudrate(serdev, data->speed); 98 ret = sirf_serdev_open(data);
52 serdev_device_set_flow_control(serdev, false); 99 if (ret) {
100 mutex_lock(&data->gdev_mutex);
101 data->open = false;
102 mutex_unlock(&data->gdev_mutex);
103 return ret;
104 }
53 105
54 ret = pm_runtime_get_sync(&serdev->dev); 106 ret = pm_runtime_get_sync(&serdev->dev);
55 if (ret < 0) { 107 if (ret < 0) {
@@ -61,7 +113,11 @@ static int sirf_open(struct gnss_device *gdev)
61 return 0; 113 return 0;
62 114
63err_close: 115err_close:
64 serdev_device_close(serdev); 116 sirf_serdev_close(data);
117
118 mutex_lock(&data->gdev_mutex);
119 data->open = false;
120 mutex_unlock(&data->gdev_mutex);
65 121
66 return ret; 122 return ret;
67} 123}
@@ -71,9 +127,13 @@ static void sirf_close(struct gnss_device *gdev)
71 struct sirf_data *data = gnss_get_drvdata(gdev); 127 struct sirf_data *data = gnss_get_drvdata(gdev);
72 struct serdev_device *serdev = data->serdev; 128 struct serdev_device *serdev = data->serdev;
73 129
74 serdev_device_close(serdev); 130 sirf_serdev_close(data);
75 131
76 pm_runtime_put(&serdev->dev); 132 pm_runtime_put(&serdev->dev);
133
134 mutex_lock(&data->gdev_mutex);
135 data->open = false;
136 mutex_unlock(&data->gdev_mutex);
77} 137}
78 138
79static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf, 139static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
@@ -105,8 +165,19 @@ static int sirf_receive_buf(struct serdev_device *serdev,
105{ 165{
106 struct sirf_data *data = serdev_device_get_drvdata(serdev); 166 struct sirf_data *data = serdev_device_get_drvdata(serdev);
107 struct gnss_device *gdev = data->gdev; 167 struct gnss_device *gdev = data->gdev;
168 int ret = 0;
169
170 if (!data->wakeup && !data->active) {
171 data->active = true;
172 wake_up_interruptible(&data->power_wait);
173 }
174
175 mutex_lock(&data->gdev_mutex);
176 if (data->open)
177 ret = gnss_insert_raw(gdev, buf, count);
178 mutex_unlock(&data->gdev_mutex);
108 179
109 return gnss_insert_raw(gdev, buf, count); 180 return ret;
110} 181}
111 182
112static const struct serdev_device_ops sirf_serdev_ops = { 183static const struct serdev_device_ops sirf_serdev_ops = {
@@ -125,17 +196,45 @@ static irqreturn_t sirf_wakeup_handler(int irq, void *dev_id)
125 if (ret < 0) 196 if (ret < 0)
126 goto out; 197 goto out;
127 198
128 data->active = !!ret; 199 data->active = ret;
129 wake_up_interruptible(&data->power_wait); 200 wake_up_interruptible(&data->power_wait);
130out: 201out:
131 return IRQ_HANDLED; 202 return IRQ_HANDLED;
132} 203}
133 204
205static int sirf_wait_for_power_state_nowakeup(struct sirf_data *data,
206 bool active,
207 unsigned long timeout)
208{
209 int ret;
210
211 /* Wait for state change (including any shutdown messages). */
212 msleep(timeout);
213
214 /* Wait for data reception or timeout. */
215 data->active = false;
216 ret = wait_event_interruptible_timeout(data->power_wait,
217 data->active, msecs_to_jiffies(SIRF_REPORT_CYCLE));
218 if (ret < 0)
219 return ret;
220
221 if (ret > 0 && !active)
222 return -ETIMEDOUT;
223
224 if (ret == 0 && active)
225 return -ETIMEDOUT;
226
227 return 0;
228}
229
134static int sirf_wait_for_power_state(struct sirf_data *data, bool active, 230static int sirf_wait_for_power_state(struct sirf_data *data, bool active,
135 unsigned long timeout) 231 unsigned long timeout)
136{ 232{
137 int ret; 233 int ret;
138 234
235 if (!data->wakeup)
236 return sirf_wait_for_power_state_nowakeup(data, active, timeout);
237
139 ret = wait_event_interruptible_timeout(data->power_wait, 238 ret = wait_event_interruptible_timeout(data->power_wait,
140 data->active == active, msecs_to_jiffies(timeout)); 239 data->active == active, msecs_to_jiffies(timeout));
141 if (ret < 0) 240 if (ret < 0)
@@ -168,21 +267,22 @@ static int sirf_set_active(struct sirf_data *data, bool active)
168 else 267 else
169 timeout = SIRF_HIBERNATE_TIMEOUT; 268 timeout = SIRF_HIBERNATE_TIMEOUT;
170 269
270 if (!data->wakeup) {
271 ret = sirf_serdev_open(data);
272 if (ret)
273 return ret;
274 }
275
171 do { 276 do {
172 sirf_pulse_on_off(data); 277 sirf_pulse_on_off(data);
173 ret = sirf_wait_for_power_state(data, active, timeout); 278 ret = sirf_wait_for_power_state(data, active, timeout);
174 if (ret < 0) { 279 } while (ret == -ETIMEDOUT && retries--);
175 if (ret == -ETIMEDOUT)
176 continue;
177 280
178 return ret; 281 if (!data->wakeup)
179 } 282 sirf_serdev_close(data);
180 283
181 break; 284 if (ret)
182 } while (retries--); 285 return ret;
183
184 if (retries < 0)
185 return -ETIMEDOUT;
186 286
187 return 0; 287 return 0;
188} 288}
@@ -190,21 +290,60 @@ static int sirf_set_active(struct sirf_data *data, bool active)
190static int sirf_runtime_suspend(struct device *dev) 290static int sirf_runtime_suspend(struct device *dev)
191{ 291{
192 struct sirf_data *data = dev_get_drvdata(dev); 292 struct sirf_data *data = dev_get_drvdata(dev);
293 int ret2;
294 int ret;
193 295
194 if (!data->on_off) 296 if (data->on_off)
195 return regulator_disable(data->vcc); 297 ret = sirf_set_active(data, false);
298 else
299 ret = regulator_disable(data->vcc);
300
301 if (ret)
302 return ret;
303
304 ret = regulator_disable(data->lna);
305 if (ret)
306 goto err_reenable;
196 307
197 return sirf_set_active(data, false); 308 return 0;
309
310err_reenable:
311 if (data->on_off)
312 ret2 = sirf_set_active(data, true);
313 else
314 ret2 = regulator_enable(data->vcc);
315
316 if (ret2)
317 dev_err(dev,
318 "failed to reenable power on failed suspend: %d\n",
319 ret2);
320
321 return ret;
198} 322}
199 323
200static int sirf_runtime_resume(struct device *dev) 324static int sirf_runtime_resume(struct device *dev)
201{ 325{
202 struct sirf_data *data = dev_get_drvdata(dev); 326 struct sirf_data *data = dev_get_drvdata(dev);
327 int ret;
203 328
204 if (!data->on_off) 329 ret = regulator_enable(data->lna);
205 return regulator_enable(data->vcc); 330 if (ret)
331 return ret;
332
333 if (data->on_off)
334 ret = sirf_set_active(data, true);
335 else
336 ret = regulator_enable(data->vcc);
337
338 if (ret)
339 goto err_disable_lna;
340
341 return 0;
342
343err_disable_lna:
344 regulator_disable(data->lna);
206 345
207 return sirf_set_active(data, true); 346 return ret;
208} 347}
209 348
210static int __maybe_unused sirf_suspend(struct device *dev) 349static int __maybe_unused sirf_suspend(struct device *dev)
@@ -275,6 +414,8 @@ static int sirf_probe(struct serdev_device *serdev)
275 data->serdev = serdev; 414 data->serdev = serdev;
276 data->gdev = gdev; 415 data->gdev = gdev;
277 416
417 mutex_init(&data->gdev_mutex);
418 mutex_init(&data->serdev_mutex);
278 init_waitqueue_head(&data->power_wait); 419 init_waitqueue_head(&data->power_wait);
279 420
280 serdev_device_set_drvdata(serdev, data); 421 serdev_device_set_drvdata(serdev, data);
@@ -290,6 +431,12 @@ static int sirf_probe(struct serdev_device *serdev)
290 goto err_put_device; 431 goto err_put_device;
291 } 432 }
292 433
434 data->lna = devm_regulator_get(dev, "lna");
435 if (IS_ERR(data->lna)) {
436 ret = PTR_ERR(data->lna);
437 goto err_put_device;
438 }
439
293 data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff", 440 data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff",
294 GPIOD_OUT_LOW); 441 GPIOD_OUT_LOW);
295 if (IS_ERR(data->on_off)) 442 if (IS_ERR(data->on_off))
@@ -301,39 +448,53 @@ static int sirf_probe(struct serdev_device *serdev)
301 if (IS_ERR(data->wakeup)) 448 if (IS_ERR(data->wakeup))
302 goto err_put_device; 449 goto err_put_device;
303 450
304 /* 451 ret = regulator_enable(data->vcc);
305 * Configurations where WAKEUP has been left not connected, 452 if (ret)
306 * are currently not supported.
307 */
308 if (!data->wakeup) {
309 dev_err(dev, "no wakeup gpio specified\n");
310 ret = -ENODEV;
311 goto err_put_device; 453 goto err_put_device;
312 } 454
455 /* Wait for chip to boot into hibernate mode. */
456 msleep(SIRF_BOOT_DELAY);
313 } 457 }
314 458
315 if (data->wakeup) { 459 if (data->wakeup) {
316 ret = gpiod_to_irq(data->wakeup); 460 ret = gpiod_get_value_cansleep(data->wakeup);
317 if (ret < 0) 461 if (ret < 0)
318 goto err_put_device; 462 goto err_disable_vcc;
463 data->active = ret;
319 464
465 ret = gpiod_to_irq(data->wakeup);
466 if (ret < 0)
467 goto err_disable_vcc;
320 data->irq = ret; 468 data->irq = ret;
321 469
322 ret = devm_request_threaded_irq(dev, data->irq, NULL, 470 ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler,
323 sirf_wakeup_handler,
324 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 471 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
325 "wakeup", data); 472 "wakeup", data);
326 if (ret) 473 if (ret)
327 goto err_put_device; 474 goto err_disable_vcc;
328 } 475 }
329 476
330 if (data->on_off) { 477 if (data->on_off) {
331 ret = regulator_enable(data->vcc); 478 if (!data->wakeup) {
332 if (ret) 479 data->active = false;
333 goto err_put_device;
334 480
335 /* Wait for chip to boot into hibernate mode */ 481 ret = sirf_serdev_open(data);
336 msleep(SIRF_BOOT_DELAY); 482 if (ret)
483 goto err_disable_vcc;
484
485 msleep(SIRF_REPORT_CYCLE);
486 sirf_serdev_close(data);
487 }
488
489 /* Force hibernate mode if already active. */
490 if (data->active) {
491 ret = sirf_set_active(data, false);
492 if (ret) {
493 dev_err(dev, "failed to set hibernate mode: %d\n",
494 ret);
495 goto err_free_irq;
496 }
497 }
337 } 498 }
338 499
339 if (IS_ENABLED(CONFIG_PM)) { 500 if (IS_ENABLED(CONFIG_PM)) {
@@ -342,7 +503,7 @@ static int sirf_probe(struct serdev_device *serdev)
342 } else { 503 } else {
343 ret = sirf_runtime_resume(dev); 504 ret = sirf_runtime_resume(dev);
344 if (ret < 0) 505 if (ret < 0)
345 goto err_disable_vcc; 506 goto err_free_irq;
346 } 507 }
347 508
348 ret = gnss_register_device(gdev); 509 ret = gnss_register_device(gdev);
@@ -356,6 +517,9 @@ err_disable_rpm:
356 pm_runtime_disable(dev); 517 pm_runtime_disable(dev);
357 else 518 else
358 sirf_runtime_suspend(dev); 519 sirf_runtime_suspend(dev);
520err_free_irq:
521 if (data->wakeup)
522 free_irq(data->irq, data);
359err_disable_vcc: 523err_disable_vcc:
360 if (data->on_off) 524 if (data->on_off)
361 regulator_disable(data->vcc); 525 regulator_disable(data->vcc);
@@ -376,6 +540,9 @@ static void sirf_remove(struct serdev_device *serdev)
376 else 540 else
377 sirf_runtime_suspend(&serdev->dev); 541 sirf_runtime_suspend(&serdev->dev);
378 542
543 if (data->wakeup)
544 free_irq(data->irq, data);
545
379 if (data->on_off) 546 if (data->on_off)
380 regulator_disable(data->vcc); 547 regulator_disable(data->vcc);
381 548
@@ -386,6 +553,7 @@ static void sirf_remove(struct serdev_device *serdev)
386static const struct of_device_id sirf_of_match[] = { 553static const struct of_device_id sirf_of_match[] = {
387 { .compatible = "fastrax,uc430" }, 554 { .compatible = "fastrax,uc430" },
388 { .compatible = "linx,r4" }, 555 { .compatible = "linx,r4" },
556 { .compatible = "wi2wi,w2sg0004" },
389 { .compatible = "wi2wi,w2sg0008i" }, 557 { .compatible = "wi2wi,w2sg0008i" },
390 { .compatible = "wi2wi,w2sg0084i" }, 558 { .compatible = "wi2wi,w2sg0084i" },
391 {}, 559 {},
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index ae55a6865d5c..b32681632f30 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -984,7 +984,9 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
984{ 984{
985 int ret; 985 int ret;
986 986
987 ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); 987 ret = component_add_typed(dev_priv->drm.dev,
988 &i915_audio_component_bind_ops,
989 I915_COMPONENT_AUDIO);
988 if (ret < 0) { 990 if (ret < 0) {
989 DRM_ERROR("failed to add audio component (%d)\n", ret); 991 DRM_ERROR("failed to add audio component (%d)\n", ret);
990 /* continue with reduced functionality */ 992 /* continue with reduced functionality */
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 4262452963b3..79203666fc62 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -26,6 +26,7 @@
26#define _INTEL_DISPLAY_H_ 26#define _INTEL_DISPLAY_H_
27 27
28#include <drm/drm_util.h> 28#include <drm/drm_util.h>
29#include <drm/i915_drm.h>
29 30
30enum i915_gpio { 31enum i915_gpio {
31 GPIOA, 32 GPIOA,
@@ -150,21 +151,6 @@ enum plane_id {
150 for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \ 151 for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
151 for_each_if((__crtc)->plane_ids_mask & BIT(__p)) 152 for_each_if((__crtc)->plane_ids_mask & BIT(__p))
152 153
153enum port {
154 PORT_NONE = -1,
155
156 PORT_A = 0,
157 PORT_B,
158 PORT_C,
159 PORT_D,
160 PORT_E,
161 PORT_F,
162
163 I915_MAX_PORTS
164};
165
166#define port_name(p) ((p) + 'A')
167
168/* 154/*
169 * Ports identifier referenced from other drivers. 155 * Ports identifier referenced from other drivers.
170 * Expected to remain stable over time 156 * Expected to remain stable over time
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index cf549f1ed403..78c9e5a5e793 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -5,6 +5,7 @@ config DRM_MSM
5 depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST) 5 depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
6 depends on OF && COMMON_CLK 6 depends on OF && COMMON_CLK
7 depends on MMU 7 depends on MMU
8 depends on INTERCONNECT || !INTERCONNECT
8 select QCOM_MDT_LOADER if ARCH_QCOM 9 select QCOM_MDT_LOADER if ARCH_QCOM
9 select REGULATOR 10 select REGULATOR
10 select DRM_KMS_HELPER 11 select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index ce1b3cc4bf6d..d1662a75c7ec 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,6 +2,7 @@
2/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */ 2/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
3 3
4#include <linux/clk.h> 4#include <linux/clk.h>
5#include <linux/interconnect.h>
5#include <linux/pm_opp.h> 6#include <linux/pm_opp.h>
6#include <soc/qcom/cmd-db.h> 7#include <soc/qcom/cmd-db.h>
7 8
@@ -84,6 +85,9 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
84 85
85static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) 86static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
86{ 87{
88 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
89 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
90 struct msm_gpu *gpu = &adreno_gpu->base;
87 int ret; 91 int ret;
88 92
89 gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); 93 gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
@@ -106,6 +110,12 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
106 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); 110 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
107 111
108 gmu->freq = gmu->gpu_freqs[index]; 112 gmu->freq = gmu->gpu_freqs[index];
113
114 /*
115 * Eventually we will want to scale the path vote with the frequency but
116 * for now leave it at max so that the performance is nominal.
117 */
118 icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
109} 119}
110 120
111void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq) 121void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
@@ -705,6 +715,8 @@ out:
705 715
706int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) 716int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
707{ 717{
718 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
719 struct msm_gpu *gpu = &adreno_gpu->base;
708 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 720 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
709 int status, ret; 721 int status, ret;
710 722
@@ -720,6 +732,9 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
720 if (ret) 732 if (ret)
721 goto out; 733 goto out;
722 734
735 /* Set the bus quota to a reasonable value for boot */
736 icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));
737
723 a6xx_gmu_irq_enable(gmu); 738 a6xx_gmu_irq_enable(gmu);
724 739
725 /* Check to see if we are doing a cold or warm boot */ 740 /* Check to see if we are doing a cold or warm boot */
@@ -760,6 +775,8 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
760 775
761int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) 776int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
762{ 777{
778 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
779 struct msm_gpu *gpu = &adreno_gpu->base;
763 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 780 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
764 u32 val; 781 u32 val;
765 782
@@ -806,6 +823,9 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
806 /* Tell RPMh to power off the GPU */ 823 /* Tell RPMh to power off the GPU */
807 a6xx_rpmh_stop(gmu); 824 a6xx_rpmh_stop(gmu);
808 825
826 /* Remove the bus vote */
827 icc_set_bw(gpu->icc_path, 0, 0);
828
809 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); 829 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
810 830
811 pm_runtime_put_sync(gmu->dev); 831 pm_runtime_put_sync(gmu->dev);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2cfee1a4fe0b..27898475cdf4 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/ascii85.h> 20#include <linux/ascii85.h>
21#include <linux/interconnect.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
22#include <linux/pm_opp.h> 23#include <linux/pm_opp.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
@@ -747,6 +748,11 @@ static int adreno_get_pwrlevels(struct device *dev,
747 748
748 DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate); 749 DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
749 750
751 /* Check for an interconnect path for the bus */
752 gpu->icc_path = of_icc_get(dev, NULL);
753 if (IS_ERR(gpu->icc_path))
754 gpu->icc_path = NULL;
755
750 return 0; 756 return 0;
751} 757}
752 758
@@ -787,10 +793,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
787 793
788void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) 794void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
789{ 795{
796 struct msm_gpu *gpu = &adreno_gpu->base;
790 unsigned int i; 797 unsigned int i;
791 798
792 for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) 799 for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
793 release_firmware(adreno_gpu->fw[i]); 800 release_firmware(adreno_gpu->fw[i]);
794 801
802 icc_put(gpu->icc_path);
803
795 msm_gpu_cleanup(&adreno_gpu->base); 804 msm_gpu_cleanup(&adreno_gpu->base);
796} 805}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index ca17086f72c9..6241986bab51 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -19,6 +19,7 @@
19#define __MSM_GPU_H__ 19#define __MSM_GPU_H__
20 20
21#include <linux/clk.h> 21#include <linux/clk.h>
22#include <linux/interconnect.h>
22#include <linux/regulator/consumer.h> 23#include <linux/regulator/consumer.h>
23 24
24#include "msm_drv.h" 25#include "msm_drv.h"
@@ -118,6 +119,8 @@ struct msm_gpu {
118 struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk; 119 struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
119 uint32_t fast_rate; 120 uint32_t fast_rate;
120 121
122 struct icc_path *icc_path;
123
121 /* Hang and Inactivity Detection: 124 /* Hang and Inactivity Detection:
122 */ 125 */
123#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */ 126#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index bea4c9850247..23381c41d087 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -282,8 +282,8 @@ int vmbus_open(struct vmbus_channel *newchannel,
282EXPORT_SYMBOL_GPL(vmbus_open); 282EXPORT_SYMBOL_GPL(vmbus_open);
283 283
284/* Used for Hyper-V Socket: a guest client's connect() to the host */ 284/* Used for Hyper-V Socket: a guest client's connect() to the host */
285int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, 285int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
286 const uuid_le *shv_host_servie_id) 286 const guid_t *shv_host_servie_id)
287{ 287{
288 struct vmbus_channel_tl_connect_request conn_msg; 288 struct vmbus_channel_tl_connect_request conn_msg;
289 int ret; 289 int ret;
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index d01689079e9b..62703b354d6d 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -141,7 +141,7 @@ static const struct vmbus_device vmbus_devs[] = {
141}; 141};
142 142
143static const struct { 143static const struct {
144 uuid_le guid; 144 guid_t guid;
145} vmbus_unsupported_devs[] = { 145} vmbus_unsupported_devs[] = {
146 { HV_AVMA1_GUID }, 146 { HV_AVMA1_GUID },
147 { HV_AVMA2_GUID }, 147 { HV_AVMA2_GUID },
@@ -171,26 +171,26 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
171 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); 171 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
172} 172}
173 173
174static bool is_unsupported_vmbus_devs(const uuid_le *guid) 174static bool is_unsupported_vmbus_devs(const guid_t *guid)
175{ 175{
176 int i; 176 int i;
177 177
178 for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++) 178 for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
179 if (!uuid_le_cmp(*guid, vmbus_unsupported_devs[i].guid)) 179 if (guid_equal(guid, &vmbus_unsupported_devs[i].guid))
180 return true; 180 return true;
181 return false; 181 return false;
182} 182}
183 183
184static u16 hv_get_dev_type(const struct vmbus_channel *channel) 184static u16 hv_get_dev_type(const struct vmbus_channel *channel)
185{ 185{
186 const uuid_le *guid = &channel->offermsg.offer.if_type; 186 const guid_t *guid = &channel->offermsg.offer.if_type;
187 u16 i; 187 u16 i;
188 188
189 if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid)) 189 if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
190 return HV_UNKNOWN; 190 return HV_UNKNOWN;
191 191
192 for (i = HV_IDE; i < HV_UNKNOWN; i++) { 192 for (i = HV_IDE; i < HV_UNKNOWN; i++) {
193 if (!uuid_le_cmp(*guid, vmbus_devs[i].guid)) 193 if (guid_equal(guid, &vmbus_devs[i].guid))
194 return i; 194 return i;
195 } 195 }
196 pr_info("Unknown GUID: %pUl\n", guid); 196 pr_info("Unknown GUID: %pUl\n", guid);
@@ -561,10 +561,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
561 atomic_dec(&vmbus_connection.offer_in_progress); 561 atomic_dec(&vmbus_connection.offer_in_progress);
562 562
563 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { 563 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
564 if (!uuid_le_cmp(channel->offermsg.offer.if_type, 564 if (guid_equal(&channel->offermsg.offer.if_type,
565 newchannel->offermsg.offer.if_type) && 565 &newchannel->offermsg.offer.if_type) &&
566 !uuid_le_cmp(channel->offermsg.offer.if_instance, 566 guid_equal(&channel->offermsg.offer.if_instance,
567 newchannel->offermsg.offer.if_instance)) { 567 &newchannel->offermsg.offer.if_instance)) {
568 fnew = false; 568 fnew = false;
569 break; 569 break;
570 } 570 }
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index a1f6ce6e5974..cb86b133eb4d 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -312,8 +312,8 @@ extern const struct vmbus_channel_message_table_entry
312 312
313/* General vmbus interface */ 313/* General vmbus interface */
314 314
315struct hv_device *vmbus_device_create(const uuid_le *type, 315struct hv_device *vmbus_device_create(const guid_t *type,
316 const uuid_le *instance, 316 const guid_t *instance,
317 struct vmbus_channel *channel); 317 struct vmbus_channel *channel);
318 318
319int vmbus_device_register(struct hv_device *child_device_obj); 319int vmbus_device_register(struct hv_device *child_device_obj);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 1f1a55e07733..9e8b31ccc142 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -74,8 +74,10 @@ static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
74 * This is the only case we need to signal when the 74 * This is the only case we need to signal when the
75 * ring transitions from being empty to non-empty. 75 * ring transitions from being empty to non-empty.
76 */ 76 */
77 if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) 77 if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
78 ++channel->intr_out_empty;
78 vmbus_setevent(channel); 79 vmbus_setevent(channel);
80 }
79} 81}
80 82
81/* Get the next write location for the specified ring buffer. */ 83/* Get the next write location for the specified ring buffer. */
@@ -272,10 +274,19 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
272 * is empty since the read index == write index. 274 * is empty since the read index == write index.
273 */ 275 */
274 if (bytes_avail_towrite <= totalbytes_towrite) { 276 if (bytes_avail_towrite <= totalbytes_towrite) {
277 ++channel->out_full_total;
278
279 if (!channel->out_full_flag) {
280 ++channel->out_full_first;
281 channel->out_full_flag = true;
282 }
283
275 spin_unlock_irqrestore(&outring_info->ring_lock, flags); 284 spin_unlock_irqrestore(&outring_info->ring_lock, flags);
276 return -EAGAIN; 285 return -EAGAIN;
277 } 286 }
278 287
288 channel->out_full_flag = false;
289
279 /* Write to the ring buffer */ 290 /* Write to the ring buffer */
280 next_write_location = hv_get_next_write_location(outring_info); 291 next_write_location = hv_get_next_write_location(outring_info);
281 292
@@ -530,6 +541,7 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
530 if (curr_write_sz <= pending_sz) 541 if (curr_write_sz <= pending_sz)
531 return; 542 return;
532 543
544 ++channel->intr_in_full;
533 vmbus_setevent(channel); 545 vmbus_setevent(channel);
534} 546}
535EXPORT_SYMBOL_GPL(hv_pkt_iter_close); 547EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
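
The ring-buffer hunks above add occupancy counters without changing behaviour: out_full_total increments on every write attempt that finds the ring full, while out_full_flag turns out_full_first into an edge counter that only records transitions into the full state; the intr_* counters likewise count the host-signalling events in hv_signal_on_write() and hv_pkt_iter_close(). A stripped-down sketch of that accounting, with illustrative names:

/* Edge-vs-total accounting, as added to hv_ringbuffer_write() above. */
#include <linux/errno.h>
#include <linux/types.h>

struct example_ring_stats {
	unsigned long long out_full_total;	/* every failed write */
	unsigned long long out_full_first;	/* transitions into "full" */
	bool out_full_flag;
};

static int example_ring_write(struct example_ring_stats *s, bool has_room)
{
	if (!has_room) {
		++s->out_full_total;

		if (!s->out_full_flag) {
			++s->out_full_first;
			s->out_full_flag = true;
		}

		return -EAGAIN;
	}

	/* A successful write re-arms the "first full" edge detector */
	s->out_full_flag = false;
	return 0;
}
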
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 403fee01572c..000b53e5a17a 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -234,7 +234,7 @@ static ssize_t server_monitor_pending_show(struct device *dev,
234 return -ENODEV; 234 return -ENODEV;
235 return sprintf(buf, "%d\n", 235 return sprintf(buf, "%d\n",
236 channel_pending(hv_dev->channel, 236 channel_pending(hv_dev->channel,
237 vmbus_connection.monitor_pages[1])); 237 vmbus_connection.monitor_pages[0]));
238} 238}
239static DEVICE_ATTR_RO(server_monitor_pending); 239static DEVICE_ATTR_RO(server_monitor_pending);
240 240
@@ -654,38 +654,28 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
654 return ret; 654 return ret;
655} 655}
656 656
657static const uuid_le null_guid;
658
659static inline bool is_null_guid(const uuid_le *guid)
660{
661 if (uuid_le_cmp(*guid, null_guid))
662 return false;
663 return true;
664}
665
666static const struct hv_vmbus_device_id * 657static const struct hv_vmbus_device_id *
667hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const uuid_le *guid) 658hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
668
669{ 659{
670 if (id == NULL) 660 if (id == NULL)
671 return NULL; /* empty device table */ 661 return NULL; /* empty device table */
672 662
673 for (; !is_null_guid(&id->guid); id++) 663 for (; !guid_is_null(&id->guid); id++)
674 if (!uuid_le_cmp(id->guid, *guid)) 664 if (guid_equal(&id->guid, guid))
675 return id; 665 return id;
676 666
677 return NULL; 667 return NULL;
678} 668}
679 669
680static const struct hv_vmbus_device_id * 670static const struct hv_vmbus_device_id *
681hv_vmbus_dynid_match(struct hv_driver *drv, const uuid_le *guid) 671hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
682{ 672{
683 const struct hv_vmbus_device_id *id = NULL; 673 const struct hv_vmbus_device_id *id = NULL;
684 struct vmbus_dynid *dynid; 674 struct vmbus_dynid *dynid;
685 675
686 spin_lock(&drv->dynids.lock); 676 spin_lock(&drv->dynids.lock);
687 list_for_each_entry(dynid, &drv->dynids.list, node) { 677 list_for_each_entry(dynid, &drv->dynids.list, node) {
688 if (!uuid_le_cmp(dynid->id.guid, *guid)) { 678 if (guid_equal(&dynid->id.guid, guid)) {
689 id = &dynid->id; 679 id = &dynid->id;
690 break; 680 break;
691 } 681 }
@@ -695,9 +685,7 @@ hv_vmbus_dynid_match(struct hv_driver *drv, const uuid_le *guid)
695 return id; 685 return id;
696} 686}
697 687
698static const struct hv_vmbus_device_id vmbus_device_null = { 688static const struct hv_vmbus_device_id vmbus_device_null;
699 .guid = NULL_UUID_LE,
700};
701 689
702/* 690/*
703 * Return a matching hv_vmbus_device_id pointer. 691 * Return a matching hv_vmbus_device_id pointer.
@@ -706,7 +694,7 @@ static const struct hv_vmbus_device_id vmbus_device_null = {
706static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv, 694static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
707 struct hv_device *dev) 695 struct hv_device *dev)
708{ 696{
709 const uuid_le *guid = &dev->dev_type; 697 const guid_t *guid = &dev->dev_type;
710 const struct hv_vmbus_device_id *id; 698 const struct hv_vmbus_device_id *id;
711 699
712 /* When driver_override is set, only bind to the matching driver */ 700 /* When driver_override is set, only bind to the matching driver */
@@ -726,7 +714,7 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
726} 714}
727 715
728/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */ 716/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
729static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid) 717static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
730{ 718{
731 struct vmbus_dynid *dynid; 719 struct vmbus_dynid *dynid;
732 720
@@ -764,10 +752,10 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf,
764 size_t count) 752 size_t count)
765{ 753{
766 struct hv_driver *drv = drv_to_hv_drv(driver); 754 struct hv_driver *drv = drv_to_hv_drv(driver);
767 uuid_le guid; 755 guid_t guid;
768 ssize_t retval; 756 ssize_t retval;
769 757
770 retval = uuid_le_to_bin(buf, &guid); 758 retval = guid_parse(buf, &guid);
771 if (retval) 759 if (retval)
772 return retval; 760 return retval;
773 761
@@ -791,10 +779,10 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
791{ 779{
792 struct hv_driver *drv = drv_to_hv_drv(driver); 780 struct hv_driver *drv = drv_to_hv_drv(driver);
793 struct vmbus_dynid *dynid, *n; 781 struct vmbus_dynid *dynid, *n;
794 uuid_le guid; 782 guid_t guid;
795 ssize_t retval; 783 ssize_t retval;
796 784
797 retval = uuid_le_to_bin(buf, &guid); 785 retval = guid_parse(buf, &guid);
798 if (retval) 786 if (retval)
799 return retval; 787 return retval;
800 788
@@ -803,7 +791,7 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
803 list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { 791 list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
804 struct hv_vmbus_device_id *id = &dynid->id; 792 struct hv_vmbus_device_id *id = &dynid->id;
805 793
806 if (!uuid_le_cmp(id->guid, guid)) { 794 if (guid_equal(&id->guid, &guid)) {
807 list_del(&dynid->node); 795 list_del(&dynid->node);
808 kfree(dynid); 796 kfree(dynid);
809 retval = count; 797 retval = count;
@@ -1496,6 +1484,38 @@ static ssize_t channel_events_show(const struct vmbus_channel *channel, char *bu
1496} 1484}
1497static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL); 1485static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
1498 1486
1487static ssize_t channel_intr_in_full_show(const struct vmbus_channel *channel,
1488 char *buf)
1489{
1490 return sprintf(buf, "%llu\n",
1491 (unsigned long long)channel->intr_in_full);
1492}
1493static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);
1494
1495static ssize_t channel_intr_out_empty_show(const struct vmbus_channel *channel,
1496 char *buf)
1497{
1498 return sprintf(buf, "%llu\n",
1499 (unsigned long long)channel->intr_out_empty);
1500}
1501static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);
1502
1503static ssize_t channel_out_full_first_show(const struct vmbus_channel *channel,
1504 char *buf)
1505{
1506 return sprintf(buf, "%llu\n",
1507 (unsigned long long)channel->out_full_first);
1508}
1509static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);
1510
1511static ssize_t channel_out_full_total_show(const struct vmbus_channel *channel,
1512 char *buf)
1513{
1514 return sprintf(buf, "%llu\n",
1515 (unsigned long long)channel->out_full_total);
1516}
1517static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);
1518
1499static ssize_t subchannel_monitor_id_show(const struct vmbus_channel *channel, 1519static ssize_t subchannel_monitor_id_show(const struct vmbus_channel *channel,
1500 char *buf) 1520 char *buf)
1501{ 1521{
@@ -1521,6 +1541,10 @@ static struct attribute *vmbus_chan_attrs[] = {
1521 &chan_attr_latency.attr, 1541 &chan_attr_latency.attr,
1522 &chan_attr_interrupts.attr, 1542 &chan_attr_interrupts.attr,
1523 &chan_attr_events.attr, 1543 &chan_attr_events.attr,
1544 &chan_attr_intr_in_full.attr,
1545 &chan_attr_intr_out_empty.attr,
1546 &chan_attr_out_full_first.attr,
1547 &chan_attr_out_full_total.attr,
1524 &chan_attr_monitor_id.attr, 1548 &chan_attr_monitor_id.attr,
1525 &chan_attr_subchannel_id.attr, 1549 &chan_attr_subchannel_id.attr,
1526 NULL 1550 NULL
@@ -1556,8 +1580,8 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
1556 * vmbus_device_create - Creates and registers a new child device 1580 * vmbus_device_create - Creates and registers a new child device
1557 * on the vmbus. 1581 * on the vmbus.
1558 */ 1582 */
1559struct hv_device *vmbus_device_create(const uuid_le *type, 1583struct hv_device *vmbus_device_create(const guid_t *type,
1560 const uuid_le *instance, 1584 const guid_t *instance,
1561 struct vmbus_channel *channel) 1585 struct vmbus_channel *channel)
1562{ 1586{
1563 struct hv_device *child_device_obj; 1587 struct hv_device *child_device_obj;
@@ -1569,12 +1593,10 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
1569 } 1593 }
1570 1594
1571 child_device_obj->channel = channel; 1595 child_device_obj->channel = channel;
1572 memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le)); 1596 guid_copy(&child_device_obj->dev_type, type);
1573 memcpy(&child_device_obj->dev_instance, instance, 1597 guid_copy(&child_device_obj->dev_instance, instance);
1574 sizeof(uuid_le));
1575 child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */ 1598 child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
1576 1599
1577
1578 return child_device_obj; 1600 return child_device_obj;
1579} 1601}
1580 1602
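
The vmbus conversion above swaps the legacy uuid_le type for guid_t and its helpers from <linux/uuid.h>. As a quick reference, this is how the replaced operations map onto the new API; the wrapper function and its input string are illustrative only.

#include <linux/errno.h>
#include <linux/uuid.h>

static int example_guid_usage(const char *guid_string)
{
	guid_t a, b;
	int ret;

	ret = guid_parse(guid_string, &a);	/* was uuid_le_to_bin() */
	if (ret)
		return ret;

	if (guid_is_null(&a))			/* was a cmp against a null uuid_le */
		return -EINVAL;

	guid_copy(&b, &a);			/* was memcpy(..., sizeof(uuid_le)) */

	return guid_equal(&a, &b) ? 0 : -EINVAL; /* was !uuid_le_cmp(a, b) */
}
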
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 45b2460f3166..e8819d750938 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -668,6 +668,10 @@ static const struct amba_id debug_ids[] = {
668 .id = 0x000bbd08, 668 .id = 0x000bbd08,
669 .mask = 0x000fffff, 669 .mask = 0x000fffff,
670 }, 670 },
671 { /* Debug for Cortex-A73 */
672 .id = 0x000bbd09,
673 .mask = 0x000fffff,
674 },
671 { 0, 0 }, 675 { 0, 0 },
672}; 676};
673 677
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 53e2fb6e86f6..fe76b176974a 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -55,7 +55,8 @@ static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
55 55
56static bool etm4_arch_supported(u8 arch) 56static bool etm4_arch_supported(u8 arch)
57{ 57{
58 switch (arch) { 58 /* Mask out the minor version number */
59 switch (arch & 0xf0) {
59 case ETM_ARCH_V4: 60 case ETM_ARCH_V4:
60 break; 61 break;
61 default: 62 default:
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index ef339ff22090..f07825df5c7a 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -793,7 +793,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
793 struct stm_drvdata *drvdata; 793 struct stm_drvdata *drvdata;
794 struct resource *res = &adev->res; 794 struct resource *res = &adev->res;
795 struct resource ch_res; 795 struct resource ch_res;
796 size_t res_size, bitmap_size; 796 size_t bitmap_size;
797 struct coresight_desc desc = { 0 }; 797 struct coresight_desc desc = { 0 };
798 struct device_node *np = adev->dev.of_node; 798 struct device_node *np = adev->dev.of_node;
799 799
@@ -833,15 +833,11 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
833 833
834 drvdata->write_bytes = stm_fundamental_data_size(drvdata); 834 drvdata->write_bytes = stm_fundamental_data_size(drvdata);
835 835
836 if (boot_nr_channel) { 836 if (boot_nr_channel)
837 drvdata->numsp = boot_nr_channel; 837 drvdata->numsp = boot_nr_channel;
838 res_size = min((resource_size_t)(boot_nr_channel * 838 else
839 BYTES_PER_CHANNEL), resource_size(res));
840 } else {
841 drvdata->numsp = stm_num_stimulus_port(drvdata); 839 drvdata->numsp = stm_num_stimulus_port(drvdata);
842 res_size = min((resource_size_t)(drvdata->numsp * 840
843 BYTES_PER_CHANNEL), resource_size(res));
844 }
845 bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long); 841 bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);
846 842
847 guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL); 843 guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 89092f83567e..7045930fc958 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -80,8 +80,8 @@ static struct device_node *of_coresight_get_port_parent(struct device_node *ep)
80 * Skip one-level up to the real device node, if we 80 * Skip one-level up to the real device node, if we
81 * are using the new bindings. 81 * are using the new bindings.
82 */ 82 */
83 if (!of_node_cmp(parent->name, "in-ports") || 83 if (of_node_name_eq(parent, "in-ports") ||
84 !of_node_cmp(parent->name, "out-ports")) 84 of_node_name_eq(parent, "out-ports"))
85 parent = of_get_next_parent(parent); 85 parent = of_get_next_parent(parent);
86 86
87 return parent; 87 return parent;
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index fc6b7f8b62fb..7c1acc2f801c 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -422,6 +422,7 @@ static const struct intel_th_subdevice {
422 unsigned nres; 422 unsigned nres;
423 unsigned type; 423 unsigned type;
424 unsigned otype; 424 unsigned otype;
425 bool mknode;
425 unsigned scrpd; 426 unsigned scrpd;
426 int id; 427 int id;
427} intel_th_subdevices[] = { 428} intel_th_subdevices[] = {
@@ -456,6 +457,7 @@ static const struct intel_th_subdevice {
456 .name = "msc", 457 .name = "msc",
457 .id = 0, 458 .id = 0,
458 .type = INTEL_TH_OUTPUT, 459 .type = INTEL_TH_OUTPUT,
460 .mknode = true,
459 .otype = GTH_MSU, 461 .otype = GTH_MSU,
460 .scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC0_IS_ENABLED, 462 .scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC0_IS_ENABLED,
461 }, 463 },
@@ -476,6 +478,7 @@ static const struct intel_th_subdevice {
476 .name = "msc", 478 .name = "msc",
477 .id = 1, 479 .id = 1,
478 .type = INTEL_TH_OUTPUT, 480 .type = INTEL_TH_OUTPUT,
481 .mknode = true,
479 .otype = GTH_MSU, 482 .otype = GTH_MSU,
480 .scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC1_IS_ENABLED, 483 .scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC1_IS_ENABLED,
481 }, 484 },
@@ -635,7 +638,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
635 } 638 }
636 639
637 if (subdev->type == INTEL_TH_OUTPUT) { 640 if (subdev->type == INTEL_TH_OUTPUT) {
638 thdev->dev.devt = MKDEV(th->major, th->num_thdevs); 641 if (subdev->mknode)
642 thdev->dev.devt = MKDEV(th->major, th->num_thdevs);
639 thdev->output.type = subdev->otype; 643 thdev->output.type = subdev->otype;
640 thdev->output.port = -1; 644 thdev->output.port = -1;
641 thdev->output.scratchpad = subdev->scrpd; 645 thdev->output.scratchpad = subdev->scrpd;
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 8426b7970c14..edc52d75e6bd 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -607,6 +607,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
607{ 607{
608 struct gth_device *gth = dev_get_drvdata(&thdev->dev); 608 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
609 int port = othdev->output.port; 609 int port = othdev->output.port;
610 int master;
610 611
611 if (thdev->host_mode) 612 if (thdev->host_mode)
612 return; 613 return;
@@ -615,6 +616,9 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
615 othdev->output.port = -1; 616 othdev->output.port = -1;
616 othdev->output.active = false; 617 othdev->output.active = false;
617 gth->output[port].output = NULL; 618 gth->output[port].output = NULL;
619 for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
620 if (gth->master[master] == port)
621 gth->master[master] = -1;
618 spin_unlock(&gth->gth_lock); 622 spin_unlock(&gth->gth_lock);
619} 623}
620 624
diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c
index 56694339cb06..0da6b787f553 100644
--- a/drivers/hwtracing/intel_th/pti.c
+++ b/drivers/hwtracing/intel_th/pti.c
@@ -272,19 +272,17 @@ static ssize_t lpp_dest_store(struct device *dev, struct device_attribute *attr,
272 const char *buf, size_t size) 272 const char *buf, size_t size)
273{ 273{
274 struct pti_device *pti = dev_get_drvdata(dev); 274 struct pti_device *pti = dev_get_drvdata(dev);
275 ssize_t ret = -EINVAL;
276 int i; 275 int i;
277 276
278 for (i = 0; i < ARRAY_SIZE(lpp_dest_str); i++) 277 i = sysfs_match_string(lpp_dest_str, buf);
279 if (sysfs_streq(buf, lpp_dest_str[i])) 278 if (i < 0)
280 break; 279 return i;
281 280
282 if (i < ARRAY_SIZE(lpp_dest_str) && pti->lpp_dest_mask & BIT(i)) { 281 if (!(pti->lpp_dest_mask & BIT(i)))
283 pti->lpp_dest = i; 282 return -EINVAL;
284 ret = size;
285 }
286 283
287 return ret; 284 pti->lpp_dest = i;
285 return size;
288} 286}
289 287
290static DEVICE_ATTR_RW(lpp_dest); 288static DEVICE_ATTR_RW(lpp_dest);
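
The pti.c change above is a common cleanup: sysfs_match_string() from <linux/string.h> replaces an open-coded sysfs_streq() loop and returns the index of the matching array entry or a negative errno. A sketch of the resulting store-callback shape, with a made-up attribute and mode table:

#include <linux/device.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static const char * const example_modes[] = { "off", "low", "high" };

static ssize_t example_mode_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int i = sysfs_match_string(example_modes, buf);

	if (i < 0)
		return i;	/* -EINVAL when nothing matches */

	/* apply example_modes[i] to the (hypothetical) device here */

	return size;
}
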
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c
index 4b7ae47789d2..3a1f4e650378 100644
--- a/drivers/hwtracing/intel_th/sth.c
+++ b/drivers/hwtracing/intel_th/sth.c
@@ -84,8 +84,12 @@ static ssize_t notrace sth_stm_packet(struct stm_data *stm_data,
84 /* Global packets (GERR, XSYNC, TRIG) are sent with register writes */ 84 /* Global packets (GERR, XSYNC, TRIG) are sent with register writes */
85 case STP_PACKET_GERR: 85 case STP_PACKET_GERR:
86 reg += 4; 86 reg += 4;
87 /* fall through */
88
87 case STP_PACKET_XSYNC: 89 case STP_PACKET_XSYNC:
88 reg += 8; 90 reg += 8;
91 /* fall through */
92
89 case STP_PACKET_TRIG: 93 case STP_PACKET_TRIG:
90 if (flags & STP_PACKET_TIMESTAMPED) 94 if (flags & STP_PACKET_TIMESTAMPED)
91 reg += 4; 95 reg += 4;
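
The sth.c hunk only annotates existing behaviour: the switch builds a cumulative register offset, so each case is meant to fall into the next, and the comments document that for the compiler's implicit-fallthrough warning. A standalone illustration of the same technique (the enum and wrapper are made up; the offsets mirror the hunk):

/* GERR adds 4, then falls into XSYNC (+8), then TRIG (+4 if timestamped). */
enum example_packet { EXAMPLE_TRIG, EXAMPLE_XSYNC, EXAMPLE_GERR };

static unsigned int example_packet_reg(unsigned int base,
				       enum example_packet type,
				       bool timestamped)
{
	unsigned int reg = base;

	switch (type) {
	case EXAMPLE_GERR:
		reg += 4;
		/* fall through */
	case EXAMPLE_XSYNC:
		reg += 8;
		/* fall through */
	case EXAMPLE_TRIG:
		if (timestamped)
			reg += 4;
		break;
	}

	return reg;
}
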
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 93ce3aa740a9..c7ba8acfd4d5 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -244,6 +244,9 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start,
244 ; 244 ;
245 if (i == width) 245 if (i == width)
246 return pos; 246 return pos;
247
248 /* step over [pos..pos+i) to continue search */
249 pos += i;
247 } 250 }
248 251
249 return -1; 252 return -1;
@@ -732,7 +735,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
732 struct stm_device *stm = stmf->stm; 735 struct stm_device *stm = stmf->stm;
733 struct stp_policy_id *id; 736 struct stp_policy_id *id;
734 char *ids[] = { NULL, NULL }; 737 char *ids[] = { NULL, NULL };
735 int ret = -EINVAL; 738 int ret = -EINVAL, wlimit = 1;
736 u32 size; 739 u32 size;
737 740
738 if (stmf->output.nr_chans) 741 if (stmf->output.nr_chans)
@@ -760,8 +763,10 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
760 if (id->__reserved_0 || id->__reserved_1) 763 if (id->__reserved_0 || id->__reserved_1)
761 goto err_free; 764 goto err_free;
762 765
763 if (id->width < 1 || 766 if (stm->data->sw_mmiosz)
764 id->width > PAGE_SIZE / stm->data->sw_mmiosz) 767 wlimit = PAGE_SIZE / stm->data->sw_mmiosz;
768
769 if (id->width < 1 || id->width > wlimit)
765 goto err_free; 770 goto err_free;
766 771
767 ids[0] = id->id; 772 ids[0] = id->id;
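
Two easy-to-miss fixes sit in the stm/core.c hunks above: the channel search now steps over a window it has already ruled out instead of re-examining it bit by bit, and the width check no longer divides by stm->data->sw_mmiosz when that field is zero. A simplified sketch of the search fix follows, built on the standard bitmap helpers; it illustrates the technique and is not the driver's exact loop.

#include <linux/bitops.h>

static int example_find_free_window(unsigned long *bitmap, unsigned int start,
				    unsigned int end, unsigned int width)
{
	unsigned int pos, i;

	for (pos = start; pos < end + 1; pos++) {
		pos = find_next_zero_bit(bitmap, end + 1, pos);
		if (pos + width > end + 1)
			break;

		/* count consecutive free bits starting at pos */
		for (i = 0; i < width && !test_bit(pos + i, bitmap); i++)
			;
		if (i == width)
			return pos;

		/*
		 * Bits pos..pos+i-1 are free but bit pos+i is not, so no
		 * window starting inside [pos, pos+i] can fit: step over
		 * it (the loop increment then moves past pos+i).
		 */
		pos += i;
	}

	return -1;
}
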
diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig
new file mode 100644
index 000000000000..07a8276fa35a
--- /dev/null
+++ b/drivers/interconnect/Kconfig
@@ -0,0 +1,15 @@
1menuconfig INTERCONNECT
2 tristate "On-Chip Interconnect management support"
3 help
4 Support for management of the on-chip interconnects.
5
6 This framework is designed to provide a generic interface for
7 managing the interconnects in a SoC.
8
9 If unsure, say no.
10
11if INTERCONNECT
12
13source "drivers/interconnect/qcom/Kconfig"
14
15endif
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
new file mode 100644
index 000000000000..28f2ab0824d5
--- /dev/null
+++ b/drivers/interconnect/Makefile
@@ -0,0 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0
2
3icc-core-objs := core.o
4
5obj-$(CONFIG_INTERCONNECT) += icc-core.o
6obj-$(CONFIG_INTERCONNECT_QCOM) += qcom/
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
new file mode 100644
index 000000000000..6005a1c189f6
--- /dev/null
+++ b/drivers/interconnect/core.c
@@ -0,0 +1,799 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Interconnect framework core driver
4 *
5 * Copyright (c) 2017-2019, Linaro Ltd.
6 * Author: Georgi Djakov <georgi.djakov@linaro.org>
7 */
8
9#include <linux/debugfs.h>
10#include <linux/device.h>
11#include <linux/idr.h>
12#include <linux/init.h>
13#include <linux/interconnect.h>
14#include <linux/interconnect-provider.h>
15#include <linux/list.h>
16#include <linux/module.h>
17#include <linux/mutex.h>
18#include <linux/slab.h>
19#include <linux/of.h>
20#include <linux/overflow.h>
21
22static DEFINE_IDR(icc_idr);
23static LIST_HEAD(icc_providers);
24static DEFINE_MUTEX(icc_lock);
25static struct dentry *icc_debugfs_dir;
26
27/**
28 * struct icc_req - constraints that are attached to each node
29 * @req_node: entry in list of requests for the particular @node
30 * @node: the interconnect node to which this constraint applies
31 * @dev: reference to the device that sets the constraints
32 * @avg_bw: an integer describing the average bandwidth in kBps
33 * @peak_bw: an integer describing the peak bandwidth in kBps
34 */
35struct icc_req {
36 struct hlist_node req_node;
37 struct icc_node *node;
38 struct device *dev;
39 u32 avg_bw;
40 u32 peak_bw;
41};
42
43/**
44 * struct icc_path - interconnect path structure
45 * @num_nodes: number of hops (nodes)
46 * @reqs: array of the requests applicable to this path of nodes
47 */
48struct icc_path {
49 size_t num_nodes;
50 struct icc_req reqs[];
51};
52
53static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
54{
55 if (!n)
56 return;
57
58 seq_printf(s, "%-30s %12u %12u\n",
59 n->name, n->avg_bw, n->peak_bw);
60}
61
62static int icc_summary_show(struct seq_file *s, void *data)
63{
64 struct icc_provider *provider;
65
66 seq_puts(s, " node avg peak\n");
67 seq_puts(s, "--------------------------------------------------------\n");
68
69 mutex_lock(&icc_lock);
70
71 list_for_each_entry(provider, &icc_providers, provider_list) {
72 struct icc_node *n;
73
74 list_for_each_entry(n, &provider->nodes, node_list) {
75 struct icc_req *r;
76
77 icc_summary_show_one(s, n);
78 hlist_for_each_entry(r, &n->req_list, req_node) {
79 if (!r->dev)
80 continue;
81
82 seq_printf(s, " %-26s %12u %12u\n",
83 dev_name(r->dev), r->avg_bw,
84 r->peak_bw);
85 }
86 }
87 }
88
89 mutex_unlock(&icc_lock);
90
91 return 0;
92}
93
94static int icc_summary_open(struct inode *inode, struct file *file)
95{
96 return single_open(file, icc_summary_show, inode->i_private);
97}
98
99static const struct file_operations icc_summary_fops = {
100 .open = icc_summary_open,
101 .read = seq_read,
102 .llseek = seq_lseek,
103 .release = single_release,
104};
105
106static struct icc_node *node_find(const int id)
107{
108 return idr_find(&icc_idr, id);
109}
110
111static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
112 ssize_t num_nodes)
113{
114 struct icc_node *node = dst;
115 struct icc_path *path;
116 int i;
117
118 path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
119 if (!path)
120 return ERR_PTR(-ENOMEM);
121
122 path->num_nodes = num_nodes;
123
124 for (i = num_nodes - 1; i >= 0; i--) {
125 node->provider->users++;
126 hlist_add_head(&path->reqs[i].req_node, &node->req_list);
127 path->reqs[i].node = node;
128 path->reqs[i].dev = dev;
129 /* reference to previous node was saved during path traversal */
130 node = node->reverse;
131 }
132
133 return path;
134}
135
136static struct icc_path *path_find(struct device *dev, struct icc_node *src,
137 struct icc_node *dst)
138{
139 struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
140 struct icc_node *n, *node = NULL;
141 struct list_head traverse_list;
142 struct list_head edge_list;
143 struct list_head visited_list;
144 size_t i, depth = 1;
145 bool found = false;
146
147 INIT_LIST_HEAD(&traverse_list);
148 INIT_LIST_HEAD(&edge_list);
149 INIT_LIST_HEAD(&visited_list);
150
151 list_add(&src->search_list, &traverse_list);
152 src->reverse = NULL;
153
154 do {
155 list_for_each_entry_safe(node, n, &traverse_list, search_list) {
156 if (node == dst) {
157 found = true;
158 list_splice_init(&edge_list, &visited_list);
159 list_splice_init(&traverse_list, &visited_list);
160 break;
161 }
162 for (i = 0; i < node->num_links; i++) {
163 struct icc_node *tmp = node->links[i];
164
165 if (!tmp) {
166 path = ERR_PTR(-ENOENT);
167 goto out;
168 }
169
170 if (tmp->is_traversed)
171 continue;
172
173 tmp->is_traversed = true;
174 tmp->reverse = node;
175 list_add_tail(&tmp->search_list, &edge_list);
176 }
177 }
178
179 if (found)
180 break;
181
182 list_splice_init(&traverse_list, &visited_list);
183 list_splice_init(&edge_list, &traverse_list);
184
185 /* count the hops including the source */
186 depth++;
187
188 } while (!list_empty(&traverse_list));
189
190out:
191
192 /* reset the traversed state */
193 list_for_each_entry_reverse(n, &visited_list, search_list)
194 n->is_traversed = false;
195
196 if (found)
197 path = path_init(dev, dst, depth);
198
199 return path;
200}
201
202/*
203 * We want the path to honor all bandwidth requests, so the average and peak
204 * bandwidth requirements from each consumer are aggregated at each node.
205 * The aggregation is platform specific, so each platform can customize it by
206 * implementing its own aggregate() function.
207 */
208
209static int aggregate_requests(struct icc_node *node)
210{
211 struct icc_provider *p = node->provider;
212 struct icc_req *r;
213
214 node->avg_bw = 0;
215 node->peak_bw = 0;
216
217 hlist_for_each_entry(r, &node->req_list, req_node)
218 p->aggregate(node, r->avg_bw, r->peak_bw,
219 &node->avg_bw, &node->peak_bw);
220
221 return 0;
222}
223
224static int apply_constraints(struct icc_path *path)
225{
226 struct icc_node *next, *prev = NULL;
227 int ret = -EINVAL;
228 int i;
229
230 for (i = 0; i < path->num_nodes; i++) {
231 next = path->reqs[i].node;
232
233 /*
234 * Both endpoints should be valid master-slave pairs of the
235 * same interconnect provider that will be configured.
236 */
237 if (!prev || next->provider != prev->provider) {
238 prev = next;
239 continue;
240 }
241
242 /* set the constraints */
243 ret = next->provider->set(prev, next);
244 if (ret)
245 goto out;
246
247 prev = next;
248 }
249out:
250 return ret;
251}
252
253/* of_icc_xlate_onecell() - Translate function using a single index.
254 * @spec: OF phandle args to map into an interconnect node.
255 * @data: private data (pointer to struct icc_onecell_data)
256 *
257 * This is a generic translate function that can be used to model simple
258 * interconnect providers that have one device tree node and provide
259 * multiple interconnect nodes. A single cell is used as an index into
260 * an array of icc nodes specified in the icc_onecell_data struct when
261 * registering the provider.
262 */
263struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
264 void *data)
265{
266 struct icc_onecell_data *icc_data = data;
267 unsigned int idx = spec->args[0];
268
269 if (idx >= icc_data->num_nodes) {
270 pr_err("%s: invalid index %u\n", __func__, idx);
271 return ERR_PTR(-EINVAL);
272 }
273
274 return icc_data->nodes[idx];
275}
276EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
277
278/**
279 * of_icc_get_from_provider() - Look-up interconnect node
280 * @spec: OF phandle args to use for look-up
281 *
282 * Looks for interconnect provider under the node specified by @spec and if
283 * found, uses xlate function of the provider to map phandle args to node.
284 *
285 * Returns a valid pointer to struct icc_node on success or ERR_PTR()
286 * on failure.
287 */
288static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
289{
290 struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
291 struct icc_provider *provider;
292
293 if (!spec || spec->args_count != 1)
294 return ERR_PTR(-EINVAL);
295
296 mutex_lock(&icc_lock);
297 list_for_each_entry(provider, &icc_providers, provider_list) {
298 if (provider->dev->of_node == spec->np)
299 node = provider->xlate(spec, provider->data);
300 if (!IS_ERR(node))
301 break;
302 }
303 mutex_unlock(&icc_lock);
304
305 return node;
306}
307
308/**
309 * of_icc_get() - get a path handle from a DT node based on name
310 * @dev: device pointer for the consumer device
311 * @name: interconnect path name
312 *
313 * This function will search for a path between two endpoints and return an
314 * icc_path handle on success. Use icc_put() to release constraints when they
315 * are not needed anymore.
316 * If the interconnect API is disabled, NULL is returned and the consumer
317 * drivers will still build. Drivers are free to handle this specifically,
318 * but they don't have to.
319 *
320 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
321 * when the API is disabled or the "interconnects" DT property is missing.
322 */
323struct icc_path *of_icc_get(struct device *dev, const char *name)
324{
325 struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
326 struct icc_node *src_node, *dst_node;
327 struct device_node *np = NULL;
328 struct of_phandle_args src_args, dst_args;
329 int idx = 0;
330 int ret;
331
332 if (!dev || !dev->of_node)
333 return ERR_PTR(-ENODEV);
334
335 np = dev->of_node;
336
337 /*
338	 * When the consumer DT node does not have an "interconnects" property,
339	 * return a NULL path to skip setting constraints.
340 */
341 if (!of_find_property(np, "interconnects", NULL))
342 return NULL;
343
344 /*
345 * We use a combination of phandle and specifier for endpoint. For now
346	 * let's support only global ids and extend this in the future if needed
347 * without breaking DT compatibility.
348 */
349 if (name) {
350 idx = of_property_match_string(np, "interconnect-names", name);
351 if (idx < 0)
352 return ERR_PTR(idx);
353 }
354
355 ret = of_parse_phandle_with_args(np, "interconnects",
356 "#interconnect-cells", idx * 2,
357 &src_args);
358 if (ret)
359 return ERR_PTR(ret);
360
361 of_node_put(src_args.np);
362
363 ret = of_parse_phandle_with_args(np, "interconnects",
364 "#interconnect-cells", idx * 2 + 1,
365 &dst_args);
366 if (ret)
367 return ERR_PTR(ret);
368
369 of_node_put(dst_args.np);
370
371 src_node = of_icc_get_from_provider(&src_args);
372
373 if (IS_ERR(src_node)) {
374 if (PTR_ERR(src_node) != -EPROBE_DEFER)
375 dev_err(dev, "error finding src node: %ld\n",
376 PTR_ERR(src_node));
377 return ERR_CAST(src_node);
378 }
379
380 dst_node = of_icc_get_from_provider(&dst_args);
381
382 if (IS_ERR(dst_node)) {
383 if (PTR_ERR(dst_node) != -EPROBE_DEFER)
384 dev_err(dev, "error finding dst node: %ld\n",
385 PTR_ERR(dst_node));
386 return ERR_CAST(dst_node);
387 }
388
389 mutex_lock(&icc_lock);
390 path = path_find(dev, src_node, dst_node);
391 if (IS_ERR(path))
392 dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
393 mutex_unlock(&icc_lock);
394
395 return path;
396}
397EXPORT_SYMBOL_GPL(of_icc_get);
398
399/**
400 * icc_set_bw() - set bandwidth constraints on an interconnect path
401 * @path: reference to the path returned by icc_get()
402 * @avg_bw: average bandwidth in kilobytes per second
403 * @peak_bw: peak bandwidth in kilobytes per second
404 *
405 * This function is used by an interconnect consumer to express its own needs
406 * in terms of bandwidth for a previously requested path between two endpoints.
407 * The requests are aggregated and each node is updated accordingly. The entire
408 * path is locked by a mutex to ensure that the set() is completed.
409	 * The @path can be NULL when the "interconnects" DT property is missing,
410 * which will mean that no constraints will be set.
411 *
412 * Returns 0 on success, or an appropriate error code otherwise.
413 */
414int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
415{
416 struct icc_node *node;
417 u32 old_avg, old_peak;
418 size_t i;
419 int ret;
420
421 if (!path || !path->num_nodes)
422 return 0;
423
424 mutex_lock(&icc_lock);
425
426 old_avg = path->reqs[0].avg_bw;
427 old_peak = path->reqs[0].peak_bw;
428
429 for (i = 0; i < path->num_nodes; i++) {
430 node = path->reqs[i].node;
431
432 /* update the consumer request for this path */
433 path->reqs[i].avg_bw = avg_bw;
434 path->reqs[i].peak_bw = peak_bw;
435
436 /* aggregate requests for this node */
437 aggregate_requests(node);
438 }
439
440 ret = apply_constraints(path);
441 if (ret) {
442 pr_debug("interconnect: error applying constraints (%d)\n",
443 ret);
444
445 for (i = 0; i < path->num_nodes; i++) {
446 node = path->reqs[i].node;
447 path->reqs[i].avg_bw = old_avg;
448 path->reqs[i].peak_bw = old_peak;
449 aggregate_requests(node);
450 }
451 apply_constraints(path);
452 }
453
454 mutex_unlock(&icc_lock);
455
456 return ret;
457}
458EXPORT_SYMBOL_GPL(icc_set_bw);
459
460/**
461 * icc_get() - return a handle for path between two endpoints
462 * @dev: the device requesting the path
463 * @src_id: source device port id
464 * @dst_id: destination device port id
465 *
466 * This function will search for a path between two endpoints and return an
467 * icc_path handle on success. Use icc_put() to release
468 * constraints when they are not needed anymore.
469 * If the interconnect API is disabled, NULL is returned and the consumer
470 * drivers will still build. Drivers are free to handle this specifically,
471 * but they don't have to.
472 *
473 * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
474 * interconnect API is disabled.
475 */
476struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
477{
478 struct icc_node *src, *dst;
479 struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
480
481 mutex_lock(&icc_lock);
482
483 src = node_find(src_id);
484 if (!src)
485 goto out;
486
487 dst = node_find(dst_id);
488 if (!dst)
489 goto out;
490
491 path = path_find(dev, src, dst);
492 if (IS_ERR(path))
493 dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
494
495out:
496 mutex_unlock(&icc_lock);
497 return path;
498}
499EXPORT_SYMBOL_GPL(icc_get);
500
501/**
502 * icc_put() - release the reference to the icc_path
503 * @path: interconnect path
504 *
505 * Use this function to release the constraints on a path when the path is
506 * no longer needed. The constraints will be re-aggregated.
507 */
508void icc_put(struct icc_path *path)
509{
510 struct icc_node *node;
511 size_t i;
512 int ret;
513
514 if (!path || WARN_ON(IS_ERR(path)))
515 return;
516
517 ret = icc_set_bw(path, 0, 0);
518 if (ret)
519 pr_err("%s: error (%d)\n", __func__, ret);
520
521 mutex_lock(&icc_lock);
522 for (i = 0; i < path->num_nodes; i++) {
523 node = path->reqs[i].node;
524 hlist_del(&path->reqs[i].req_node);
525 if (!WARN_ON(!node->provider->users))
526 node->provider->users--;
527 }
528 mutex_unlock(&icc_lock);
529
530 kfree(path);
531}
532EXPORT_SYMBOL_GPL(icc_put);
533
534static struct icc_node *icc_node_create_nolock(int id)
535{
536 struct icc_node *node;
537
538 /* check if node already exists */
539 node = node_find(id);
540 if (node)
541 return node;
542
543 node = kzalloc(sizeof(*node), GFP_KERNEL);
544 if (!node)
545 return ERR_PTR(-ENOMEM);
546
547 id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
548 if (id < 0) {
549 WARN(1, "%s: couldn't get idr\n", __func__);
550 kfree(node);
551 return ERR_PTR(id);
552 }
553
554 node->id = id;
555
556 return node;
557}
558
559/**
560 * icc_node_create() - create a node
561 * @id: node id
562 *
563 * Return: icc_node pointer on success, or ERR_PTR() on error
564 */
565struct icc_node *icc_node_create(int id)
566{
567 struct icc_node *node;
568
569 mutex_lock(&icc_lock);
570
571 node = icc_node_create_nolock(id);
572
573 mutex_unlock(&icc_lock);
574
575 return node;
576}
577EXPORT_SYMBOL_GPL(icc_node_create);
578
579/**
580 * icc_node_destroy() - destroy a node
581 * @id: node id
582 */
583void icc_node_destroy(int id)
584{
585 struct icc_node *node;
586
587 mutex_lock(&icc_lock);
588
589 node = node_find(id);
590 if (node) {
591 idr_remove(&icc_idr, node->id);
592 WARN_ON(!hlist_empty(&node->req_list));
593 }
594
595 mutex_unlock(&icc_lock);
596
597 kfree(node);
598}
599EXPORT_SYMBOL_GPL(icc_node_destroy);
600
601/**
602 * icc_link_create() - create a link between two nodes
603 * @node: source node id
604 * @dst_id: destination node id
605 *
606 * Create a link between two nodes. The nodes might belong to different
607 * interconnect providers and the @dst_id node might not exist (if the
608 * provider driver has not probed yet). So just create the @dst_id node
609 * and when the actual provider driver is probed, the rest of the node
610 * data is filled.
611 *
612 * Return: 0 on success, or an error code otherwise
613 */
614int icc_link_create(struct icc_node *node, const int dst_id)
615{
616 struct icc_node *dst;
617 struct icc_node **new;
618 int ret = 0;
619
620 if (!node->provider)
621 return -EINVAL;
622
623 mutex_lock(&icc_lock);
624
625 dst = node_find(dst_id);
626 if (!dst) {
627 dst = icc_node_create_nolock(dst_id);
628
629 if (IS_ERR(dst)) {
630 ret = PTR_ERR(dst);
631 goto out;
632 }
633 }
634
635 new = krealloc(node->links,
636 (node->num_links + 1) * sizeof(*node->links),
637 GFP_KERNEL);
638 if (!new) {
639 ret = -ENOMEM;
640 goto out;
641 }
642
643 node->links = new;
644 node->links[node->num_links++] = dst;
645
646out:
647 mutex_unlock(&icc_lock);
648
649 return ret;
650}
651EXPORT_SYMBOL_GPL(icc_link_create);
652
653/**
654 * icc_link_destroy() - destroy a link between two nodes
655 * @src: pointer to source node
656 * @dst: pointer to destination node
657 *
658 * Return: 0 on success, or an error code otherwise
659 */
660int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
661{
662 struct icc_node **new;
663 size_t slot;
664 int ret = 0;
665
666 if (IS_ERR_OR_NULL(src))
667 return -EINVAL;
668
669 if (IS_ERR_OR_NULL(dst))
670 return -EINVAL;
671
672 mutex_lock(&icc_lock);
673
674 for (slot = 0; slot < src->num_links; slot++)
675 if (src->links[slot] == dst)
676 break;
677
678 if (WARN_ON(slot == src->num_links)) {
679 ret = -ENXIO;
680 goto out;
681 }
682
683 src->links[slot] = src->links[--src->num_links];
684
685 new = krealloc(src->links, src->num_links * sizeof(*src->links),
686 GFP_KERNEL);
687 if (new)
688 src->links = new;
689
690out:
691 mutex_unlock(&icc_lock);
692
693 return ret;
694}
695EXPORT_SYMBOL_GPL(icc_link_destroy);
696
697/**
698 * icc_node_add() - add interconnect node to interconnect provider
699 * @node: pointer to the interconnect node
700 * @provider: pointer to the interconnect provider
701 */
702void icc_node_add(struct icc_node *node, struct icc_provider *provider)
703{
704 mutex_lock(&icc_lock);
705
706 node->provider = provider;
707 list_add_tail(&node->node_list, &provider->nodes);
708
709 mutex_unlock(&icc_lock);
710}
711EXPORT_SYMBOL_GPL(icc_node_add);
712
713/**
714 * icc_node_del() - delete interconnect node from interconnect provider
715 * @node: pointer to the interconnect node
716 */
717void icc_node_del(struct icc_node *node)
718{
719 mutex_lock(&icc_lock);
720
721 list_del(&node->node_list);
722
723 mutex_unlock(&icc_lock);
724}
725EXPORT_SYMBOL_GPL(icc_node_del);
726
727/**
728 * icc_provider_add() - add a new interconnect provider
729 * @provider: the interconnect provider that will be added into topology
730 *
731 * Return: 0 on success, or an error code otherwise
732 */
733int icc_provider_add(struct icc_provider *provider)
734{
735 if (WARN_ON(!provider->set))
736 return -EINVAL;
737 if (WARN_ON(!provider->xlate))
738 return -EINVAL;
739
740 mutex_lock(&icc_lock);
741
742 INIT_LIST_HEAD(&provider->nodes);
743 list_add_tail(&provider->provider_list, &icc_providers);
744
745 mutex_unlock(&icc_lock);
746
747 dev_dbg(provider->dev, "interconnect provider added to topology\n");
748
749 return 0;
750}
751EXPORT_SYMBOL_GPL(icc_provider_add);
752
753/**
754 * icc_provider_del() - delete previously added interconnect provider
755 * @provider: the interconnect provider that will be removed from topology
756 *
757 * Return: 0 on success, or an error code otherwise
758 */
759int icc_provider_del(struct icc_provider *provider)
760{
761 mutex_lock(&icc_lock);
762 if (provider->users) {
763 pr_warn("interconnect provider still has %d users\n",
764 provider->users);
765 mutex_unlock(&icc_lock);
766 return -EBUSY;
767 }
768
769 if (!list_empty(&provider->nodes)) {
770 pr_warn("interconnect provider still has nodes\n");
771 mutex_unlock(&icc_lock);
772 return -EBUSY;
773 }
774
775 list_del(&provider->provider_list);
776 mutex_unlock(&icc_lock);
777
778 return 0;
779}
780EXPORT_SYMBOL_GPL(icc_provider_del);
781
782static int __init icc_init(void)
783{
784 icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
785 debugfs_create_file("interconnect_summary", 0444,
786 icc_debugfs_dir, NULL, &icc_summary_fops);
787 return 0;
788}
789
790static void __exit icc_exit(void)
791{
792 debugfs_remove_recursive(icc_debugfs_dir);
793}
794module_init(icc_init);
795module_exit(icc_exit);
796
797MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
798MODULE_DESCRIPTION("Interconnect Driver Core");
799MODULE_LICENSE("GPL v2");
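
core.c above defines both halves of the new framework: consumers call of_icc_get()/icc_get(), icc_set_bw() and icc_put(), while providers register a topology through icc_provider_add(), icc_node_create(), icc_node_add() and icc_link_create() and supply aggregate(), set() and xlate() callbacks. A minimal provider-side sketch using only those calls; the two-node topology, IDs and trivial callbacks are illustrative, not a real platform driver.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect-provider.h>
#include <linux/kernel.h>

#define EXAMPLE_MASTER	1
#define EXAMPLE_SLAVE	2

/* Illustrative aggregation: sum the averages, keep the highest peak */
static int example_aggregate(struct icc_node *node, u32 avg_bw, u32 peak_bw,
			     u32 *agg_avg, u32 *agg_peak)
{
	*agg_avg += avg_bw;
	*agg_peak = max(*agg_peak, peak_bw);

	return 0;
}

static int example_set(struct icc_node *src, struct icc_node *dst)
{
	/* program the hardware from dst->avg_bw / dst->peak_bw here */
	return 0;
}

static int example_provider_register(struct device *dev,
				     struct icc_provider *provider,
				     struct icc_onecell_data *xlate_data)
{
	struct icc_node *node;
	int ret;

	provider->dev = dev;
	provider->set = example_set;
	provider->aggregate = example_aggregate;
	provider->xlate = of_icc_xlate_onecell;
	provider->data = xlate_data;

	ret = icc_provider_add(provider);
	if (ret)
		return ret;

	node = icc_node_create(EXAMPLE_MASTER);
	if (IS_ERR(node)) {
		icc_provider_del(provider);
		return PTR_ERR(node);
	}

	node->name = "example_master";
	icc_node_add(node, provider);

	/* the slave node is created on demand if it does not exist yet */
	return icc_link_create(node, EXAMPLE_SLAVE);
}
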
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
new file mode 100644
index 000000000000..290d330abe5a
--- /dev/null
+++ b/drivers/interconnect/qcom/Kconfig
@@ -0,0 +1,13 @@
1config INTERCONNECT_QCOM
2 bool "Qualcomm Network-on-Chip interconnect drivers"
3 depends on ARCH_QCOM
4 help
5 Support for Qualcomm's Network-on-Chip interconnect hardware.
6
7config INTERCONNECT_QCOM_SDM845
8 tristate "Qualcomm SDM845 interconnect driver"
9 depends on INTERCONNECT_QCOM
10 depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
11 help
12 This is a driver for the Qualcomm Network-on-Chip on sdm845-based
13 platforms.
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
new file mode 100644
index 000000000000..1c1cea690f92
--- /dev/null
+++ b/drivers/interconnect/qcom/Makefile
@@ -0,0 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0
2
3qnoc-sdm845-objs := sdm845.o
4
5obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
new file mode 100644
index 000000000000..4915b78da673
--- /dev/null
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -0,0 +1,838 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
4 *
5 */
6
7#include <asm/div64.h>
8#include <dt-bindings/interconnect/qcom,sdm845.h>
9#include <linux/device.h>
10#include <linux/interconnect.h>
11#include <linux/interconnect-provider.h>
12#include <linux/io.h>
13#include <linux/module.h>
14#include <linux/of_device.h>
15#include <linux/of_platform.h>
16#include <linux/platform_device.h>
17#include <linux/sort.h>
18
19#include <soc/qcom/cmd-db.h>
20#include <soc/qcom/rpmh.h>
21#include <soc/qcom/tcs.h>
22
23#define BCM_TCS_CMD_COMMIT_SHFT 30
24#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
25#define BCM_TCS_CMD_VALID_SHFT 29
26#define BCM_TCS_CMD_VALID_MASK 0x20000000
27#define BCM_TCS_CMD_VOTE_X_SHFT 14
28#define BCM_TCS_CMD_VOTE_MASK 0x3fff
29#define BCM_TCS_CMD_VOTE_Y_SHFT 0
30#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000
31
32#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
33 (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \
34 ((valid) << BCM_TCS_CMD_VALID_SHFT) | \
35 ((cpu_to_le32(vote_x) & \
36 BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \
37 ((cpu_to_le32(vote_y) & \
38 BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
39
40#define to_qcom_provider(_provider) \
41 container_of(_provider, struct qcom_icc_provider, provider)
42
43struct qcom_icc_provider {
44 struct icc_provider provider;
45 struct device *dev;
46 struct qcom_icc_bcm **bcms;
47 size_t num_bcms;
48};
49
50/**
51 * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
52 * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
53 * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
54 * @vcd: virtual clock domain that this bcm belongs to
55 * @reserved: reserved field
56 */
57struct bcm_db {
58 __le32 unit;
59 __le16 width;
60 u8 vcd;
61 u8 reserved;
62};
63
64#define SDM845_MAX_LINKS 43
65#define SDM845_MAX_BCMS 30
66#define SDM845_MAX_BCM_PER_NODE 2
67#define SDM845_MAX_VCD 10
68
69/**
70 * struct qcom_icc_node - Qualcomm specific interconnect nodes
71 * @name: the node name used in debugfs
72 * @links: an array of nodes where we can go next while traversing
73 * @id: a unique node identifier
74 * @num_links: the total number of @links
75 * @channels: num of channels at this node
76 * @buswidth: width of the interconnect between a node and the bus
77 * @sum_avg: current sum aggregate value of all avg bw requests
78 * @max_peak: current max aggregate value of all peak bw requests
79 * @bcms: list of bcms associated with this logical node
80 * @num_bcms: num of @bcms
81 */
82struct qcom_icc_node {
83 const char *name;
84 u16 links[SDM845_MAX_LINKS];
85 u16 id;
86 u16 num_links;
87 u16 channels;
88 u16 buswidth;
89 u64 sum_avg;
90 u64 max_peak;
91 struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE];
92 size_t num_bcms;
93};
94
95/**
96 * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes
97 * known as Bus Clock Manager (BCM)
98 * @name: the bcm node name used to fetch BCM data from command db
99 * @type: latency or bandwidth bcm
100 * @addr: address offsets used when voting to RPMH
101 * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm
102 * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm
103 * @dirty: flag used to indicate whether the bcm needs to be committed
104 * @keepalive: flag used to indicate whether a keepalive is required
105 * @aux_data: auxiliary data used when calculating threshold values and
106 * communicating with RPMh
107 * @list: used to link to other bcms when compiling lists for commit
108 * @num_nodes: total number of @nodes
109 * @nodes: list of qcom_icc_nodes that this BCM encapsulates
110 */
111struct qcom_icc_bcm {
112 const char *name;
113 u32 type;
114 u32 addr;
115 u64 vote_x;
116 u64 vote_y;
117 bool dirty;
118 bool keepalive;
119 struct bcm_db aux_data;
120 struct list_head list;
121 size_t num_nodes;
122 struct qcom_icc_node *nodes[];
123};
124
125struct qcom_icc_fabric {
126 struct qcom_icc_node **nodes;
127 size_t num_nodes;
128};
129
130struct qcom_icc_desc {
131 struct qcom_icc_node **nodes;
132 size_t num_nodes;
133 struct qcom_icc_bcm **bcms;
134 size_t num_bcms;
135};
136
137#define DEFINE_QNODE(_name, _id, _channels, _buswidth, \
138 _numlinks, ...) \
139 static struct qcom_icc_node _name = { \
140 .id = _id, \
141 .name = #_name, \
142 .channels = _channels, \
143 .buswidth = _buswidth, \
144 .num_links = _numlinks, \
145 .links = { __VA_ARGS__ }, \
146 }
147
148DEFINE_QNODE(qhm_a1noc_cfg, MASTER_A1NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A1NOC);
149DEFINE_QNODE(qhm_qup1, MASTER_BLSP_1, 1, 4, 1, SLAVE_A1NOC_SNOC);
150DEFINE_QNODE(qhm_tsif, MASTER_TSIF, 1, 4, 1, SLAVE_A1NOC_SNOC);
151DEFINE_QNODE(xm_sdc2, MASTER_SDCC_2, 1, 8, 1, SLAVE_A1NOC_SNOC);
152DEFINE_QNODE(xm_sdc4, MASTER_SDCC_4, 1, 8, 1, SLAVE_A1NOC_SNOC);
153DEFINE_QNODE(xm_ufs_card, MASTER_UFS_CARD, 1, 8, 1, SLAVE_A1NOC_SNOC);
154DEFINE_QNODE(xm_ufs_mem, MASTER_UFS_MEM, 1, 8, 1, SLAVE_A1NOC_SNOC);
155DEFINE_QNODE(xm_pcie_0, MASTER_PCIE_0, 1, 8, 1, SLAVE_ANOC_PCIE_A1NOC_SNOC);
156DEFINE_QNODE(qhm_a2noc_cfg, MASTER_A2NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A2NOC);
157DEFINE_QNODE(qhm_qdss_bam, MASTER_QDSS_BAM, 1, 4, 1, SLAVE_A2NOC_SNOC);
158DEFINE_QNODE(qhm_qup2, MASTER_BLSP_2, 1, 4, 1, SLAVE_A2NOC_SNOC);
159DEFINE_QNODE(qnm_cnoc, MASTER_CNOC_A2NOC, 1, 8, 1, SLAVE_A2NOC_SNOC);
160DEFINE_QNODE(qxm_crypto, MASTER_CRYPTO, 1, 8, 1, SLAVE_A2NOC_SNOC);
161DEFINE_QNODE(qxm_ipa, MASTER_IPA, 1, 8, 1, SLAVE_A2NOC_SNOC);
162DEFINE_QNODE(xm_pcie3_1, MASTER_PCIE_1, 1, 8, 1, SLAVE_ANOC_PCIE_SNOC);
163DEFINE_QNODE(xm_qdss_etr, MASTER_QDSS_ETR, 1, 8, 1, SLAVE_A2NOC_SNOC);
164DEFINE_QNODE(xm_usb3_0, MASTER_USB3_0, 1, 8, 1, SLAVE_A2NOC_SNOC);
165DEFINE_QNODE(xm_usb3_1, MASTER_USB3_1, 1, 8, 1, SLAVE_A2NOC_SNOC);
166DEFINE_QNODE(qxm_camnoc_hf0_uncomp, MASTER_CAMNOC_HF0_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
167DEFINE_QNODE(qxm_camnoc_hf1_uncomp, MASTER_CAMNOC_HF1_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
168DEFINE_QNODE(qxm_camnoc_sf_uncomp, MASTER_CAMNOC_SF_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
169DEFINE_QNODE(qhm_spdm, MASTER_SPDM, 1, 4, 1, SLAVE_CNOC_A2NOC);
170DEFINE_QNODE(qhm_tic, MASTER_TIC, 1, 4, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
171DEFINE_QNODE(qnm_snoc, MASTER_SNOC_CNOC, 1, 8, 42, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_SERVICE_CNOC);
172DEFINE_QNODE(xm_qdss_dap, MASTER_QDSS_DAP, 1, 8, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
173DEFINE_QNODE(qhm_cnoc, MASTER_CNOC_DC_NOC, 1, 4, 2, SLAVE_LLCC_CFG, SLAVE_MEM_NOC_CFG);
174DEFINE_QNODE(acm_l3, MASTER_APPSS_PROC, 1, 16, 3, SLAVE_GNOC_SNOC, SLAVE_GNOC_MEM_NOC, SLAVE_SERVICE_GNOC);
175DEFINE_QNODE(pm_gnoc_cfg, MASTER_GNOC_CFG, 1, 4, 1, SLAVE_SERVICE_GNOC);
176DEFINE_QNODE(llcc_mc, MASTER_LLCC, 4, 4, 1, SLAVE_EBI1);
177DEFINE_QNODE(acm_tcu, MASTER_TCU_0, 1, 8, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
178DEFINE_QNODE(qhm_memnoc_cfg, MASTER_MEM_NOC_CFG, 1, 4, 2, SLAVE_MSS_PROC_MS_MPU_CFG, SLAVE_SERVICE_MEM_NOC);
179DEFINE_QNODE(qnm_apps, MASTER_GNOC_MEM_NOC, 2, 32, 1, SLAVE_LLCC);
180DEFINE_QNODE(qnm_mnoc_hf, MASTER_MNOC_HF_MEM_NOC, 2, 32, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
181DEFINE_QNODE(qnm_mnoc_sf, MASTER_MNOC_SF_MEM_NOC, 1, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
182DEFINE_QNODE(qnm_snoc_gc, MASTER_SNOC_GC_MEM_NOC, 1, 8, 1, SLAVE_LLCC);
183DEFINE_QNODE(qnm_snoc_sf, MASTER_SNOC_SF_MEM_NOC, 1, 16, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
184DEFINE_QNODE(qxm_gpu, MASTER_GFX3D, 2, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
185DEFINE_QNODE(qhm_mnoc_cfg, MASTER_CNOC_MNOC_CFG, 1, 4, 1, SLAVE_SERVICE_MNOC);
186DEFINE_QNODE(qxm_camnoc_hf0, MASTER_CAMNOC_HF0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
187DEFINE_QNODE(qxm_camnoc_hf1, MASTER_CAMNOC_HF1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
188DEFINE_QNODE(qxm_camnoc_sf, MASTER_CAMNOC_SF, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
189DEFINE_QNODE(qxm_mdp0, MASTER_MDP0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
190DEFINE_QNODE(qxm_mdp1, MASTER_MDP1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
191DEFINE_QNODE(qxm_rot, MASTER_ROTATOR, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
192DEFINE_QNODE(qxm_venus0, MASTER_VIDEO_P0, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
193DEFINE_QNODE(qxm_venus1, MASTER_VIDEO_P1, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
194DEFINE_QNODE(qxm_venus_arm9, MASTER_VIDEO_PROC, 1, 8, 1, SLAVE_MNOC_SF_MEM_NOC);
195DEFINE_QNODE(qhm_snoc_cfg, MASTER_SNOC_CFG, 1, 4, 1, SLAVE_SERVICE_SNOC);
196DEFINE_QNODE(qnm_aggre1_noc, MASTER_A1NOC_SNOC, 1, 16, 6, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
197DEFINE_QNODE(qnm_aggre2_noc, MASTER_A2NOC_SNOC, 1, 16, 9, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
198DEFINE_QNODE(qnm_gladiator_sodv, MASTER_GNOC_SNOC, 1, 8, 8, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
199DEFINE_QNODE(qnm_memnoc, MASTER_MEM_NOC_SNOC, 1, 8, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
200DEFINE_QNODE(qnm_pcie_anoc, MASTER_ANOC_PCIE_SNOC, 1, 16, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_QDSS_STM);
201DEFINE_QNODE(qxm_pimem, MASTER_PIMEM, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
202DEFINE_QNODE(xm_gic, MASTER_GIC, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
203DEFINE_QNODE(qns_a1noc_snoc, SLAVE_A1NOC_SNOC, 1, 16, 1, MASTER_A1NOC_SNOC);
204DEFINE_QNODE(srvc_aggre1_noc, SLAVE_SERVICE_A1NOC, 1, 4, 0);
205DEFINE_QNODE(qns_pcie_a1noc_snoc, SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
206DEFINE_QNODE(qns_a2noc_snoc, SLAVE_A2NOC_SNOC, 1, 16, 1, MASTER_A2NOC_SNOC);
207DEFINE_QNODE(qns_pcie_snoc, SLAVE_ANOC_PCIE_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
208DEFINE_QNODE(srvc_aggre2_noc, SLAVE_SERVICE_A2NOC, 1, 4, 0);
209DEFINE_QNODE(qns_camnoc_uncomp, SLAVE_CAMNOC_UNCOMP, 1, 32, 0);
210DEFINE_QNODE(qhs_a1_noc_cfg, SLAVE_A1NOC_CFG, 1, 4, 1, MASTER_A1NOC_CFG);
211DEFINE_QNODE(qhs_a2_noc_cfg, SLAVE_A2NOC_CFG, 1, 4, 1, MASTER_A2NOC_CFG);
212DEFINE_QNODE(qhs_aop, SLAVE_AOP, 1, 4, 0);
213DEFINE_QNODE(qhs_aoss, SLAVE_AOSS, 1, 4, 0);
214DEFINE_QNODE(qhs_camera_cfg, SLAVE_CAMERA_CFG, 1, 4, 0);
215DEFINE_QNODE(qhs_clk_ctl, SLAVE_CLK_CTL, 1, 4, 0);
216DEFINE_QNODE(qhs_compute_dsp_cfg, SLAVE_CDSP_CFG, 1, 4, 0);
217DEFINE_QNODE(qhs_cpr_cx, SLAVE_RBCPR_CX_CFG, 1, 4, 0);
218DEFINE_QNODE(qhs_crypto0_cfg, SLAVE_CRYPTO_0_CFG, 1, 4, 0);
219DEFINE_QNODE(qhs_dcc_cfg, SLAVE_DCC_CFG, 1, 4, 1, MASTER_CNOC_DC_NOC);
220DEFINE_QNODE(qhs_ddrss_cfg, SLAVE_CNOC_DDRSS, 1, 4, 0);
221DEFINE_QNODE(qhs_display_cfg, SLAVE_DISPLAY_CFG, 1, 4, 0);
222DEFINE_QNODE(qhs_glm, SLAVE_GLM, 1, 4, 0);
223DEFINE_QNODE(qhs_gpuss_cfg, SLAVE_GFX3D_CFG, 1, 8, 0);
224DEFINE_QNODE(qhs_imem_cfg, SLAVE_IMEM_CFG, 1, 4, 0);
225DEFINE_QNODE(qhs_ipa, SLAVE_IPA_CFG, 1, 4, 0);
226DEFINE_QNODE(qhs_mnoc_cfg, SLAVE_CNOC_MNOC_CFG, 1, 4, 1, MASTER_CNOC_MNOC_CFG);
227DEFINE_QNODE(qhs_pcie0_cfg, SLAVE_PCIE_0_CFG, 1, 4, 0);
228DEFINE_QNODE(qhs_pcie_gen3_cfg, SLAVE_PCIE_1_CFG, 1, 4, 0);
229DEFINE_QNODE(qhs_pdm, SLAVE_PDM, 1, 4, 0);
230DEFINE_QNODE(qhs_phy_refgen_south, SLAVE_SOUTH_PHY_CFG, 1, 4, 0);
231DEFINE_QNODE(qhs_pimem_cfg, SLAVE_PIMEM_CFG, 1, 4, 0);
232DEFINE_QNODE(qhs_prng, SLAVE_PRNG, 1, 4, 0);
233DEFINE_QNODE(qhs_qdss_cfg, SLAVE_QDSS_CFG, 1, 4, 0);
234DEFINE_QNODE(qhs_qupv3_north, SLAVE_BLSP_2, 1, 4, 0);
235DEFINE_QNODE(qhs_qupv3_south, SLAVE_BLSP_1, 1, 4, 0);
236DEFINE_QNODE(qhs_sdc2, SLAVE_SDCC_2, 1, 4, 0);
237DEFINE_QNODE(qhs_sdc4, SLAVE_SDCC_4, 1, 4, 0);
238DEFINE_QNODE(qhs_snoc_cfg, SLAVE_SNOC_CFG, 1, 4, 1, MASTER_SNOC_CFG);
239DEFINE_QNODE(qhs_spdm, SLAVE_SPDM_WRAPPER, 1, 4, 0);
240DEFINE_QNODE(qhs_spss_cfg, SLAVE_SPSS_CFG, 1, 4, 0);
241DEFINE_QNODE(qhs_tcsr, SLAVE_TCSR, 1, 4, 0);
242DEFINE_QNODE(qhs_tlmm_north, SLAVE_TLMM_NORTH, 1, 4, 0);
243DEFINE_QNODE(qhs_tlmm_south, SLAVE_TLMM_SOUTH, 1, 4, 0);
244DEFINE_QNODE(qhs_tsif, SLAVE_TSIF, 1, 4, 0);
245DEFINE_QNODE(qhs_ufs_card_cfg, SLAVE_UFS_CARD_CFG, 1, 4, 0);
246DEFINE_QNODE(qhs_ufs_mem_cfg, SLAVE_UFS_MEM_CFG, 1, 4, 0);
247DEFINE_QNODE(qhs_usb3_0, SLAVE_USB3_0, 1, 4, 0);
248DEFINE_QNODE(qhs_usb3_1, SLAVE_USB3_1, 1, 4, 0);
249DEFINE_QNODE(qhs_venus_cfg, SLAVE_VENUS_CFG, 1, 4, 0);
250DEFINE_QNODE(qhs_vsense_ctrl_cfg, SLAVE_VSENSE_CTRL_CFG, 1, 4, 0);
251DEFINE_QNODE(qns_cnoc_a2noc, SLAVE_CNOC_A2NOC, 1, 8, 1, MASTER_CNOC_A2NOC);
252DEFINE_QNODE(srvc_cnoc, SLAVE_SERVICE_CNOC, 1, 4, 0);
253DEFINE_QNODE(qhs_llcc, SLAVE_LLCC_CFG, 1, 4, 0);
254DEFINE_QNODE(qhs_memnoc, SLAVE_MEM_NOC_CFG, 1, 4, 1, MASTER_MEM_NOC_CFG);
255DEFINE_QNODE(qns_gladiator_sodv, SLAVE_GNOC_SNOC, 1, 8, 1, MASTER_GNOC_SNOC);
256DEFINE_QNODE(qns_gnoc_memnoc, SLAVE_GNOC_MEM_NOC, 2, 32, 1, MASTER_GNOC_MEM_NOC);
257DEFINE_QNODE(srvc_gnoc, SLAVE_SERVICE_GNOC, 1, 4, 0);
258DEFINE_QNODE(ebi, SLAVE_EBI1, 4, 4, 0);
259DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4, 0);
260DEFINE_QNODE(qns_apps_io, SLAVE_MEM_NOC_GNOC, 1, 32, 0);
261DEFINE_QNODE(qns_llcc, SLAVE_LLCC, 4, 16, 1, MASTER_LLCC);
262DEFINE_QNODE(qns_memnoc_snoc, SLAVE_MEM_NOC_SNOC, 1, 8, 1, MASTER_MEM_NOC_SNOC);
263DEFINE_QNODE(srvc_memnoc, SLAVE_SERVICE_MEM_NOC, 1, 4, 0);
264DEFINE_QNODE(qns2_mem_noc, SLAVE_MNOC_SF_MEM_NOC, 1, 32, 1, MASTER_MNOC_SF_MEM_NOC);
265DEFINE_QNODE(qns_mem_noc_hf, SLAVE_MNOC_HF_MEM_NOC, 2, 32, 1, MASTER_MNOC_HF_MEM_NOC);
266DEFINE_QNODE(srvc_mnoc, SLAVE_SERVICE_MNOC, 1, 4, 0);
267DEFINE_QNODE(qhs_apss, SLAVE_APPSS, 1, 8, 0);
268DEFINE_QNODE(qns_cnoc, SLAVE_SNOC_CNOC, 1, 8, 1, MASTER_SNOC_CNOC);
269DEFINE_QNODE(qns_memnoc_gc, SLAVE_SNOC_MEM_NOC_GC, 1, 8, 1, MASTER_SNOC_GC_MEM_NOC);
270DEFINE_QNODE(qns_memnoc_sf, SLAVE_SNOC_MEM_NOC_SF, 1, 16, 1, MASTER_SNOC_SF_MEM_NOC);
271DEFINE_QNODE(qxs_imem, SLAVE_IMEM, 1, 8, 0);
272DEFINE_QNODE(qxs_pcie, SLAVE_PCIE_0, 1, 8, 0);
273DEFINE_QNODE(qxs_pcie_gen3, SLAVE_PCIE_1, 1, 8, 0);
274DEFINE_QNODE(qxs_pimem, SLAVE_PIMEM, 1, 8, 0);
275DEFINE_QNODE(srvc_snoc, SLAVE_SERVICE_SNOC, 1, 4, 0);
276DEFINE_QNODE(xs_qdss_stm, SLAVE_QDSS_STM, 1, 4, 0);
277DEFINE_QNODE(xs_sys_tcu_cfg, SLAVE_TCU, 1, 8, 0);
278
279#define DEFINE_QBCM(_name, _bcmname, _keepalive, _numnodes, ...) \
280 static struct qcom_icc_bcm _name = { \
281 .name = _bcmname, \
282 .keepalive = _keepalive, \
283 .num_nodes = _numnodes, \
284 .nodes = { __VA_ARGS__ }, \
285 }
286
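(For readers skimming the table that follows: each DEFINE_QBCM invocation is a thin wrapper around a static struct definition. A minimal sketch of what the first invocation below expands to, using only the fields named in the macro above — the remaining members (addr, aux_data, the votes, dirty) are filled in at runtime by the code further down:)

static struct qcom_icc_bcm bcm_acv = {
	.name = "ACV",		/* name used to look this BCM up in the RPMh command DB */
	.keepalive = false,	/* no minimum vote is held when the BCM would otherwise be idle */
	.num_nodes = 1,		/* number of interconnect nodes aggregated into this BCM */
	.nodes = { &ebi },	/* __VA_ARGS__ becomes the node pointer list */
};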
287DEFINE_QBCM(bcm_acv, "ACV", false, 1, &ebi);
288DEFINE_QBCM(bcm_mc0, "MC0", true, 1, &ebi);
289DEFINE_QBCM(bcm_sh0, "SH0", true, 1, &qns_llcc);
290DEFINE_QBCM(bcm_mm0, "MM0", false, 1, &qns_mem_noc_hf);
291DEFINE_QBCM(bcm_sh1, "SH1", false, 1, &qns_apps_io);
292DEFINE_QBCM(bcm_mm1, "MM1", false, 7, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
293DEFINE_QBCM(bcm_sh2, "SH2", false, 1, &qns_memnoc_snoc);
294DEFINE_QBCM(bcm_mm2, "MM2", false, 1, &qns2_mem_noc);
295DEFINE_QBCM(bcm_sh3, "SH3", false, 1, &acm_tcu);
296DEFINE_QBCM(bcm_mm3, "MM3", false, 5, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
297DEFINE_QBCM(bcm_sh5, "SH5", false, 1, &qnm_apps);
298DEFINE_QBCM(bcm_sn0, "SN0", true, 1, &qns_memnoc_sf);
299DEFINE_QBCM(bcm_ce0, "CE0", false, 1, &qxm_crypto);
300DEFINE_QBCM(bcm_cn0, "CN0", false, 47, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
301DEFINE_QBCM(bcm_qup0, "QUP0", false, 2, &qhm_qup1, &qhm_qup2);
302DEFINE_QBCM(bcm_sn1, "SN1", false, 1, &qxs_imem);
303DEFINE_QBCM(bcm_sn2, "SN2", false, 1, &qns_memnoc_gc);
304DEFINE_QBCM(bcm_sn3, "SN3", false, 1, &qns_cnoc);
305DEFINE_QBCM(bcm_sn4, "SN4", false, 1, &qxm_pimem);
306DEFINE_QBCM(bcm_sn5, "SN5", false, 1, &xs_qdss_stm);
307DEFINE_QBCM(bcm_sn6, "SN6", false, 3, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg);
308DEFINE_QBCM(bcm_sn7, "SN7", false, 1, &qxs_pcie);
309DEFINE_QBCM(bcm_sn8, "SN8", false, 1, &qxs_pcie_gen3);
310DEFINE_QBCM(bcm_sn9, "SN9", false, 2, &srvc_aggre1_noc, &qnm_aggre1_noc);
311DEFINE_QBCM(bcm_sn11, "SN11", false, 2, &srvc_aggre2_noc, &qnm_aggre2_noc);
312DEFINE_QBCM(bcm_sn12, "SN12", false, 2, &qnm_gladiator_sodv, &xm_gic);
313DEFINE_QBCM(bcm_sn14, "SN14", false, 1, &qnm_pcie_anoc);
314DEFINE_QBCM(bcm_sn15, "SN15", false, 1, &qnm_memnoc);
315
316static struct qcom_icc_node *rsc_hlos_nodes[] = {
317 [MASTER_APPSS_PROC] = &acm_l3,
318 [MASTER_TCU_0] = &acm_tcu,
319 [MASTER_LLCC] = &llcc_mc,
320 [MASTER_GNOC_CFG] = &pm_gnoc_cfg,
321 [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
322 [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
323 [MASTER_CNOC_DC_NOC] = &qhm_cnoc,
324 [MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
325 [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
326 [MASTER_QDSS_BAM] = &qhm_qdss_bam,
327 [MASTER_BLSP_1] = &qhm_qup1,
328 [MASTER_BLSP_2] = &qhm_qup2,
329 [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
330 [MASTER_SPDM] = &qhm_spdm,
331 [MASTER_TIC] = &qhm_tic,
332 [MASTER_TSIF] = &qhm_tsif,
333 [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
334 [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
335 [MASTER_GNOC_MEM_NOC] = &qnm_apps,
336 [MASTER_CNOC_A2NOC] = &qnm_cnoc,
337 [MASTER_GNOC_SNOC] = &qnm_gladiator_sodv,
338 [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
339 [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
340 [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
341 [MASTER_ANOC_PCIE_SNOC] = &qnm_pcie_anoc,
342 [MASTER_SNOC_CNOC] = &qnm_snoc,
343 [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
344 [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
345 [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
346 [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
347 [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
348 [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
349 [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
350 [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
351 [MASTER_CRYPTO] = &qxm_crypto,
352 [MASTER_GFX3D] = &qxm_gpu,
353 [MASTER_IPA] = &qxm_ipa,
354 [MASTER_MDP0] = &qxm_mdp0,
355 [MASTER_MDP1] = &qxm_mdp1,
356 [MASTER_PIMEM] = &qxm_pimem,
357 [MASTER_ROTATOR] = &qxm_rot,
358 [MASTER_VIDEO_P0] = &qxm_venus0,
359 [MASTER_VIDEO_P1] = &qxm_venus1,
360 [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
361 [MASTER_GIC] = &xm_gic,
362 [MASTER_PCIE_1] = &xm_pcie3_1,
363 [MASTER_PCIE_0] = &xm_pcie_0,
364 [MASTER_QDSS_DAP] = &xm_qdss_dap,
365 [MASTER_QDSS_ETR] = &xm_qdss_etr,
366 [MASTER_SDCC_2] = &xm_sdc2,
367 [MASTER_SDCC_4] = &xm_sdc4,
368 [MASTER_UFS_CARD] = &xm_ufs_card,
369 [MASTER_UFS_MEM] = &xm_ufs_mem,
370 [MASTER_USB3_0] = &xm_usb3_0,
371 [MASTER_USB3_1] = &xm_usb3_1,
372 [SLAVE_EBI1] = &ebi,
373 [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
374 [SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
375 [SLAVE_AOP] = &qhs_aop,
376 [SLAVE_AOSS] = &qhs_aoss,
377 [SLAVE_APPSS] = &qhs_apss,
378 [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
379 [SLAVE_CLK_CTL] = &qhs_clk_ctl,
380 [SLAVE_CDSP_CFG] = &qhs_compute_dsp_cfg,
381 [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
382 [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
383 [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
384 [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
385 [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
386 [SLAVE_GLM] = &qhs_glm,
387 [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
388 [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
389 [SLAVE_IPA_CFG] = &qhs_ipa,
390 [SLAVE_LLCC_CFG] = &qhs_llcc,
391 [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
392 [SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
393 [SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
394 [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
395 [SLAVE_PCIE_1_CFG] = &qhs_pcie_gen3_cfg,
396 [SLAVE_PDM] = &qhs_pdm,
397 [SLAVE_SOUTH_PHY_CFG] = &qhs_phy_refgen_south,
398 [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
399 [SLAVE_PRNG] = &qhs_prng,
400 [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
401 [SLAVE_BLSP_2] = &qhs_qupv3_north,
402 [SLAVE_BLSP_1] = &qhs_qupv3_south,
403 [SLAVE_SDCC_2] = &qhs_sdc2,
404 [SLAVE_SDCC_4] = &qhs_sdc4,
405 [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
406 [SLAVE_SPDM_WRAPPER] = &qhs_spdm,
407 [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
408 [SLAVE_TCSR] = &qhs_tcsr,
409 [SLAVE_TLMM_NORTH] = &qhs_tlmm_north,
410 [SLAVE_TLMM_SOUTH] = &qhs_tlmm_south,
411 [SLAVE_TSIF] = &qhs_tsif,
412 [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
413 [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
414 [SLAVE_USB3_0] = &qhs_usb3_0,
415 [SLAVE_USB3_1] = &qhs_usb3_1,
416 [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
417 [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
418 [SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
419 [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
420 [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
421 [SLAVE_MEM_NOC_GNOC] = &qns_apps_io,
422 [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
423 [SLAVE_SNOC_CNOC] = &qns_cnoc,
424 [SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
425 [SLAVE_GNOC_SNOC] = &qns_gladiator_sodv,
426 [SLAVE_GNOC_MEM_NOC] = &qns_gnoc_memnoc,
427 [SLAVE_LLCC] = &qns_llcc,
428 [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
429 [SLAVE_SNOC_MEM_NOC_GC] = &qns_memnoc_gc,
430 [SLAVE_SNOC_MEM_NOC_SF] = &qns_memnoc_sf,
431 [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
432 [SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc,
433 [SLAVE_ANOC_PCIE_SNOC] = &qns_pcie_snoc,
434 [SLAVE_IMEM] = &qxs_imem,
435 [SLAVE_PCIE_0] = &qxs_pcie,
436 [SLAVE_PCIE_1] = &qxs_pcie_gen3,
437 [SLAVE_PIMEM] = &qxs_pimem,
438 [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
439 [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
440 [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
441 [SLAVE_SERVICE_GNOC] = &srvc_gnoc,
442 [SLAVE_SERVICE_MEM_NOC] = &srvc_memnoc,
443 [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
444 [SLAVE_SERVICE_SNOC] = &srvc_snoc,
445 [SLAVE_QDSS_STM] = &xs_qdss_stm,
446 [SLAVE_TCU] = &xs_sys_tcu_cfg,
447};
448
449static struct qcom_icc_bcm *rsc_hlos_bcms[] = {
450 &bcm_acv,
451 &bcm_mc0,
452 &bcm_sh0,
453 &bcm_mm0,
454 &bcm_sh1,
455 &bcm_mm1,
456 &bcm_sh2,
457 &bcm_mm2,
458 &bcm_sh3,
459 &bcm_mm3,
460 &bcm_sh5,
461 &bcm_sn0,
462 &bcm_ce0,
463 &bcm_cn0,
464 &bcm_qup0,
465 &bcm_sn1,
466 &bcm_sn2,
467 &bcm_sn3,
468 &bcm_sn4,
469 &bcm_sn5,
470 &bcm_sn6,
471 &bcm_sn7,
472 &bcm_sn8,
473 &bcm_sn9,
474 &bcm_sn11,
475 &bcm_sn12,
476 &bcm_sn14,
477 &bcm_sn15,
478};
479
480static struct qcom_icc_desc sdm845_rsc_hlos = {
481 .nodes = rsc_hlos_nodes,
482 .num_nodes = ARRAY_SIZE(rsc_hlos_nodes),
483 .bcms = rsc_hlos_bcms,
484 .num_bcms = ARRAY_SIZE(rsc_hlos_bcms),
485};
486
487static int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev)
488{
489 struct qcom_icc_node *qn;
490 const struct bcm_db *data;
491 size_t data_count;
492 int i;
493
494 bcm->addr = cmd_db_read_addr(bcm->name);
495 if (!bcm->addr) {
496 dev_err(dev, "%s could not find RPMh address\n",
497 bcm->name);
498 return -EINVAL;
499 }
500
501 data = cmd_db_read_aux_data(bcm->name, &data_count);
502 if (IS_ERR(data)) {
503 dev_err(dev, "%s command db read error (%ld)\n",
504 bcm->name, PTR_ERR(data));
505 return PTR_ERR(data);
506 }
507 if (!data_count) {
508 dev_err(dev, "%s command db missing or partial aux data\n",
509 bcm->name);
510 return -EINVAL;
511 }
512
513 bcm->aux_data.unit = le32_to_cpu(data->unit);
514 bcm->aux_data.width = le16_to_cpu(data->width);
515 bcm->aux_data.vcd = data->vcd;
516 bcm->aux_data.reserved = data->reserved;
517
518 /*
519 * Link Qnodes to their respective BCMs
520 */
521 for (i = 0; i < bcm->num_nodes; i++) {
522 qn = bcm->nodes[i];
523 qn->bcms[qn->num_bcms] = bcm;
524 qn->num_bcms++;
525 }
526
527 return 0;
528}
529
530inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
531 u32 addr, bool commit)
532{
533 bool valid = true;
534
535 if (!cmd)
536 return;
537
538 if (vote_x == 0 && vote_y == 0)
539 valid = false;
540
541 if (vote_x > BCM_TCS_CMD_VOTE_MASK)
542 vote_x = BCM_TCS_CMD_VOTE_MASK;
543
544 if (vote_y > BCM_TCS_CMD_VOTE_MASK)
545 vote_y = BCM_TCS_CMD_VOTE_MASK;
546
547 cmd->addr = addr;
548 cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);
549
550 /*
551	 * Set the wait-for-completion flag on commands that need to complete
552	 * before the next command is sent.
553 */
554 if (commit)
555 cmd->wait = true;
556}
557
558static void tcs_list_gen(struct list_head *bcm_list,
559 struct tcs_cmd tcs_list[SDM845_MAX_VCD],
560 int n[SDM845_MAX_VCD])
561{
562 struct qcom_icc_bcm *bcm;
563 bool commit;
564 size_t idx = 0, batch = 0, cur_vcd_size = 0;
565
566 memset(n, 0, sizeof(int) * SDM845_MAX_VCD);
567
568 list_for_each_entry(bcm, bcm_list, list) {
569 commit = false;
570 cur_vcd_size++;
571 if ((list_is_last(&bcm->list, bcm_list)) ||
572 bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
573 commit = true;
574 cur_vcd_size = 0;
575 }
576 tcs_cmd_gen(&tcs_list[idx], bcm->vote_x, bcm->vote_y,
577 bcm->addr, commit);
578 idx++;
579 n[batch]++;
580 /*
581	 * Batch the BCMs so that BCMs sharing a VCD are never split across
582	 * multiple payloads. This ensures that every BCM is committed,
583	 * since the commit bit is only set on the last BCM request of
584	 * each VCD.
585 */
586 if (n[batch] >= MAX_RPMH_PAYLOAD) {
587 if (!commit) {
588 n[batch] -= cur_vcd_size;
589 n[batch + 1] = cur_vcd_size;
590 }
591 batch++;
592 }
593 }
594}
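As a concrete illustration of the batching above (the numbers are hypothetical, chosen only to show the mechanism): suppose MAX_RPMH_PAYLOAD were 4 and the sorted commit list held three BCMs in VCD 0 followed by three in VCD 1. Without the adjustment, the first payload would carry the three VCD 0 commands plus the first VCD 1 command, splitting VCD 1 across payloads and leaving part of it in a payload that does not end with its commit. With it, once n[batch] reaches the payload limit on an uncommitted entry, the cur_vcd_size entries of the in-progress VCD are counted into the next batch instead, so the payload boundaries come out as {all of VCD 0} and {all of VCD 1}, each ending with its commit bit set.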
595
596static void bcm_aggregate(struct qcom_icc_bcm *bcm)
597{
598 size_t i;
599 u64 agg_avg = 0;
600 u64 agg_peak = 0;
601 u64 temp;
602
603 for (i = 0; i < bcm->num_nodes; i++) {
604 temp = bcm->nodes[i]->sum_avg * bcm->aux_data.width;
605 do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
606 agg_avg = max(agg_avg, temp);
607
608 temp = bcm->nodes[i]->max_peak * bcm->aux_data.width;
609 do_div(temp, bcm->nodes[i]->buswidth);
610 agg_peak = max(agg_peak, temp);
611 }
612
613 temp = agg_avg * 1000ULL;
614 do_div(temp, bcm->aux_data.unit);
615 bcm->vote_x = temp;
616
617 temp = agg_peak * 1000ULL;
618 do_div(temp, bcm->aux_data.unit);
619 bcm->vote_y = temp;
620
621 if (bcm->keepalive && bcm->vote_x == 0 && bcm->vote_y == 0) {
622 bcm->vote_x = 1;
623 bcm->vote_y = 1;
624 }
625
626 bcm->dirty = false;
627}
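To make the arithmetic above concrete (all numbers hypothetical): for a single-node BCM whose command-DB aux data reports width = 8 and unit = 1000, attached to a node with buswidth = 4 and channels = 2 that has aggregated sum_avg = 1000 and max_peak = 2000, the loop computes agg_avg = 1000 * 8 / (4 * 2) = 1000 and agg_peak = 2000 * 8 / 4 = 4000, giving vote_x = 1000 * 1000 / 1000 = 1000 and vote_y = 4000 * 1000 / 1000 = 4000. A keepalive BCM whose votes both come out as zero is instead pinned to vote_x = vote_y = 1 so that a minimal vote is always maintained.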
628
629static int qcom_icc_aggregate(struct icc_node *node, u32 avg_bw,
630 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
631{
632 size_t i;
633 struct qcom_icc_node *qn;
634
635 qn = node->data;
636
637 *agg_avg += avg_bw;
638 *agg_peak = max_t(u32, *agg_peak, peak_bw);
639
640 qn->sum_avg = *agg_avg;
641 qn->max_peak = *agg_peak;
642
643 for (i = 0; i < qn->num_bcms; i++)
644 qn->bcms[i]->dirty = true;
645
646 return 0;
647}
648
649static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
650{
651 struct qcom_icc_provider *qp;
652 struct icc_node *node;
653 struct tcs_cmd cmds[SDM845_MAX_BCMS];
654 struct list_head commit_list;
655 int commit_idx[SDM845_MAX_VCD];
656 int ret = 0, i;
657
658 if (!src)
659 node = dst;
660 else
661 node = src;
662
663 qp = to_qcom_provider(node->provider);
664
665 INIT_LIST_HEAD(&commit_list);
666
667 for (i = 0; i < qp->num_bcms; i++) {
668 if (qp->bcms[i]->dirty) {
669 bcm_aggregate(qp->bcms[i]);
670 list_add_tail(&qp->bcms[i]->list, &commit_list);
671 }
672 }
673
674 /*
675	 * Construct the command list from the commit list built above, which
676	 * is pre-ordered by VCD.
677 */
678 tcs_list_gen(&commit_list, cmds, commit_idx);
679
680 if (!commit_idx[0])
681 return ret;
682
683 ret = rpmh_invalidate(qp->dev);
684 if (ret) {
685 pr_err("Error invalidating RPMH client (%d)\n", ret);
686 return ret;
687 }
688
689 ret = rpmh_write_batch(qp->dev, RPMH_ACTIVE_ONLY_STATE,
690 cmds, commit_idx);
691 if (ret) {
692 pr_err("Error sending AMC RPMH requests (%d)\n", ret);
693 return ret;
694 }
695
696 return ret;
697}
698
699static int cmp_vcd(const void *_l, const void *_r)
700{
701 const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm **)_l;
702 const struct qcom_icc_bcm **r = (const struct qcom_icc_bcm **)_r;
703
704 if (l[0]->aux_data.vcd < r[0]->aux_data.vcd)
705 return -1;
706 else if (l[0]->aux_data.vcd == r[0]->aux_data.vcd)
707 return 0;
708 else
709 return 1;
710}
711
712static int qnoc_probe(struct platform_device *pdev)
713{
714 const struct qcom_icc_desc *desc;
715 struct icc_onecell_data *data;
716 struct icc_provider *provider;
717 struct qcom_icc_node **qnodes;
718 struct qcom_icc_provider *qp;
719 struct icc_node *node;
720 size_t num_nodes, i;
721 int ret;
722
723 desc = of_device_get_match_data(&pdev->dev);
724 if (!desc)
725 return -EINVAL;
726
727 qnodes = desc->nodes;
728 num_nodes = desc->num_nodes;
729
730 qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
731 if (!qp)
732 return -ENOMEM;
733
734 data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
735 if (!data)
736 return -ENOMEM;
737
738 provider = &qp->provider;
739 provider->dev = &pdev->dev;
740 provider->set = qcom_icc_set;
741 provider->aggregate = qcom_icc_aggregate;
742 provider->xlate = of_icc_xlate_onecell;
743 INIT_LIST_HEAD(&provider->nodes);
744 provider->data = data;
745
746 qp->dev = &pdev->dev;
747 qp->bcms = desc->bcms;
748 qp->num_bcms = desc->num_bcms;
749
750 ret = icc_provider_add(provider);
751 if (ret) {
752 dev_err(&pdev->dev, "error adding interconnect provider\n");
753 return ret;
754 }
755
756 for (i = 0; i < num_nodes; i++) {
757 size_t j;
758
759 node = icc_node_create(qnodes[i]->id);
760 if (IS_ERR(node)) {
761 ret = PTR_ERR(node);
762 goto err;
763 }
764
765 node->name = qnodes[i]->name;
766 node->data = qnodes[i];
767 icc_node_add(node, provider);
768
769 dev_dbg(&pdev->dev, "registered node %p %s %d\n", node,
770 qnodes[i]->name, node->id);
771
772 /* populate links */
773 for (j = 0; j < qnodes[i]->num_links; j++)
774 icc_link_create(node, qnodes[i]->links[j]);
775
776 data->nodes[i] = node;
777 }
778 data->num_nodes = num_nodes;
779
780 for (i = 0; i < qp->num_bcms; i++)
781 qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
782
783 /*
784	 * Pre-sort the BCMs by VCD to make it easy to generate a command list
785	 * that groups BCMs with the same VCD together. VCDs are numbered so
786	 * that the lowest is the most time-expensive, ensuring that those
787	 * commands are sent earliest in the queue.
788 */
789 sort(qp->bcms, qp->num_bcms, sizeof(*qp->bcms), cmp_vcd, NULL);
790
791 platform_set_drvdata(pdev, qp);
792
793 dev_dbg(&pdev->dev, "Registered SDM845 ICC\n");
794
795 return ret;
796err:
797 list_for_each_entry(node, &provider->nodes, node_list) {
798 icc_node_del(node);
799 icc_node_destroy(node->id);
800 }
801
802 icc_provider_del(provider);
803 return ret;
804}
805
806static int qnoc_remove(struct platform_device *pdev)
807{
808 struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
809 struct icc_provider *provider = &qp->provider;
810 struct icc_node *n;
811
812 list_for_each_entry(n, &provider->nodes, node_list) {
813 icc_node_del(n);
814 icc_node_destroy(n->id);
815 }
816
817 return icc_provider_del(provider);
818}
819
820static const struct of_device_id qnoc_of_match[] = {
821 { .compatible = "qcom,sdm845-rsc-hlos", .data = &sdm845_rsc_hlos },
822 { },
823};
824MODULE_DEVICE_TABLE(of, qnoc_of_match);
825
826static struct platform_driver qnoc_driver = {
827 .probe = qnoc_probe,
828 .remove = qnoc_remove,
829 .driver = {
830 .name = "qnoc-sdm845",
831 .of_match_table = qnoc_of_match,
832 },
833};
834module_platform_driver(qnoc_driver);
835
836MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>");
837MODULE_DESCRIPTION("Qualcomm sdm845 NoC driver");
838MODULE_LICENSE("GPL v2");
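For context on how this provider is consumed: the sketch below is not part of this patch; it assumes the interconnect framework API merged alongside it (of_icc_get(), icc_set_bw() and icc_put() from <linux/interconnect.h>), and the "cpu-mem" request name and bandwidth values are made up for illustration.

#include <linux/interconnect.h>

static int example_icc_vote(struct device *dev)
{
	struct icc_path *path;
	int ret;

	/* Resolve a named path to this provider via the consumer's DT node */
	path = of_icc_get(dev, "cpu-mem");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/*
	 * Request average/peak bandwidth; the framework aggregates the votes
	 * and ends up in qcom_icc_aggregate()/qcom_icc_set() above.
	 */
	ret = icc_set_bw(path, 1000, 2000);

	icc_put(path);
	return ret;
}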
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index bbec6ac0a966..3581abfb0c6a 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -569,6 +569,7 @@ cuda_interrupt(int irq, void *arg)
569 unsigned char ibuf[16]; 569 unsigned char ibuf[16];
570 int ibuf_len = 0; 570 int ibuf_len = 0;
571 int complete = 0; 571 int complete = 0;
572 bool full;
572 573
573 spin_lock_irqsave(&cuda_lock, flags); 574 spin_lock_irqsave(&cuda_lock, flags);
574 575
@@ -656,12 +657,13 @@ idle_state:
656 break; 657 break;
657 658
658 case reading: 659 case reading:
659 if (reading_reply ? ARRAY_FULL(current_req->reply, reply_ptr) 660 full = reading_reply ? ARRAY_FULL(current_req->reply, reply_ptr)
660 : ARRAY_FULL(cuda_rbuf, reply_ptr)) 661 : ARRAY_FULL(cuda_rbuf, reply_ptr);
662 if (full)
661 (void)in_8(&via[SR]); 663 (void)in_8(&via[SR]);
662 else 664 else
663 *reply_ptr++ = in_8(&via[SR]); 665 *reply_ptr++ = in_8(&via[SR]);
664 if (!TREQ_asserted(status)) { 666 if (!TREQ_asserted(status) || full) {
665 if (mcu_is_egret) 667 if (mcu_is_egret)
666 assert_TACK(); 668 assert_TACK();
667 /* that's all folks */ 669 /* that's all folks */
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index f417b06e11c5..42ab8ec92a04 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -295,6 +295,17 @@ config QCOM_COINCELL
295 to maintain PMIC register and RTC state in the absence of 295 to maintain PMIC register and RTC state in the absence of
296 external power. 296 external power.
297 297
298config QCOM_FASTRPC
299 tristate "Qualcomm FastRPC"
300 depends on ARCH_QCOM || COMPILE_TEST
301 depends on RPMSG
302 select DMA_SHARED_BUFFER
303 help
 304	  Provides a communication mechanism that allows clients to make
 305	  remote method invocations across the processor boundary to the
 306	  applications DSP processor. Say M if you want to enable this
 307	  module.
308
298config SGI_GRU 309config SGI_GRU
299 tristate "SGI GRU driver" 310 tristate "SGI GRU driver"
300 depends on X86_UV && SMP 311 depends on X86_UV && SMP
@@ -535,4 +546,5 @@ source "drivers/misc/echo/Kconfig"
535source "drivers/misc/cxl/Kconfig" 546source "drivers/misc/cxl/Kconfig"
536source "drivers/misc/ocxl/Kconfig" 547source "drivers/misc/ocxl/Kconfig"
537source "drivers/misc/cardreader/Kconfig" 548source "drivers/misc/cardreader/Kconfig"
549source "drivers/misc/habanalabs/Kconfig"
538endmenu 550endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index e39ccbbc1b3a..d5b7d3404dc7 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o
18obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o 18obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
19obj-$(CONFIG_PHANTOM) += phantom.o 19obj-$(CONFIG_PHANTOM) += phantom.o
20obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o 20obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o
21obj-$(CONFIG_QCOM_FASTRPC) += fastrpc.o
21obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o 22obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
22obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o 23obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
23obj-$(CONFIG_SGI_IOC4) += ioc4.o 24obj-$(CONFIG_SGI_IOC4) += ioc4.o
@@ -59,3 +60,4 @@ obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
59obj-$(CONFIG_OCXL) += ocxl/ 60obj-$(CONFIG_OCXL) += ocxl/
60obj-y += cardreader/ 61obj-y += cardreader/
61obj-$(CONFIG_PVPANIC) += pvpanic.o 62obj-$(CONFIG_PVPANIC) += pvpanic.o
63obj-$(CONFIG_HABANA_AI) += habanalabs/
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index a0afadefcc49..1f6d008e0036 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -202,22 +202,20 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
202 return dpot_read_r8d8(dpot, ctrl); 202 return dpot_read_r8d8(dpot, ctrl);
203 case DPOT_UID(AD5272_ID): 203 case DPOT_UID(AD5272_ID):
204 case DPOT_UID(AD5274_ID): 204 case DPOT_UID(AD5274_ID):
205 dpot_write_r8d8(dpot, 205 dpot_write_r8d8(dpot,
206 (DPOT_AD5270_1_2_4_READ_RDAC << 2), 0); 206 (DPOT_AD5270_1_2_4_READ_RDAC << 2), 0);
207 207
208 value = dpot_read_r8d16(dpot, 208 value = dpot_read_r8d16(dpot, DPOT_AD5270_1_2_4_RDAC << 2);
209 DPOT_AD5270_1_2_4_RDAC << 2); 209 if (value < 0)
210 210 return value;
211 if (value < 0) 211 /*
212 return value; 212 * AD5272/AD5274 returns high byte first, however
213 /* 213 * underling smbus expects low byte first.
214 * AD5272/AD5274 returns high byte first, however 214 */
215 * underling smbus expects low byte first. 215 value = swab16(value);
216 */
217 value = swab16(value);
218 216
219 if (dpot->uid == DPOT_UID(AD5274_ID)) 217 if (dpot->uid == DPOT_UID(AD5274_ID))
220 value = value >> 2; 218 value = value >> 2;
221 return value; 219 return value;
222 default: 220 default:
223 if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256)) 221 if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 024dcba8d6c8..5c98e2221889 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -170,35 +170,46 @@ static int rts5227_card_power_on(struct rtsx_pcr *pcr, int card)
170{ 170{
171 int err; 171 int err;
172 172
173 if (pcr->option.ocp_en)
174 rtsx_pci_enable_ocp(pcr);
175
173 rtsx_pci_init_cmd(pcr); 176 rtsx_pci_init_cmd(pcr);
174 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, 177 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
175 SD_POWER_MASK, SD_PARTIAL_POWER_ON); 178 SD_POWER_MASK, SD_PARTIAL_POWER_ON);
179
176 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, 180 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
177 LDO3318_PWR_MASK, 0x02); 181 LDO3318_PWR_MASK, 0x02);
182
178 err = rtsx_pci_send_cmd(pcr, 100); 183 err = rtsx_pci_send_cmd(pcr, 100);
179 if (err < 0) 184 if (err < 0)
180 return err; 185 return err;
181 186
182 /* To avoid too large in-rush current */ 187 /* To avoid too large in-rush current */
183 udelay(150); 188 msleep(20);
184
185 rtsx_pci_init_cmd(pcr); 189 rtsx_pci_init_cmd(pcr);
186 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, 190 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
187 SD_POWER_MASK, SD_POWER_ON); 191 SD_POWER_MASK, SD_POWER_ON);
192
188 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, 193 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
189 LDO3318_PWR_MASK, 0x06); 194 LDO3318_PWR_MASK, 0x06);
195
196 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE,
197 SD_OUTPUT_EN, SD_OUTPUT_EN);
198 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE,
199 MS_OUTPUT_EN, MS_OUTPUT_EN);
190 return rtsx_pci_send_cmd(pcr, 100); 200 return rtsx_pci_send_cmd(pcr, 100);
191} 201}
192 202
193static int rts5227_card_power_off(struct rtsx_pcr *pcr, int card) 203static int rts5227_card_power_off(struct rtsx_pcr *pcr, int card)
194{ 204{
195 rtsx_pci_init_cmd(pcr); 205 if (pcr->option.ocp_en)
196 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, 206 rtsx_pci_disable_ocp(pcr);
197 SD_POWER_MASK | PMOS_STRG_MASK, 207
198 SD_POWER_OFF | PMOS_STRG_400mA); 208 rtsx_pci_write_register(pcr, CARD_PWR_CTL, SD_POWER_MASK |
199 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, 209 PMOS_STRG_MASK, SD_POWER_OFF | PMOS_STRG_400mA);
200 LDO3318_PWR_MASK, 0X00); 210 rtsx_pci_write_register(pcr, PWR_GATE_CTRL, LDO3318_PWR_MASK, 0X00);
201 return rtsx_pci_send_cmd(pcr, 100); 211
212 return 0;
202} 213}
203 214
204static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) 215static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
@@ -348,6 +359,32 @@ static int rts522a_extra_init_hw(struct rtsx_pcr *pcr)
348 return 0; 359 return 0;
349} 360}
350 361
362static int rts522a_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
363{
364 int err;
365
366 if (voltage == OUTPUT_3V3) {
367 err = rtsx_pci_write_phy_register(pcr, 0x08, 0x57E4);
368 if (err < 0)
369 return err;
370 } else if (voltage == OUTPUT_1V8) {
371 err = rtsx_pci_write_phy_register(pcr, 0x11, 0x3C02);
372 if (err < 0)
373 return err;
374 err = rtsx_pci_write_phy_register(pcr, 0x08, 0x54A4);
375 if (err < 0)
376 return err;
377 } else {
378 return -EINVAL;
379 }
380
381 /* set pad drive */
382 rtsx_pci_init_cmd(pcr);
383 rts5227_fill_driving(pcr, voltage);
384 return rtsx_pci_send_cmd(pcr, 100);
385}
386
387
351/* rts522a operations mainly derived from rts5227, except phy/hw init setting. 388/* rts522a operations mainly derived from rts5227, except phy/hw init setting.
352 */ 389 */
353static const struct pcr_ops rts522a_pcr_ops = { 390static const struct pcr_ops rts522a_pcr_ops = {
@@ -360,7 +397,7 @@ static const struct pcr_ops rts522a_pcr_ops = {
360 .disable_auto_blink = rts5227_disable_auto_blink, 397 .disable_auto_blink = rts5227_disable_auto_blink,
361 .card_power_on = rts5227_card_power_on, 398 .card_power_on = rts5227_card_power_on,
362 .card_power_off = rts5227_card_power_off, 399 .card_power_off = rts5227_card_power_off,
363 .switch_output_voltage = rts5227_switch_output_voltage, 400 .switch_output_voltage = rts522a_switch_output_voltage,
364 .cd_deglitch = NULL, 401 .cd_deglitch = NULL,
365 .conv_clk_and_div_n = NULL, 402 .conv_clk_and_div_n = NULL,
366 .force_power_down = rts5227_force_power_down, 403 .force_power_down = rts5227_force_power_down,
@@ -371,4 +408,11 @@ void rts522a_init_params(struct rtsx_pcr *pcr)
371 rts5227_init_params(pcr); 408 rts5227_init_params(pcr);
372 409
373 pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3; 410 pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
411
412 pcr->option.ocp_en = 1;
413 if (pcr->option.ocp_en)
414 pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
415 pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M;
416 pcr->option.sd_800mA_ocp_thd = RTS522A_OCP_THD_800;
417
374} 418}
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index dbe013abdb83..0f72a7e0fdab 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -284,6 +284,10 @@ static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr)
284static int rtsx_base_card_power_on(struct rtsx_pcr *pcr, int card) 284static int rtsx_base_card_power_on(struct rtsx_pcr *pcr, int card)
285{ 285{
286 int err; 286 int err;
287 struct rtsx_cr_option *option = &pcr->option;
288
289 if (option->ocp_en)
290 rtsx_pci_enable_ocp(pcr);
287 291
288 rtsx_pci_init_cmd(pcr); 292 rtsx_pci_init_cmd(pcr);
289 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, 293 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
@@ -306,12 +310,15 @@ static int rtsx_base_card_power_on(struct rtsx_pcr *pcr, int card)
306 310
307static int rtsx_base_card_power_off(struct rtsx_pcr *pcr, int card) 311static int rtsx_base_card_power_off(struct rtsx_pcr *pcr, int card)
308{ 312{
309 rtsx_pci_init_cmd(pcr); 313 struct rtsx_cr_option *option = &pcr->option;
310 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, 314
311 SD_POWER_MASK, SD_POWER_OFF); 315 if (option->ocp_en)
312 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, 316 rtsx_pci_disable_ocp(pcr);
313 LDO3318_PWR_MASK, 0x00); 317
314 return rtsx_pci_send_cmd(pcr, 100); 318 rtsx_pci_write_register(pcr, CARD_PWR_CTL, SD_POWER_MASK, SD_POWER_OFF);
319
320 rtsx_pci_write_register(pcr, PWR_GATE_CTRL, LDO3318_PWR_MASK, 0x00);
321 return 0;
315} 322}
316 323
317static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) 324static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
@@ -629,6 +636,13 @@ void rts524a_init_params(struct rtsx_pcr *pcr)
629 636
630 pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3; 637 pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
631 pcr->ops = &rts524a_pcr_ops; 638 pcr->ops = &rts524a_pcr_ops;
639
640 pcr->option.ocp_en = 1;
641 if (pcr->option.ocp_en)
642 pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
643 pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M;
644 pcr->option.sd_800mA_ocp_thd = RTS524A_OCP_THD_800;
645
632} 646}
633 647
634static int rts525a_card_power_on(struct rtsx_pcr *pcr, int card) 648static int rts525a_card_power_on(struct rtsx_pcr *pcr, int card)
@@ -737,4 +751,10 @@ void rts525a_init_params(struct rtsx_pcr *pcr)
737 751
738 pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3; 752 pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
739 pcr->ops = &rts525a_pcr_ops; 753 pcr->ops = &rts525a_pcr_ops;
754
755 pcr->option.ocp_en = 1;
756 if (pcr->option.ocp_en)
757 pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
758 pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M;
759 pcr->option.sd_800mA_ocp_thd = RTS525A_OCP_THD_800;
740} 760}
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index a493b01c5bc6..da22bcb62b04 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -64,11 +64,13 @@ static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
64 drive_sel = pcr->sd30_drive_sel_1v8; 64 drive_sel = pcr->sd30_drive_sel_1v8;
65 } 65 }
66 66
67 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL, 67 rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
68 0xFF, driving[drive_sel][0]); 68 0xFF, driving[drive_sel][0]);
69 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL, 69
70 rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
70 0xFF, driving[drive_sel][1]); 71 0xFF, driving[drive_sel][1]);
71 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL, 72
73 rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
72 0xFF, driving[drive_sel][2]); 74 0xFF, driving[drive_sel][2]);
73} 75}
74 76
@@ -193,7 +195,7 @@ static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
193 | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST); 195 | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
194 rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ); 196 rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
195 rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF, 197 rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
196 CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); 198 CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
197 rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0); 199 rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
198 200
199 return 0; 201 return 0;
@@ -207,22 +209,16 @@ static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
207 if (option->ocp_en) 209 if (option->ocp_en)
208 rtsx_pci_enable_ocp(pcr); 210 rtsx_pci_enable_ocp(pcr);
209 211
210 rtsx_pci_init_cmd(pcr);
211 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
212 DV331812_VDD1, DV331812_VDD1);
213 err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
214 if (err < 0)
215 return err;
216 212
217 rtsx_pci_init_cmd(pcr); 213 rtsx_pci_write_register(pcr, LDO_CONFIG2, DV331812_VDD1, DV331812_VDD1);
218 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG0, 214 rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
219 RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33); 215 RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
220 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
221 LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_ON);
222 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
223 DV331812_POWERON, DV331812_POWERON);
224 err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
225 216
217 rtsx_pci_write_register(pcr, LDO_VCC_CFG1, LDO_POW_SDVDD1_MASK,
218 LDO_POW_SDVDD1_ON);
219
220 rtsx_pci_write_register(pcr, LDO_CONFIG2,
221 DV331812_POWERON, DV331812_POWERON);
226 msleep(20); 222 msleep(20);
227 223
228 if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 || 224 if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
@@ -242,8 +238,8 @@ static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
242 /* Reset SD_CFG3 register */ 238 /* Reset SD_CFG3 register */
243 rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0); 239 rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
244 rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG, 240 rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
245 SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 | 241 SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
246 SD30_CLK_STOP_CFG0, 0); 242 SD30_CLK_STOP_CFG0, 0);
247 243
248 rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0); 244 rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0);
249 245
@@ -273,9 +269,9 @@ static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
273 } 269 }
274 270
275 /* set pad drive */ 271 /* set pad drive */
276 rtsx_pci_init_cmd(pcr);
277 rts5260_fill_driving(pcr, voltage); 272 rts5260_fill_driving(pcr, voltage);
278 return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF); 273
274 return 0;
279} 275}
280 276
281static void rts5260_stop_cmd(struct rtsx_pcr *pcr) 277static void rts5260_stop_cmd(struct rtsx_pcr *pcr)
@@ -290,13 +286,9 @@ static void rts5260_stop_cmd(struct rtsx_pcr *pcr)
290 286
291static void rts5260_card_before_power_off(struct rtsx_pcr *pcr) 287static void rts5260_card_before_power_off(struct rtsx_pcr *pcr)
292{ 288{
293 struct rtsx_cr_option *option = &pcr->option;
294
295 rts5260_stop_cmd(pcr); 289 rts5260_stop_cmd(pcr);
296 rts5260_switch_output_voltage(pcr, OUTPUT_3V3); 290 rts5260_switch_output_voltage(pcr, OUTPUT_3V3);
297 291
298 if (option->ocp_en)
299 rtsx_pci_disable_ocp(pcr);
300} 292}
301 293
302static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card) 294static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card)
@@ -304,13 +296,12 @@ static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card)
304 int err = 0; 296 int err = 0;
305 297
306 rts5260_card_before_power_off(pcr); 298 rts5260_card_before_power_off(pcr);
307 299 err = rtsx_pci_write_register(pcr, LDO_VCC_CFG1,
308 rtsx_pci_init_cmd(pcr);
309 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
310 LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_OFF); 300 LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_OFF);
311 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2, 301 err = rtsx_pci_write_register(pcr, LDO_CONFIG2,
312 DV331812_POWERON, DV331812_POWEROFF); 302 DV331812_POWERON, DV331812_POWEROFF);
313 err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF); 303 if (pcr->option.ocp_en)
304 rtsx_pci_disable_ocp(pcr);
314 305
315 return err; 306 return err;
316} 307}
@@ -322,41 +313,29 @@ static void rts5260_init_ocp(struct rtsx_pcr *pcr)
322 if (option->ocp_en) { 313 if (option->ocp_en) {
323 u8 mask, val; 314 u8 mask, val;
324 315
325 rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
326 RTS5260_DVCC_OCP_EN |
327 RTS5260_DVCC_OCP_CL_EN,
328 RTS5260_DVCC_OCP_EN |
329 RTS5260_DVCC_OCP_CL_EN);
330 rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
331 RTS5260_DVIO_OCP_EN |
332 RTS5260_DVIO_OCP_CL_EN,
333 RTS5260_DVIO_OCP_EN |
334 RTS5260_DVIO_OCP_CL_EN);
335 316
336 rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL, 317 rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
337 RTS5260_DVCC_OCP_THD_MASK, 318 RTS5260_DVCC_OCP_THD_MASK,
338 option->sd_400mA_ocp_thd); 319 option->sd_800mA_ocp_thd);
339
340 rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
341 RTS5260_DVIO_OCP_THD_MASK,
342 RTS5260_DVIO_OCP_THD_350);
343 320
344 rtsx_pci_write_register(pcr, RTS5260_DV331812_CFG, 321 rtsx_pci_write_register(pcr, RTS5260_DV331812_CFG,
345 RTS5260_DV331812_OCP_THD_MASK, 322 RTS5260_DV331812_OCP_THD_MASK,
346 RTS5260_DV331812_OCP_THD_210); 323 RTS5260_DV331812_OCP_THD_270);
347 324
348 mask = SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK; 325 mask = SD_OCP_GLITCH_MASK;
349 val = pcr->hw_param.ocp_glitch; 326 val = pcr->hw_param.ocp_glitch;
350 rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val); 327 rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
328 rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
329 RTS5260_DVCC_OCP_EN |
330 RTS5260_DVCC_OCP_CL_EN,
331 RTS5260_DVCC_OCP_EN |
332 RTS5260_DVCC_OCP_CL_EN);
351 333
352 rtsx_pci_enable_ocp(pcr); 334 rtsx_pci_enable_ocp(pcr);
353 } else { 335 } else {
354 rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL, 336 rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
355 RTS5260_DVCC_OCP_EN | 337 RTS5260_DVCC_OCP_EN |
356 RTS5260_DVCC_OCP_CL_EN, 0); 338 RTS5260_DVCC_OCP_CL_EN, 0);
357 rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
358 RTS5260_DVIO_OCP_EN |
359 RTS5260_DVIO_OCP_CL_EN, 0);
360 } 339 }
361} 340}
362 341
@@ -364,14 +343,9 @@ static void rts5260_enable_ocp(struct rtsx_pcr *pcr)
364{ 343{
365 u8 val = 0; 344 u8 val = 0;
366 345
367 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
368
369 val = SD_OCP_INT_EN | SD_DETECT_EN; 346 val = SD_OCP_INT_EN | SD_DETECT_EN;
370 val |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
371 rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val); 347 rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
372 rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL, 348
373 DV3318_DETECT_EN | DV3318_OCP_INT_EN,
374 DV3318_DETECT_EN | DV3318_OCP_INT_EN);
375} 349}
376 350
377static void rts5260_disable_ocp(struct rtsx_pcr *pcr) 351static void rts5260_disable_ocp(struct rtsx_pcr *pcr)
@@ -379,15 +353,11 @@ static void rts5260_disable_ocp(struct rtsx_pcr *pcr)
379 u8 mask = 0; 353 u8 mask = 0;
380 354
381 mask = SD_OCP_INT_EN | SD_DETECT_EN; 355 mask = SD_OCP_INT_EN | SD_DETECT_EN;
382 mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
383 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); 356 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
384 rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
385 DV3318_DETECT_EN | DV3318_OCP_INT_EN, 0);
386 357
387 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
388 OC_POWER_DOWN);
389} 358}
390 359
360
391static int rts5260_get_ocpstat(struct rtsx_pcr *pcr, u8 *val) 361static int rts5260_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
392{ 362{
393 return rtsx_pci_read_register(pcr, REG_OCPSTAT, val); 363 return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
@@ -404,9 +374,7 @@ static void rts5260_clear_ocpstat(struct rtsx_pcr *pcr)
404 u8 val = 0; 374 u8 val = 0;
405 375
406 mask = SD_OCP_INT_CLR | SD_OC_CLR; 376 mask = SD_OCP_INT_CLR | SD_OC_CLR;
407 mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
408 val = SD_OCP_INT_CLR | SD_OC_CLR; 377 val = SD_OCP_INT_CLR | SD_OC_CLR;
409 val |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
410 378
411 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val); 379 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
412 rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL, 380 rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
@@ -425,36 +393,22 @@ static void rts5260_process_ocp(struct rtsx_pcr *pcr)
425 393
426 rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat); 394 rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
427 rts5260_get_ocpstat2(pcr, &pcr->ocp_stat2); 395 rts5260_get_ocpstat2(pcr, &pcr->ocp_stat2);
428 if (pcr->card_exist & SD_EXIST) 396
429 rtsx_sd_power_off_card3v3(pcr); 397 if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) ||
430 else if (pcr->card_exist & MS_EXIST) 398 (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) {
431 rtsx_ms_power_off_card3v3(pcr); 399 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
432 400 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
433 if (!(pcr->card_exist & MS_EXIST) && !(pcr->card_exist & SD_EXIST)) { 401 rtsx_pci_clear_ocpstat(pcr);
434 if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
435 SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
436 (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER)))
437 rtsx_pci_clear_ocpstat(pcr);
438 pcr->ocp_stat = 0; 402 pcr->ocp_stat = 0;
439 pcr->ocp_stat2 = 0; 403 pcr->ocp_stat2 = 0;
440 } 404 }
441 405
442 if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
443 SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
444 (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) {
445 if (pcr->card_exist & SD_EXIST)
446 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
447 else if (pcr->card_exist & MS_EXIST)
448 rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
449 }
450} 406}
451 407
452static int rts5260_init_hw(struct rtsx_pcr *pcr) 408static int rts5260_init_hw(struct rtsx_pcr *pcr)
453{ 409{
454 int err; 410 int err;
455 411
456 rtsx_pci_init_ocp(pcr);
457
458 rtsx_pci_init_cmd(pcr); 412 rtsx_pci_init_cmd(pcr);
459 413
460 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG1, 414 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG1,
@@ -483,6 +437,8 @@ static int rts5260_init_hw(struct rtsx_pcr *pcr)
483 if (err < 0) 437 if (err < 0)
484 return err; 438 return err;
485 439
440 rtsx_pci_init_ocp(pcr);
441
486 return 0; 442 return 0;
487} 443}
488 444
@@ -499,7 +455,13 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
499 pcr_dbg(pcr, "Set parameters for L1.2."); 455 pcr_dbg(pcr, "Set parameters for L1.2.");
500 rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL, 456 rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
501 0xFF, PCIE_L1_2_EN); 457 0xFF, PCIE_L1_2_EN);
502 rtsx_pci_write_register(pcr, PWR_FE_CTL, 458 rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
459 RTS5260_DVCC_OCP_EN |
460 RTS5260_DVCC_OCP_CL_EN,
461 RTS5260_DVCC_OCP_EN |
462 RTS5260_DVCC_OCP_CL_EN);
463
464 rtsx_pci_write_register(pcr, PWR_FE_CTL,
503 0xFF, PCIE_L1_2_PD_FE_EN); 465 0xFF, PCIE_L1_2_PD_FE_EN);
504 } else if (lss_l1_1) { 466 } else if (lss_l1_1) {
505 pcr_dbg(pcr, "Set parameters for L1.1."); 467 pcr_dbg(pcr, "Set parameters for L1.1.");
@@ -742,7 +704,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
742 option->ocp_en = 1; 704 option->ocp_en = 1;
743 if (option->ocp_en) 705 if (option->ocp_en)
744 hw_param->interrupt_en |= SD_OC_INT_EN; 706 hw_param->interrupt_en |= SD_OC_INT_EN;
745 hw_param->ocp_glitch = SD_OCP_GLITCH_10M | SDVIO_OCP_GLITCH_800U; 707 hw_param->ocp_glitch = SDVIO_OCP_GLITCH_800U | SDVIO_OCP_GLITCH_800U;
746 option->sd_400mA_ocp_thd = RTS5260_DVCC_OCP_THD_550; 708 option->sd_400mA_ocp_thd = RTS5260_DVCC_OCP_THD_550;
747 option->sd_800mA_ocp_thd = RTS5260_DVCC_OCP_THD_970; 709 option->sd_800mA_ocp_thd = RTS5260_DVCC_OCP_THD_970;
748} 710}
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index da445223f4cc..0d320e0ab4c9 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -703,7 +703,10 @@ EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
703 703
704static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr) 704static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
705{ 705{
706 pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN; 706 struct rtsx_hw_param *hw_param = &pcr->hw_param;
707
708 pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
709 | hw_param->interrupt_en;
707 710
708 if (pcr->num_slots > 1) 711 if (pcr->num_slots > 1)
709 pcr->bier |= MS_INT_EN; 712 pcr->bier |= MS_INT_EN;
@@ -969,8 +972,19 @@ static void rtsx_pci_card_detect(struct work_struct *work)
969 972
970static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr) 973static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
971{ 974{
972 if (pcr->ops->process_ocp) 975 if (pcr->ops->process_ocp) {
973 pcr->ops->process_ocp(pcr); 976 pcr->ops->process_ocp(pcr);
977 } else {
978 if (!pcr->option.ocp_en)
979 return;
980 rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
981 if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
982 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
983 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
984 rtsx_pci_clear_ocpstat(pcr);
985 pcr->ocp_stat = 0;
986 }
987 }
974} 988}
975 989
976static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr) 990static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
@@ -1039,7 +1053,7 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
1039 } 1053 }
1040 } 1054 }
1041 1055
1042 if (pcr->card_inserted || pcr->card_removed) 1056 if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
1043 schedule_delayed_work(&pcr->carddet_work, 1057 schedule_delayed_work(&pcr->carddet_work,
1044 msecs_to_jiffies(200)); 1058 msecs_to_jiffies(200));
1045 1059
@@ -1144,10 +1158,12 @@ void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1144{ 1158{
1145 u8 val = SD_OCP_INT_EN | SD_DETECT_EN; 1159 u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1146 1160
1147 if (pcr->ops->enable_ocp) 1161 if (pcr->ops->enable_ocp) {
1148 pcr->ops->enable_ocp(pcr); 1162 pcr->ops->enable_ocp(pcr);
1149 else 1163 } else {
1164 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1150 rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val); 1165 rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1166 }
1151 1167
1152} 1168}
1153 1169
@@ -1155,10 +1171,13 @@ void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1155{ 1171{
1156 u8 mask = SD_OCP_INT_EN | SD_DETECT_EN; 1172 u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1157 1173
1158 if (pcr->ops->disable_ocp) 1174 if (pcr->ops->disable_ocp) {
1159 pcr->ops->disable_ocp(pcr); 1175 pcr->ops->disable_ocp(pcr);
1160 else 1176 } else {
1161 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); 1177 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1178 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1179 OC_POWER_DOWN);
1180 }
1162} 1181}
1163 1182
1164void rtsx_pci_init_ocp(struct rtsx_pcr *pcr) 1183void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
@@ -1169,7 +1188,7 @@ void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1169 struct rtsx_cr_option *option = &(pcr->option); 1188 struct rtsx_cr_option *option = &(pcr->option);
1170 1189
1171 if (option->ocp_en) { 1190 if (option->ocp_en) {
1172 u8 val = option->sd_400mA_ocp_thd; 1191 u8 val = option->sd_800mA_ocp_thd;
1173 1192
1174 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0); 1193 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1175 rtsx_pci_write_register(pcr, REG_OCPPARA1, 1194 rtsx_pci_write_register(pcr, REG_OCPPARA1,
@@ -1204,6 +1223,7 @@ void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1204 u8 val = SD_OCP_INT_CLR | SD_OC_CLR; 1223 u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1205 1224
1206 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val); 1225 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1226 udelay(100);
1207 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); 1227 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1208 } 1228 }
1209} 1229}
@@ -1213,7 +1233,6 @@ int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1213 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN | 1233 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1214 MS_CLK_EN | SD40_CLK_EN, 0); 1234 MS_CLK_EN | SD40_CLK_EN, 0);
1215 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); 1235 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1216
1217 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD); 1236 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1218 1237
1219 msleep(50); 1238 msleep(50);
@@ -1313,6 +1332,9 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1313 break; 1332 break;
1314 } 1333 }
1315 1334
1335 /*init ocp*/
1336 rtsx_pci_init_ocp(pcr);
1337
1316 /* Enable clk_request_n to enable clock power management */ 1338 /* Enable clk_request_n to enable clock power management */
1317 rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1); 1339 rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
1318 /* Enter L1 when host tx idle */ 1340 /* Enter L1 when host tx idle */
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 6ea1655db0bb..300fc31d8e67 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -46,6 +46,11 @@
46 46
47#define SSC_CLOCK_STABLE_WAIT 130 47#define SSC_CLOCK_STABLE_WAIT 130
48 48
49#define RTS524A_OCP_THD_800 0x04
50#define RTS525A_OCP_THD_800 0x05
51#define RTS522A_OCP_THD_800 0x06
52
53
49int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val); 54int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
50int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val); 55int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
51 56
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 5a17bfeb80d3..74d4fda6c4a7 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -125,9 +125,7 @@ enclosure_register(struct device *dev, const char *name, int components,
 			      struct enclosure_component_callbacks *cb)
 {
 	struct enclosure_device *edev =
-		kzalloc(sizeof(struct enclosure_device) +
-			sizeof(struct enclosure_component)*components,
-			GFP_KERNEL);
+		kzalloc(struct_size(edev, component, components), GFP_KERNEL);
 	int err, i;
 
 	BUG_ON(!cb);
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
new file mode 100644
index 000000000000..39f832d27288
--- /dev/null
+++ b/drivers/misc/fastrpc.c
@@ -0,0 +1,1401 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
3// Copyright (c) 2018, Linaro Limited
4
5#include <linux/completion.h>
6#include <linux/device.h>
7#include <linux/dma-buf.h>
8#include <linux/dma-mapping.h>
9#include <linux/idr.h>
10#include <linux/list.h>
11#include <linux/miscdevice.h>
12#include <linux/module.h>
13#include <linux/of_address.h>
14#include <linux/of.h>
15#include <linux/of_platform.h>
16#include <linux/rpmsg.h>
17#include <linux/scatterlist.h>
18#include <linux/slab.h>
19#include <uapi/misc/fastrpc.h>
20
21#define ADSP_DOMAIN_ID (0)
22#define MDSP_DOMAIN_ID (1)
23#define SDSP_DOMAIN_ID (2)
24#define CDSP_DOMAIN_ID (3)
25#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, sdsp, cdsp */
26#define FASTRPC_MAX_SESSIONS 9 /* 8 compute, 1 cpz */
27#define FASTRPC_ALIGN 128
28#define FASTRPC_MAX_FDLIST 16
29#define FASTRPC_MAX_CRCLIST 64
30#define FASTRPC_PHYS(p) ((p) & 0xffffffff)
31#define FASTRPC_CTX_MAX (256)
32#define FASTRPC_INIT_HANDLE 1
33#define FASTRPC_CTXID_MASK (0xFF0)
34#define INIT_FILELEN_MAX (2 * 1024 * 1024)
35#define INIT_MEMLEN_MAX (8 * 1024 * 1024)
36#define FASTRPC_DEVICE_NAME "fastrpc"
37
38/* Retrieves number of input buffers from the scalars parameter */
39#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
40
41/* Retrieves number of output buffers from the scalars parameter */
42#define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff)
43
44/* Retrieves number of input handles from the scalars parameter */
45#define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f)
46
47/* Retrieves number of output handles from the scalars parameter */
48#define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f)
49
50#define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) + \
51 REMOTE_SCALARS_OUTBUFS(sc) + \
52 REMOTE_SCALARS_INHANDLES(sc)+ \
53 REMOTE_SCALARS_OUTHANDLES(sc))
54#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
55 (((attr & 0x07) << 29) | \
56 ((method & 0x1f) << 24) | \
57 ((in & 0xff) << 16) | \
58 ((out & 0xff) << 8) | \
59 ((oin & 0x0f) << 4) | \
60 (oout & 0x0f))
61
62#define FASTRPC_SCALARS(method, in, out) \
63 FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
64
65#define FASTRPC_CREATE_PROCESS_NARGS 6
66/* Remote Method id table */
67#define FASTRPC_RMID_INIT_ATTACH 0
68#define FASTRPC_RMID_INIT_RELEASE 1
69#define FASTRPC_RMID_INIT_CREATE 6
70#define FASTRPC_RMID_INIT_CREATE_ATTR 7
71#define FASTRPC_RMID_INIT_CREATE_STATIC 8
72
73#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)
74
75static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
76 "sdsp", "cdsp"};
77struct fastrpc_phy_page {
78 u64 addr; /* physical address */
79 u64 size; /* size of contiguous region */
80};
81
82struct fastrpc_invoke_buf {
83 u32 num; /* number of contiguous regions */
84 u32 pgidx; /* index to start of contiguous region */
85};
86
87struct fastrpc_remote_arg {
88 u64 pv;
89 u64 len;
90};
91
92struct fastrpc_msg {
93 int pid; /* process group id */
94 int tid; /* thread id */
95 u64 ctx; /* invoke caller context */
96 u32 handle; /* handle to invoke */
97 u32 sc; /* scalars structure describing the data */
98 u64 addr; /* physical address */
99 u64 size; /* size of contiguous region */
100};
101
102struct fastrpc_invoke_rsp {
103 u64 ctx; /* invoke caller context */
104 int retval; /* invoke return value */
105};
106
107struct fastrpc_buf {
108 struct fastrpc_user *fl;
109 struct dma_buf *dmabuf;
110 struct device *dev;
111 void *virt;
112 u64 phys;
113 u64 size;
114 /* Lock for dma buf attachments */
115 struct mutex lock;
116 struct list_head attachments;
117};
118
119struct fastrpc_dma_buf_attachment {
120 struct device *dev;
121 struct sg_table sgt;
122 struct list_head node;
123};
124
125struct fastrpc_map {
126 struct list_head node;
127 struct fastrpc_user *fl;
128 int fd;
129 struct dma_buf *buf;
130 struct sg_table *table;
131 struct dma_buf_attachment *attach;
132 u64 phys;
133 u64 size;
134 void *va;
135 u64 len;
136 struct kref refcount;
137};
138
139struct fastrpc_invoke_ctx {
140 int nscalars;
141 int nbufs;
142 int retval;
143 int pid;
144 int tgid;
145 u32 sc;
146 u32 *crc;
147 u64 ctxid;
148 u64 msg_sz;
149 struct kref refcount;
150 struct list_head node; /* list of ctxs */
151 struct completion work;
152 struct fastrpc_msg msg;
153 struct fastrpc_user *fl;
154 struct fastrpc_remote_arg *rpra;
155 struct fastrpc_map **maps;
156 struct fastrpc_buf *buf;
157 struct fastrpc_invoke_args *args;
158 struct fastrpc_channel_ctx *cctx;
159};
160
161struct fastrpc_session_ctx {
162 struct device *dev;
163 int sid;
164 bool used;
165 bool valid;
166};
167
168struct fastrpc_channel_ctx {
169 int domain_id;
170 int sesscount;
171 struct rpmsg_device *rpdev;
172 struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
173 spinlock_t lock;
174 struct idr ctx_idr;
175 struct list_head users;
176 struct miscdevice miscdev;
177};
178
179struct fastrpc_user {
180 struct list_head user;
181 struct list_head maps;
182 struct list_head pending;
183
184 struct fastrpc_channel_ctx *cctx;
185 struct fastrpc_session_ctx *sctx;
186 struct fastrpc_buf *init_mem;
187
188 int tgid;
189 int pd;
190 /* Lock for lists */
191 spinlock_t lock;
192 /* lock for allocations */
193 struct mutex mutex;
194};
195
196static void fastrpc_free_map(struct kref *ref)
197{
198 struct fastrpc_map *map;
199
200 map = container_of(ref, struct fastrpc_map, refcount);
201
202 if (map->table) {
203 dma_buf_unmap_attachment(map->attach, map->table,
204 DMA_BIDIRECTIONAL);
205 dma_buf_detach(map->buf, map->attach);
206 dma_buf_put(map->buf);
207 }
208
209 kfree(map);
210}
211
212static void fastrpc_map_put(struct fastrpc_map *map)
213{
214 if (map)
215 kref_put(&map->refcount, fastrpc_free_map);
216}
217
218static void fastrpc_map_get(struct fastrpc_map *map)
219{
220 if (map)
221 kref_get(&map->refcount);
222}
223
224static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
225 struct fastrpc_map **ppmap)
226{
227 struct fastrpc_map *map = NULL;
228
229 mutex_lock(&fl->mutex);
230 list_for_each_entry(map, &fl->maps, node) {
231 if (map->fd == fd) {
232 fastrpc_map_get(map);
233 *ppmap = map;
234 mutex_unlock(&fl->mutex);
235 return 0;
236 }
237 }
238 mutex_unlock(&fl->mutex);
239
240 return -ENOENT;
241}
242
243static void fastrpc_buf_free(struct fastrpc_buf *buf)
244{
245 dma_free_coherent(buf->dev, buf->size, buf->virt,
246 FASTRPC_PHYS(buf->phys));
247 kfree(buf);
248}
249
250static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
251 u64 size, struct fastrpc_buf **obuf)
252{
253 struct fastrpc_buf *buf;
254
255 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
256 if (!buf)
257 return -ENOMEM;
258
259 INIT_LIST_HEAD(&buf->attachments);
260 mutex_init(&buf->lock);
261
262 buf->fl = fl;
263 buf->virt = NULL;
264 buf->phys = 0;
265 buf->size = size;
266 buf->dev = dev;
267
268 buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
269 GFP_KERNEL);
270 if (!buf->virt)
271 return -ENOMEM;
272
273 if (fl->sctx && fl->sctx->sid)
274 buf->phys += ((u64)fl->sctx->sid << 32);
275
276 *obuf = buf;
277
278 return 0;
279}
280
281static void fastrpc_context_free(struct kref *ref)
282{
283 struct fastrpc_invoke_ctx *ctx;
284 struct fastrpc_channel_ctx *cctx;
285 int i;
286
287 ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
288 cctx = ctx->cctx;
289
290 for (i = 0; i < ctx->nscalars; i++)
291 fastrpc_map_put(ctx->maps[i]);
292
293 if (ctx->buf)
294 fastrpc_buf_free(ctx->buf);
295
296 spin_lock(&cctx->lock);
297 idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
298 spin_unlock(&cctx->lock);
299
300 kfree(ctx->maps);
301 kfree(ctx);
302}
303
304static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
305{
306 kref_get(&ctx->refcount);
307}
308
309static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
310{
311 kref_put(&ctx->refcount, fastrpc_context_free);
312}
313
314static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
315 struct fastrpc_user *user, u32 kernel, u32 sc,
316 struct fastrpc_invoke_args *args)
317{
318 struct fastrpc_channel_ctx *cctx = user->cctx;
319 struct fastrpc_invoke_ctx *ctx = NULL;
320 int ret;
321
322 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
323 if (!ctx)
324 return ERR_PTR(-ENOMEM);
325
326 INIT_LIST_HEAD(&ctx->node);
327 ctx->fl = user;
328 ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
329 ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
330 REMOTE_SCALARS_OUTBUFS(sc);
331
332 if (ctx->nscalars) {
333 ctx->maps = kcalloc(ctx->nscalars,
334 sizeof(*ctx->maps), GFP_KERNEL);
335 if (!ctx->maps) {
336 kfree(ctx);
337 return ERR_PTR(-ENOMEM);
338 }
339 ctx->args = args;
340 }
341
342 ctx->sc = sc;
343 ctx->retval = -1;
344 ctx->pid = current->pid;
345 ctx->tgid = user->tgid;
346 ctx->cctx = cctx;
347 init_completion(&ctx->work);
348
349 spin_lock(&user->lock);
350 list_add_tail(&ctx->node, &user->pending);
351 spin_unlock(&user->lock);
352
353 spin_lock(&cctx->lock);
354 ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
355 FASTRPC_CTX_MAX, GFP_ATOMIC);
356 if (ret < 0) {
357 spin_unlock(&cctx->lock);
358 goto err_idr;
359 }
360 ctx->ctxid = ret << 4;
361 spin_unlock(&cctx->lock);
362
363 kref_init(&ctx->refcount);
364
365 return ctx;
366err_idr:
367 spin_lock(&user->lock);
368 list_del(&ctx->node);
369 spin_unlock(&user->lock);
370 kfree(ctx->maps);
371 kfree(ctx);
372
373 return ERR_PTR(ret);
374}
375
376static struct sg_table *
377fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
378 enum dma_data_direction dir)
379{
380 struct fastrpc_dma_buf_attachment *a = attachment->priv;
381 struct sg_table *table;
382
383 table = &a->sgt;
384
385 if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
386 return ERR_PTR(-ENOMEM);
387
388 return table;
389}
390
391static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
392 struct sg_table *table,
393 enum dma_data_direction dir)
394{
395 dma_unmap_sg(attach->dev, table->sgl, table->nents, dir);
396}
397
398static void fastrpc_release(struct dma_buf *dmabuf)
399{
400 struct fastrpc_buf *buffer = dmabuf->priv;
401
402 fastrpc_buf_free(buffer);
403}
404
405static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
406 struct dma_buf_attachment *attachment)
407{
408 struct fastrpc_dma_buf_attachment *a;
409 struct fastrpc_buf *buffer = dmabuf->priv;
410 int ret;
411
412 a = kzalloc(sizeof(*a), GFP_KERNEL);
413 if (!a)
414 return -ENOMEM;
415
416 ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
417 FASTRPC_PHYS(buffer->phys), buffer->size);
418 if (ret < 0) {
419 dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
420 return -EINVAL;
421 }
422
423 a->dev = attachment->dev;
424 INIT_LIST_HEAD(&a->node);
425 attachment->priv = a;
426
427 mutex_lock(&buffer->lock);
428 list_add(&a->node, &buffer->attachments);
429 mutex_unlock(&buffer->lock);
430
431 return 0;
432}
433
434static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
435 struct dma_buf_attachment *attachment)
436{
437 struct fastrpc_dma_buf_attachment *a = attachment->priv;
438 struct fastrpc_buf *buffer = dmabuf->priv;
439
440 mutex_lock(&buffer->lock);
441 list_del(&a->node);
442 mutex_unlock(&buffer->lock);
443 kfree(a);
444}
445
446static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
447{
448 struct fastrpc_buf *buf = dmabuf->priv;
449
450 return buf->virt ? buf->virt + pgnum * PAGE_SIZE : NULL;
451}
452
453static void *fastrpc_vmap(struct dma_buf *dmabuf)
454{
455 struct fastrpc_buf *buf = dmabuf->priv;
456
457 return buf->virt;
458}
459
460static int fastrpc_mmap(struct dma_buf *dmabuf,
461 struct vm_area_struct *vma)
462{
463 struct fastrpc_buf *buf = dmabuf->priv;
464 size_t size = vma->vm_end - vma->vm_start;
465
466 return dma_mmap_coherent(buf->dev, vma, buf->virt,
467 FASTRPC_PHYS(buf->phys), size);
468}
469
470static const struct dma_buf_ops fastrpc_dma_buf_ops = {
471 .attach = fastrpc_dma_buf_attach,
472 .detach = fastrpc_dma_buf_detatch,
473 .map_dma_buf = fastrpc_map_dma_buf,
474 .unmap_dma_buf = fastrpc_unmap_dma_buf,
475 .mmap = fastrpc_mmap,
476 .map = fastrpc_kmap,
477 .vmap = fastrpc_vmap,
478 .release = fastrpc_release,
479};
480
481static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
482 u64 len, struct fastrpc_map **ppmap)
483{
484 struct fastrpc_session_ctx *sess = fl->sctx;
485 struct fastrpc_map *map = NULL;
486 int err = 0;
487
488 if (!fastrpc_map_find(fl, fd, ppmap))
489 return 0;
490
491 map = kzalloc(sizeof(*map), GFP_KERNEL);
492 if (!map)
493 return -ENOMEM;
494
495 INIT_LIST_HEAD(&map->node);
496 map->fl = fl;
497 map->fd = fd;
498 map->buf = dma_buf_get(fd);
499 if (IS_ERR(map->buf)) {
500 err = PTR_ERR(map->buf);
501 goto get_err;
502 }
503
504 map->attach = dma_buf_attach(map->buf, sess->dev);
505 if (IS_ERR(map->attach)) {
506 dev_err(sess->dev, "Failed to attach dmabuf\n");
507 err = PTR_ERR(map->attach);
508 goto attach_err;
509 }
510
511 map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
512 if (IS_ERR(map->table)) {
513 err = PTR_ERR(map->table);
514 goto map_err;
515 }
516
517 map->phys = sg_dma_address(map->table->sgl);
518 map->phys += ((u64)fl->sctx->sid << 32);
519 map->size = len;
520 map->va = sg_virt(map->table->sgl);
521 map->len = len;
522 kref_init(&map->refcount);
523
524 spin_lock(&fl->lock);
525 list_add_tail(&map->node, &fl->maps);
526 spin_unlock(&fl->lock);
527 *ppmap = map;
528
529 return 0;
530
531map_err:
532 dma_buf_detach(map->buf, map->attach);
533attach_err:
534 dma_buf_put(map->buf);
535get_err:
536 kfree(map);
537
538 return err;
539}
540
541/*
542 * Fastrpc payload buffer with metadata looks like:
543 *
544 * >>>>>> START of METADATA <<<<<<<<<
545 * +---------------------------------+
546 * | Arguments |
547 * | type:(struct fastrpc_remote_arg)|
548 * | (0 - N) |
549 * +---------------------------------+
550 * | Invoke Buffer list |
551 * | type:(struct fastrpc_invoke_buf)|
552 * | (0 - N) |
553 * +---------------------------------+
554 * | Page info list |
555 * | type:(struct fastrpc_phy_page) |
556 * | (0 - N) |
557 * +---------------------------------+
558 * | Optional info |
559 * |(can be specific to SoC/Firmware)|
560 * +---------------------------------+
561 * >>>>>>>> END of METADATA <<<<<<<<<
562 * +---------------------------------+
563 * | Inline ARGS |
564 * | (0-N) |
565 * +---------------------------------+
566 */
567
568static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
569{
570 int size = 0;
571
572 size = (sizeof(struct fastrpc_remote_arg) +
573 sizeof(struct fastrpc_invoke_buf) +
574 sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
575 sizeof(u64) * FASTRPC_MAX_FDLIST +
576 sizeof(u32) * FASTRPC_MAX_CRCLIST;
577
578 return size;
579}
580
581static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
582{
583 u64 size = 0;
584 int i;
585
586 size = ALIGN(metalen, FASTRPC_ALIGN);
587 for (i = 0; i < ctx->nscalars; i++) {
588 if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
589 size = ALIGN(size, FASTRPC_ALIGN);
590 size += ctx->args[i].length;
591 }
592 }
593
594 return size;
595}
596
597static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
598{
599 struct device *dev = ctx->fl->sctx->dev;
600 int i, err;
601
602 for (i = 0; i < ctx->nscalars; ++i) {
603 /* Make sure reserved field is set to 0 */
604 if (ctx->args[i].reserved)
605 return -EINVAL;
606
607 if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
608 ctx->args[i].length == 0)
609 continue;
610
611 err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
612 ctx->args[i].length, &ctx->maps[i]);
613 if (err) {
614 dev_err(dev, "Error Creating map %d\n", err);
615 return -EINVAL;
616 }
617
618 }
619 return 0;
620}
621
622static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
623{
624 struct device *dev = ctx->fl->sctx->dev;
625 struct fastrpc_remote_arg *rpra;
626 struct fastrpc_invoke_buf *list;
627 struct fastrpc_phy_page *pages;
628 int inbufs, i, err = 0;
629 u64 rlen, pkt_size;
630 uintptr_t args;
631 int metalen;
632
633
634 inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
635 metalen = fastrpc_get_meta_size(ctx);
636 pkt_size = fastrpc_get_payload_size(ctx, metalen);
637
638 err = fastrpc_create_maps(ctx);
639 if (err)
640 return err;
641
642 ctx->msg_sz = pkt_size;
643
644 err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
645 if (err)
646 return err;
647
648 rpra = ctx->buf->virt;
649 list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
650 pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
651 sizeof(*rpra));
652 args = (uintptr_t)ctx->buf->virt + metalen;
653 rlen = pkt_size - metalen;
654 ctx->rpra = rpra;
655
656 for (i = 0; i < ctx->nbufs; ++i) {
657 u64 len = ctx->args[i].length;
658
659 rpra[i].pv = 0;
660 rpra[i].len = len;
661 list[i].num = len ? 1 : 0;
662 list[i].pgidx = i;
663
664 if (!len)
665 continue;
666
667 pages[i].size = roundup(len, PAGE_SIZE);
668
669 if (ctx->maps[i]) {
670 rpra[i].pv = (u64) ctx->args[i].ptr;
671 pages[i].addr = ctx->maps[i]->phys;
672 } else {
673 rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
674 args = ALIGN(args, FASTRPC_ALIGN);
675 if (rlen < len)
676 goto bail;
677
678 rpra[i].pv = args;
679 pages[i].addr = ctx->buf->phys + (pkt_size - rlen);
680 pages[i].addr = pages[i].addr & PAGE_MASK;
681 args = args + len;
682 rlen -= len;
683 }
684
685 if (i < inbufs && !ctx->maps[i]) {
686 void *dst = (void *)(uintptr_t)rpra[i].pv;
687 void *src = (void *)(uintptr_t)ctx->args[i].ptr;
688
689 if (!kernel) {
690 if (copy_from_user(dst, (void __user *)src,
691 len)) {
692 err = -EFAULT;
693 goto bail;
694 }
695 } else {
696 memcpy(dst, src, len);
697 }
698 }
699 }
700
701 for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
702 rpra[i].pv = (u64) ctx->args[i].ptr;
703 rpra[i].len = ctx->args[i].length;
704 list[i].num = ctx->args[i].length ? 1 : 0;
705 list[i].pgidx = i;
706 pages[i].addr = ctx->maps[i]->phys;
707 pages[i].size = ctx->maps[i]->size;
708 }
709
710bail:
711 if (err)
712 dev_err(dev, "Error: get invoke args failed:%d\n", err);
713
714 return err;
715}
716
717static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
718 u32 kernel)
719{
720 struct fastrpc_remote_arg *rpra = ctx->rpra;
721 int i, inbufs;
722
723 inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
724
725 for (i = inbufs; i < ctx->nbufs; ++i) {
726 void *src = (void *)(uintptr_t)rpra[i].pv;
727 void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
728 u64 len = rpra[i].len;
729
730 if (!kernel) {
731 if (copy_to_user((void __user *)dst, src, len))
732 return -EFAULT;
733 } else {
734 memcpy(dst, src, len);
735 }
736 }
737
738 return 0;
739}
740
741static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
742 struct fastrpc_invoke_ctx *ctx,
743 u32 kernel, uint32_t handle)
744{
745 struct fastrpc_channel_ctx *cctx;
746 struct fastrpc_user *fl = ctx->fl;
747 struct fastrpc_msg *msg = &ctx->msg;
748
749 cctx = fl->cctx;
750 msg->pid = fl->tgid;
751 msg->tid = current->pid;
752
753 if (kernel)
754 msg->pid = 0;
755
756 msg->ctx = ctx->ctxid | fl->pd;
757 msg->handle = handle;
758 msg->sc = ctx->sc;
759 msg->addr = ctx->buf ? ctx->buf->phys : 0;
760 msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
761 fastrpc_context_get(ctx);
762
763 return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
764}
765
766static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
767 u32 handle, u32 sc,
768 struct fastrpc_invoke_args *args)
769{
770 struct fastrpc_invoke_ctx *ctx = NULL;
771 int err = 0;
772
773 if (!fl->sctx)
774 return -EINVAL;
775
776 ctx = fastrpc_context_alloc(fl, kernel, sc, args);
777 if (IS_ERR(ctx))
778 return PTR_ERR(ctx);
779
780 if (ctx->nscalars) {
781 err = fastrpc_get_args(kernel, ctx);
782 if (err)
783 goto bail;
784 }
785 /* Send invoke buffer to remote dsp */
786 err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
787 if (err)
788 goto bail;
789
790 /* Wait for remote dsp to respond or time out */
791 err = wait_for_completion_interruptible(&ctx->work);
792 if (err)
793 goto bail;
794
795 /* Check the response from remote dsp */
796 err = ctx->retval;
797 if (err)
798 goto bail;
799
800 if (ctx->nscalars) {
801 /* populate all the output buffers with results */
802 err = fastrpc_put_args(ctx, kernel);
803 if (err)
804 goto bail;
805 }
806
807bail:
808 /* We are done with this compute context, remove it from pending list */
809 spin_lock(&fl->lock);
810 list_del(&ctx->node);
811 spin_unlock(&fl->lock);
812 fastrpc_context_put(ctx);
813
814 if (err)
815 dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
816
817 return err;
818}
819
820static int fastrpc_init_create_process(struct fastrpc_user *fl,
821 char __user *argp)
822{
823 struct fastrpc_init_create init;
824 struct fastrpc_invoke_args *args;
825 struct fastrpc_phy_page pages[1];
826 struct fastrpc_map *map = NULL;
827 struct fastrpc_buf *imem = NULL;
828 int memlen;
829 int err;
830 struct {
831 int pgid;
832 u32 namelen;
833 u32 filelen;
834 u32 pageslen;
835 u32 attrs;
836 u32 siglen;
837 } inbuf;
838 u32 sc;
839
840 args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
841 if (!args)
842 return -ENOMEM;
843
844 if (copy_from_user(&init, argp, sizeof(init))) {
845 err = -EFAULT;
846 goto bail;
847 }
848
849 if (init.filelen > INIT_FILELEN_MAX) {
850 err = -EINVAL;
851 goto bail;
852 }
853
854 inbuf.pgid = fl->tgid;
855 inbuf.namelen = strlen(current->comm) + 1;
856 inbuf.filelen = init.filelen;
857 inbuf.pageslen = 1;
858 inbuf.attrs = init.attrs;
859 inbuf.siglen = init.siglen;
860 fl->pd = 1;
861
862 if (init.filelen && init.filefd) {
863 err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
864 if (err)
865 goto bail;
866 }
867
868 memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
869 1024 * 1024);
870 err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
871 &imem);
872 if (err) {
873 fastrpc_map_put(map);
874 goto bail;
875 }
876
877 fl->init_mem = imem;
878 args[0].ptr = (u64)(uintptr_t)&inbuf;
879 args[0].length = sizeof(inbuf);
880 args[0].fd = -1;
881
882 args[1].ptr = (u64)(uintptr_t)current->comm;
883 args[1].length = inbuf.namelen;
884 args[1].fd = -1;
885
886 args[2].ptr = (u64) init.file;
887 args[2].length = inbuf.filelen;
888 args[2].fd = init.filefd;
889
890 pages[0].addr = imem->phys;
891 pages[0].size = imem->size;
892
893 args[3].ptr = (u64)(uintptr_t) pages;
894 args[3].length = 1 * sizeof(*pages);
895 args[3].fd = -1;
896
897 args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
898 args[4].length = sizeof(inbuf.attrs);
899 args[4].fd = -1;
900
901 args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
902 args[5].length = sizeof(inbuf.siglen);
903 args[5].fd = -1;
904
905 sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
906 if (init.attrs)
907 sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);
908
909 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
910 sc, args);
911
912 if (err) {
913 fastrpc_map_put(map);
914 fastrpc_buf_free(imem);
915 }
916
917bail:
918 kfree(args);
919
920 return err;
921}
922
923static struct fastrpc_session_ctx *fastrpc_session_alloc(
924 struct fastrpc_channel_ctx *cctx)
925{
926 struct fastrpc_session_ctx *session = NULL;
927 int i;
928
929 spin_lock(&cctx->lock);
930 for (i = 0; i < cctx->sesscount; i++) {
931 if (!cctx->session[i].used && cctx->session[i].valid) {
932 cctx->session[i].used = true;
933 session = &cctx->session[i];
934 break;
935 }
936 }
937 spin_unlock(&cctx->lock);
938
939 return session;
940}
941
942static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
943 struct fastrpc_session_ctx *session)
944{
945 spin_lock(&cctx->lock);
946 session->used = false;
947 spin_unlock(&cctx->lock);
948}
949
950static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
951{
952 struct fastrpc_invoke_args args[1];
953 int tgid = 0;
954 u32 sc;
955
956 tgid = fl->tgid;
957 args[0].ptr = (u64)(uintptr_t) &tgid;
958 args[0].length = sizeof(tgid);
959 args[0].fd = -1;
960 args[0].reserved = 0;
961 sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
962
963 return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
964 sc, &args[0]);
965}
966
967static int fastrpc_device_release(struct inode *inode, struct file *file)
968{
969 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
970 struct fastrpc_channel_ctx *cctx = fl->cctx;
971 struct fastrpc_invoke_ctx *ctx, *n;
972 struct fastrpc_map *map, *m;
973
974 fastrpc_release_current_dsp_process(fl);
975
976 spin_lock(&cctx->lock);
977 list_del(&fl->user);
978 spin_unlock(&cctx->lock);
979
980 if (fl->init_mem)
981 fastrpc_buf_free(fl->init_mem);
982
983 list_for_each_entry_safe(ctx, n, &fl->pending, node) {
984 list_del(&ctx->node);
985 fastrpc_context_put(ctx);
986 }
987
988 list_for_each_entry_safe(map, m, &fl->maps, node) {
989 list_del(&map->node);
990 fastrpc_map_put(map);
991 }
992
993 fastrpc_session_free(cctx, fl->sctx);
994
995 mutex_destroy(&fl->mutex);
996 kfree(fl);
997 file->private_data = NULL;
998
999 return 0;
1000}
1001
1002static int fastrpc_device_open(struct inode *inode, struct file *filp)
1003{
1004 struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
1005 struct fastrpc_user *fl = NULL;
1006
1007 fl = kzalloc(sizeof(*fl), GFP_KERNEL);
1008 if (!fl)
1009 return -ENOMEM;
1010
1011 filp->private_data = fl;
1012 spin_lock_init(&fl->lock);
1013 mutex_init(&fl->mutex);
1014 INIT_LIST_HEAD(&fl->pending);
1015 INIT_LIST_HEAD(&fl->maps);
1016 INIT_LIST_HEAD(&fl->user);
1017 fl->tgid = current->tgid;
1018 fl->cctx = cctx;
1019
1020 fl->sctx = fastrpc_session_alloc(cctx);
1021 if (!fl->sctx) {
1022 dev_err(&cctx->rpdev->dev, "No session available\n");
1023 mutex_destroy(&fl->mutex);
1024 kfree(fl);
1025
1026 return -EBUSY;
1027 }
1028
1029 spin_lock(&cctx->lock);
1030 list_add_tail(&fl->user, &cctx->users);
1031 spin_unlock(&cctx->lock);
1032
1033 return 0;
1034}
1035
1036static int fastrpc_dmabuf_free(struct fastrpc_user *fl, char __user *argp)
1037{
1038 struct dma_buf *buf;
1039 int info;
1040
1041 if (copy_from_user(&info, argp, sizeof(info)))
1042 return -EFAULT;
1043
1044 buf = dma_buf_get(info);
1045 if (IS_ERR_OR_NULL(buf))
1046 return -EINVAL;
1047 /*
1048 * one for the last get and the other for the ALLOC_DMA_BUFF ioctl
1049 */
1050 dma_buf_put(buf);
1051 dma_buf_put(buf);
1052
1053 return 0;
1054}
1055
1056static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
1057{
1058 struct fastrpc_alloc_dma_buf bp;
1059 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1060 struct fastrpc_buf *buf = NULL;
1061 int err;
1062
1063 if (copy_from_user(&bp, argp, sizeof(bp)))
1064 return -EFAULT;
1065
1066 err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
1067 if (err)
1068 return err;
1069 exp_info.ops = &fastrpc_dma_buf_ops;
1070 exp_info.size = bp.size;
1071 exp_info.flags = O_RDWR;
1072 exp_info.priv = buf;
1073 buf->dmabuf = dma_buf_export(&exp_info);
1074 if (IS_ERR(buf->dmabuf)) {
1075 err = PTR_ERR(buf->dmabuf);
1076 fastrpc_buf_free(buf);
1077 return err;
1078 }
1079
1080 bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
1081 if (bp.fd < 0) {
1082 dma_buf_put(buf->dmabuf);
1083 return -EINVAL;
1084 }
1085
1086 if (copy_to_user(argp, &bp, sizeof(bp))) {
1087 dma_buf_put(buf->dmabuf);
1088 return -EFAULT;
1089 }
1090
1091 get_dma_buf(buf->dmabuf);
1092
1093 return 0;
1094}
1095
1096static int fastrpc_init_attach(struct fastrpc_user *fl)
1097{
1098 struct fastrpc_invoke_args args[1];
1099 int tgid = fl->tgid;
1100 u32 sc;
1101
1102 args[0].ptr = (u64)(uintptr_t) &tgid;
1103 args[0].length = sizeof(tgid);
1104 args[0].fd = -1;
1105 args[0].reserved = 0;
1106 sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
1107 fl->pd = 0;
1108
1109 return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1110 sc, &args[0]);
1111}
1112
1113static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
1114{
1115 struct fastrpc_invoke_args *args = NULL;
1116 struct fastrpc_invoke inv;
1117 u32 nscalars;
1118 int err;
1119
1120 if (copy_from_user(&inv, argp, sizeof(inv)))
1121 return -EFAULT;
1122
1123 /* nscalars is truncated here to max supported value */
1124 nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
1125 if (nscalars) {
1126 args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
1127 if (!args)
1128 return -ENOMEM;
1129
1130 if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
1131 nscalars * sizeof(*args))) {
1132 kfree(args);
1133 return -EFAULT;
1134 }
1135 }
1136
1137 err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
1138 kfree(args);
1139
1140 return err;
1141}
1142
1143static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
1144 unsigned long arg)
1145{
1146 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
1147 char __user *argp = (char __user *)arg;
1148 int err;
1149
1150 switch (cmd) {
1151 case FASTRPC_IOCTL_INVOKE:
1152 err = fastrpc_invoke(fl, argp);
1153 break;
1154 case FASTRPC_IOCTL_INIT_ATTACH:
1155 err = fastrpc_init_attach(fl);
1156 break;
1157 case FASTRPC_IOCTL_INIT_CREATE:
1158 err = fastrpc_init_create_process(fl, argp);
1159 break;
1160 case FASTRPC_IOCTL_FREE_DMA_BUFF:
1161 err = fastrpc_dmabuf_free(fl, argp);
1162 break;
1163 case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
1164 err = fastrpc_dmabuf_alloc(fl, argp);
1165 break;
1166 default:
1167 err = -ENOTTY;
1168 break;
1169 }
1170
1171 return err;
1172}
1173
1174static const struct file_operations fastrpc_fops = {
1175 .open = fastrpc_device_open,
1176 .release = fastrpc_device_release,
1177 .unlocked_ioctl = fastrpc_device_ioctl,
1178 .compat_ioctl = fastrpc_device_ioctl,
1179};
1180
1181static int fastrpc_cb_probe(struct platform_device *pdev)
1182{
1183 struct fastrpc_channel_ctx *cctx;
1184 struct fastrpc_session_ctx *sess;
1185 struct device *dev = &pdev->dev;
1186 int i, sessions = 0;
1187
1188 cctx = dev_get_drvdata(dev->parent);
1189 if (!cctx)
1190 return -EINVAL;
1191
1192 of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
1193
1194 spin_lock(&cctx->lock);
1195 sess = &cctx->session[cctx->sesscount];
1196 sess->used = false;
1197 sess->valid = true;
1198 sess->dev = dev;
1199 dev_set_drvdata(dev, sess);
1200
1201 if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
1202 dev_info(dev, "FastRPC Session ID not specified in DT\n");
1203
1204 if (sessions > 0) {
1205 struct fastrpc_session_ctx *dup_sess;
1206
1207 for (i = 1; i < sessions; i++) {
1208 if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
1209 break;
1210 dup_sess = &cctx->session[cctx->sesscount];
1211 memcpy(dup_sess, sess, sizeof(*dup_sess));
1212 }
1213 }
1214 cctx->sesscount++;
1215 spin_unlock(&cctx->lock);
1216 dma_set_mask(dev, DMA_BIT_MASK(32));
1217
1218 return 0;
1219}
1220
1221static int fastrpc_cb_remove(struct platform_device *pdev)
1222{
1223 struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
1224 struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
1225 int i;
1226
1227 spin_lock(&cctx->lock);
1228 for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
1229 if (cctx->session[i].sid == sess->sid) {
1230 cctx->session[i].valid = false;
1231 cctx->sesscount--;
1232 }
1233 }
1234 spin_unlock(&cctx->lock);
1235
1236 return 0;
1237}
1238
1239static const struct of_device_id fastrpc_match_table[] = {
1240 { .compatible = "qcom,fastrpc-compute-cb", },
1241 {}
1242};
1243
1244static struct platform_driver fastrpc_cb_driver = {
1245 .probe = fastrpc_cb_probe,
1246 .remove = fastrpc_cb_remove,
1247 .driver = {
1248 .name = "qcom,fastrpc-cb",
1249 .of_match_table = fastrpc_match_table,
1250 .suppress_bind_attrs = true,
1251 },
1252};
1253
1254static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
1255{
1256 struct device *rdev = &rpdev->dev;
1257 struct fastrpc_channel_ctx *data;
1258 int i, err, domain_id = -1;
1259 const char *domain;
1260
1261 data = devm_kzalloc(rdev, sizeof(*data), GFP_KERNEL);
1262 if (!data)
1263 return -ENOMEM;
1264
1265 err = of_property_read_string(rdev->of_node, "label", &domain);
1266 if (err) {
1267 dev_info(rdev, "FastRPC Domain not specified in DT\n");
1268 return err;
1269 }
1270
1271 for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
1272 if (!strcmp(domains[i], domain)) {
1273 domain_id = i;
1274 break;
1275 }
1276 }
1277
1278 if (domain_id < 0) {
1279 dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
1280 return -EINVAL;
1281 }
1282
1283 data->miscdev.minor = MISC_DYNAMIC_MINOR;
1284 data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
1285 domains[domain_id]);
1286 data->miscdev.fops = &fastrpc_fops;
1287 err = misc_register(&data->miscdev);
1288 if (err)
1289 return err;
1290
1291 dev_set_drvdata(&rpdev->dev, data);
1292 dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
1293 INIT_LIST_HEAD(&data->users);
1294 spin_lock_init(&data->lock);
1295 idr_init(&data->ctx_idr);
1296 data->domain_id = domain_id;
1297 data->rpdev = rpdev;
1298
1299 return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
1300}
1301
1302static void fastrpc_notify_users(struct fastrpc_user *user)
1303{
1304 struct fastrpc_invoke_ctx *ctx;
1305
1306 spin_lock(&user->lock);
1307 list_for_each_entry(ctx, &user->pending, node)
1308 complete(&ctx->work);
1309 spin_unlock(&user->lock);
1310}
1311
1312static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
1313{
1314 struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
1315 struct fastrpc_user *user;
1316
1317 spin_lock(&cctx->lock);
1318 list_for_each_entry(user, &cctx->users, user)
1319 fastrpc_notify_users(user);
1320 spin_unlock(&cctx->lock);
1321
1322 misc_deregister(&cctx->miscdev);
1323 of_platform_depopulate(&rpdev->dev);
1324 kfree(cctx);
1325}
1326
1327static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
1328 int len, void *priv, u32 addr)
1329{
1330 struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
1331 struct fastrpc_invoke_rsp *rsp = data;
1332 struct fastrpc_invoke_ctx *ctx;
1333 unsigned long flags;
1334 unsigned long ctxid;
1335
1336 if (len < sizeof(*rsp))
1337 return -EINVAL;
1338
1339 ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
1340
1341 spin_lock_irqsave(&cctx->lock, flags);
1342 ctx = idr_find(&cctx->ctx_idr, ctxid);
1343 spin_unlock_irqrestore(&cctx->lock, flags);
1344
1345 if (!ctx) {
1346 dev_err(&rpdev->dev, "No context ID matches response\n");
1347 return -ENOENT;
1348 }
1349
1350 ctx->retval = rsp->retval;
1351 complete(&ctx->work);
1352 fastrpc_context_put(ctx);
1353
1354 return 0;
1355}
1356
1357static const struct of_device_id fastrpc_rpmsg_of_match[] = {
1358 { .compatible = "qcom,fastrpc" },
1359 { },
1360};
1361MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);
1362
1363static struct rpmsg_driver fastrpc_driver = {
1364 .probe = fastrpc_rpmsg_probe,
1365 .remove = fastrpc_rpmsg_remove,
1366 .callback = fastrpc_rpmsg_callback,
1367 .drv = {
1368 .name = "qcom,fastrpc",
1369 .of_match_table = fastrpc_rpmsg_of_match,
1370 },
1371};
1372
1373static int fastrpc_init(void)
1374{
1375 int ret;
1376
1377 ret = platform_driver_register(&fastrpc_cb_driver);
1378 if (ret < 0) {
1379 pr_err("fastrpc: failed to register cb driver\n");
1380 return ret;
1381 }
1382
1383 ret = register_rpmsg_driver(&fastrpc_driver);
1384 if (ret < 0) {
1385 pr_err("fastrpc: failed to register rpmsg driver\n");
1386 platform_driver_unregister(&fastrpc_cb_driver);
1387 return ret;
1388 }
1389
1390 return 0;
1391}
1392module_init(fastrpc_init);
1393
1394static void fastrpc_exit(void)
1395{
1396 platform_driver_unregister(&fastrpc_cb_driver);
1397 unregister_rpmsg_driver(&fastrpc_driver);
1398}
1399module_exit(fastrpc_exit);
1400
1401MODULE_LICENSE("GPL v2");
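The "scalars" word that fastrpc_invoke() passes to the DSP is a packed u32 described by FASTRPC_BUILD_SCALARS and the REMOTE_SCALARS_* macros above. A small standalone sketch (user-space style, not driver code) of the round trip for the process-create call, which the driver builds as FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0); attribute and handle fields are left at zero, as FASTRPC_SCALARS does:

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as the driver's FASTRPC_BUILD_SCALARS macro
 * (attr/oin/oout fields omitted, i.e. forced to zero).
 */
static uint32_t build_scalars(uint32_t method, uint32_t in, uint32_t out)
{
	return ((method & 0x1f) << 24) | ((in & 0xff) << 16) | ((out & 0xff) << 8);
}

int main(void)
{
	uint32_t sc = build_scalars(6, 4, 0);	/* FASTRPC_RMID_INIT_CREATE, 4 in-bufs */

	/* equivalent of REMOTE_SCALARS_INBUFS()/REMOTE_SCALARS_OUTBUFS() */
	printf("sc=0x%08x inbufs=%u outbufs=%u\n",
	       sc, (sc >> 16) & 0xff, (sc >> 8) & 0xff);
	return 0;
}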
diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/misc/habanalabs/Kconfig
new file mode 100644
index 000000000000..99db2b82ada6
--- /dev/null
+++ b/drivers/misc/habanalabs/Kconfig
@@ -0,0 +1,25 @@
1#
2# HabanaLabs AI accelerators driver
3#
4
5config HABANA_AI
6 tristate "HabanaAI accelerators (habanalabs)"
7 depends on PCI && HAS_IOMEM
8 select FRAME_VECTOR
9 select DMA_SHARED_BUFFER
10 select GENERIC_ALLOCATOR
11 select HWMON
12 help
13 Enables PCIe card driver for Habana's AI Processors (AIP) that are
14 designed to accelerate Deep Learning inference and training workloads.
15
16 The driver manages the PCIe devices and provides IOCTL interface for
17 the user to submit workloads to the devices.
18
19 The user-space interface is described in
20 include/uapi/misc/habanalabs.h
21
22 If unsure, say N.
23
24 To compile this driver as a module, choose M here: the
25 module will be called habanalabs.
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile
new file mode 100644
index 000000000000..c6592db59b25
--- /dev/null
+++ b/drivers/misc/habanalabs/Makefile
@@ -0,0 +1,14 @@
1#
2# Makefile for HabanaLabs AI accelerators driver
3#
4
5obj-m := habanalabs.o
6
7habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \
8 command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \
9 command_submission.o mmu.o
10
11habanalabs-$(CONFIG_DEBUG_FS) += debugfs.o
12
13include $(src)/goya/Makefile
14habanalabs-y += $(HL_GOYA_FILES)
diff --git a/drivers/misc/habanalabs/asid.c b/drivers/misc/habanalabs/asid.c
new file mode 100644
index 000000000000..f54e7971a762
--- /dev/null
+++ b/drivers/misc/habanalabs/asid.c
@@ -0,0 +1,57 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "habanalabs.h"
9
10#include <linux/slab.h>
11
12int hl_asid_init(struct hl_device *hdev)
13{
14 hdev->asid_bitmap = kcalloc(BITS_TO_LONGS(hdev->asic_prop.max_asid),
15 sizeof(*hdev->asid_bitmap), GFP_KERNEL);
16 if (!hdev->asid_bitmap)
17 return -ENOMEM;
18
19 mutex_init(&hdev->asid_mutex);
20
21 /* ASID 0 is reserved for KMD */
22 set_bit(0, hdev->asid_bitmap);
23
24 return 0;
25}
26
27void hl_asid_fini(struct hl_device *hdev)
28{
29 mutex_destroy(&hdev->asid_mutex);
30 kfree(hdev->asid_bitmap);
31}
32
33unsigned long hl_asid_alloc(struct hl_device *hdev)
34{
35 unsigned long found;
36
37 mutex_lock(&hdev->asid_mutex);
38
39 found = find_first_zero_bit(hdev->asid_bitmap,
40 hdev->asic_prop.max_asid);
41 if (found == hdev->asic_prop.max_asid)
42 found = 0;
43 else
44 set_bit(found, hdev->asid_bitmap);
45
46 mutex_unlock(&hdev->asid_mutex);
47
48 return found;
49}
50
51void hl_asid_free(struct hl_device *hdev, unsigned long asid)
52{
53 if (WARN((asid == 0 || asid >= hdev->asic_prop.max_asid),
54 "Invalid ASID %lu", asid))
55 return;
56 clear_bit(asid, hdev->asid_bitmap);
57}
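Because ASID 0 is reserved for the kernel-mode driver, hl_asid_alloc() signals exhaustion by returning 0 rather than a negative errno, so callers must treat 0 as failure. A hedged sketch of the expected caller pattern; the context helpers named here are illustrative only, not the driver's actual context code:

/* Illustrative caller: take an ASID when a user context is created and
 * give it back on teardown; a return value of 0 means the bitmap is full.
 */
static int example_ctx_assign_asid(struct hl_device *hdev, struct hl_ctx *ctx)
{
	ctx->asid = hl_asid_alloc(hdev);
	if (!ctx->asid)
		return -ENOMEM;	/* all ASIDs are in use */

	return 0;
}

static void example_ctx_release_asid(struct hl_device *hdev, struct hl_ctx *ctx)
{
	hl_asid_free(hdev, ctx->asid);
}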
diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/command_buffer.c
new file mode 100644
index 000000000000..85f75806a9a7
--- /dev/null
+++ b/drivers/misc/habanalabs/command_buffer.c
@@ -0,0 +1,445 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include <uapi/misc/habanalabs.h>
9#include "habanalabs.h"
10
11#include <linux/mm.h>
12#include <linux/slab.h>
13
14static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
15{
16 hdev->asic_funcs->dma_free_coherent(hdev, cb->size,
17 (void *) (uintptr_t) cb->kernel_address,
18 cb->bus_address);
19 kfree(cb);
20}
21
22static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
23{
24 if (cb->is_pool) {
25 spin_lock(&hdev->cb_pool_lock);
26 list_add(&cb->pool_list, &hdev->cb_pool);
27 spin_unlock(&hdev->cb_pool_lock);
28 } else {
29 cb_fini(hdev, cb);
30 }
31}
32
33static void cb_release(struct kref *ref)
34{
35 struct hl_device *hdev;
36 struct hl_cb *cb;
37
38 cb = container_of(ref, struct hl_cb, refcount);
39 hdev = cb->hdev;
40
41 hl_debugfs_remove_cb(cb);
42
43 cb_do_release(hdev, cb);
44}
45
46static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
47 int ctx_id)
48{
49 struct hl_cb *cb;
50 void *p;
51
52 /*
53 * We use GFP_ATOMIC here because this function can be called from
54 * the latency-sensitive code path for command submission. Due to H/W
55 * limitations in some of the ASICs, the kernel must copy the user CB
56 * that is designated for an external queue and actually enqueue
57 * the kernel's copy. Hence, we must never sleep in this code section
58 * and must use GFP_ATOMIC for all memory allocations.
59 */
60 if (ctx_id == HL_KERNEL_ASID_ID)
61 cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
62 else
63 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
64
65 if (!cb)
66 return NULL;
67
68 if (ctx_id == HL_KERNEL_ASID_ID)
69 p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size,
70 &cb->bus_address, GFP_ATOMIC);
71 else
72 p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size,
73 &cb->bus_address,
74 GFP_USER | __GFP_ZERO);
75 if (!p) {
76 dev_err(hdev->dev,
77 "failed to allocate %d of dma memory for CB\n",
78 cb_size);
79 kfree(cb);
80 return NULL;
81 }
82
83 cb->kernel_address = (u64) (uintptr_t) p;
84 cb->size = cb_size;
85
86 return cb;
87}
88
89int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
90 u32 cb_size, u64 *handle, int ctx_id)
91{
92 struct hl_cb *cb;
93 bool alloc_new_cb = true;
94 int rc;
95
96 /*
97 * Can't use generic function to check this because of special case
98 * where we create a CB as part of the reset process
99 */
100 if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
101 (ctx_id != HL_KERNEL_ASID_ID))) {
102 dev_warn_ratelimited(hdev->dev,
103 "Device is disabled or in reset. Can't create new CBs\n");
104 rc = -EBUSY;
105 goto out_err;
106 }
107
108 if (cb_size > HL_MAX_CB_SIZE) {
109 dev_err(hdev->dev,
110 "CB size %d must be less then %d\n",
111 cb_size, HL_MAX_CB_SIZE);
112 rc = -EINVAL;
113 goto out_err;
114 }
115
116 /* Minimum allocation must be PAGE SIZE */
117 if (cb_size < PAGE_SIZE)
118 cb_size = PAGE_SIZE;
119
120 if (ctx_id == HL_KERNEL_ASID_ID &&
121 cb_size <= hdev->asic_prop.cb_pool_cb_size) {
122
123 spin_lock(&hdev->cb_pool_lock);
124 if (!list_empty(&hdev->cb_pool)) {
125 cb = list_first_entry(&hdev->cb_pool, typeof(*cb),
126 pool_list);
127 list_del(&cb->pool_list);
128 spin_unlock(&hdev->cb_pool_lock);
129 alloc_new_cb = false;
130 } else {
131 spin_unlock(&hdev->cb_pool_lock);
132 dev_dbg(hdev->dev, "CB pool is empty\n");
133 }
134 }
135
136 if (alloc_new_cb) {
137 cb = hl_cb_alloc(hdev, cb_size, ctx_id);
138 if (!cb) {
139 rc = -ENOMEM;
140 goto out_err;
141 }
142 }
143
144 cb->hdev = hdev;
145 cb->ctx_id = ctx_id;
146
147 spin_lock(&mgr->cb_lock);
148 rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
149 spin_unlock(&mgr->cb_lock);
150
151 if (rc < 0) {
152 dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
153 goto release_cb;
154 }
155
156 cb->id = rc;
157
158 kref_init(&cb->refcount);
159 spin_lock_init(&cb->lock);
160
161 /*
162 * idr is 32-bit so we can safely OR it with a mask that is above
163 * 32 bit
164 */
165 *handle = cb->id | HL_MMAP_CB_MASK;
166 *handle <<= PAGE_SHIFT;
167
168 hl_debugfs_add_cb(cb);
169
170 return 0;
171
172release_cb:
173 cb_do_release(hdev, cb);
174out_err:
175 *handle = 0;
176
177 return rc;
178}
179
180int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
181{
182 struct hl_cb *cb;
183 u32 handle;
184 int rc = 0;
185
186 /*
187 * handle was given to user to do mmap, I need to shift it back to
188 * how the idr module gave it to me
189 */
190 cb_handle >>= PAGE_SHIFT;
191 handle = (u32) cb_handle;
192
193 spin_lock(&mgr->cb_lock);
194
195 cb = idr_find(&mgr->cb_handles, handle);
196 if (cb) {
197 idr_remove(&mgr->cb_handles, handle);
198 spin_unlock(&mgr->cb_lock);
199 kref_put(&cb->refcount, cb_release);
200 } else {
201 spin_unlock(&mgr->cb_lock);
202 dev_err(hdev->dev,
203 "CB destroy failed, no match to handle 0x%x\n", handle);
204 rc = -EINVAL;
205 }
206
207 return rc;
208}
209
210int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
211{
212 union hl_cb_args *args = data;
213 struct hl_device *hdev = hpriv->hdev;
214 u64 handle;
215 int rc;
216
217 switch (args->in.op) {
218 case HL_CB_OP_CREATE:
219 rc = hl_cb_create(hdev, &hpriv->cb_mgr, args->in.cb_size,
220 &handle, hpriv->ctx->asid);
221 memset(args, 0, sizeof(*args));
222 args->out.cb_handle = handle;
223 break;
224 case HL_CB_OP_DESTROY:
225 rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
226 args->in.cb_handle);
227 break;
228 default:
229 rc = -ENOTTY;
230 break;
231 }
232
233 return rc;
234}
235
236static void cb_vm_close(struct vm_area_struct *vma)
237{
238 struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
239 long new_mmap_size;
240
241 new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);
242
243 if (new_mmap_size > 0) {
244 cb->mmap_size = new_mmap_size;
245 return;
246 }
247
248 spin_lock(&cb->lock);
249 cb->mmap = false;
250 spin_unlock(&cb->lock);
251
252 hl_cb_put(cb);
253 vma->vm_private_data = NULL;
254}
255
256static const struct vm_operations_struct cb_vm_ops = {
257 .close = cb_vm_close
258};
259
260int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
261{
262 struct hl_device *hdev = hpriv->hdev;
263 struct hl_cb *cb;
264 phys_addr_t address;
265 u32 handle;
266 int rc;
267
268 handle = vma->vm_pgoff;
269
270 /* reference was taken here */
271 cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
272 if (!cb) {
273 dev_err(hdev->dev,
274 "CB mmap failed, no match to handle %d\n", handle);
275 return -EINVAL;
276 }
277
278 /* Validation check */
279 if ((vma->vm_end - vma->vm_start) != ALIGN(cb->size, PAGE_SIZE)) {
280 dev_err(hdev->dev,
281 "CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
282 vma->vm_end - vma->vm_start, cb->size);
283 rc = -EINVAL;
284 goto put_cb;
285 }
286
287 spin_lock(&cb->lock);
288
289 if (cb->mmap) {
290 dev_err(hdev->dev,
291 "CB mmap failed, CB already mmaped to user\n");
292 rc = -EINVAL;
293 goto release_lock;
294 }
295
296 cb->mmap = true;
297
298 spin_unlock(&cb->lock);
299
300 vma->vm_ops = &cb_vm_ops;
301
302 /*
303 * Note: We're transferring the cb reference to
304 * vma->vm_private_data here.
305 */
306
307 vma->vm_private_data = cb;
308
309 /* Calculate address for CB */
310 address = virt_to_phys((void *) (uintptr_t) cb->kernel_address);
311
312 rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
313 address, cb->size);
314
315 if (rc) {
316 spin_lock(&cb->lock);
317 cb->mmap = false;
318 goto release_lock;
319 }
320
321 cb->mmap_size = cb->size;
322
323 return 0;
324
325release_lock:
326 spin_unlock(&cb->lock);
327put_cb:
328 hl_cb_put(cb);
329 return rc;
330}
331
332struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
333 u32 handle)
334{
335 struct hl_cb *cb;
336
337 spin_lock(&mgr->cb_lock);
338 cb = idr_find(&mgr->cb_handles, handle);
339
340 if (!cb) {
341 spin_unlock(&mgr->cb_lock);
342 dev_warn(hdev->dev,
343 "CB get failed, no match to handle %d\n", handle);
344 return NULL;
345 }
346
347 kref_get(&cb->refcount);
348
349 spin_unlock(&mgr->cb_lock);
350
351 return cb;
352
353}
354
355void hl_cb_put(struct hl_cb *cb)
356{
357 kref_put(&cb->refcount, cb_release);
358}
359
360void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
361{
362 spin_lock_init(&mgr->cb_lock);
363 idr_init(&mgr->cb_handles);
364}
365
366void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
367{
368 struct hl_cb *cb;
369 struct idr *idp;
370 u32 id;
371
372 idp = &mgr->cb_handles;
373
374 idr_for_each_entry(idp, cb, id) {
375 if (kref_put(&cb->refcount, cb_release) != 1)
376 dev_err(hdev->dev,
377 "CB %d for CTX ID %d is still alive\n",
378 id, cb->ctx_id);
379 }
380
381 idr_destroy(&mgr->cb_handles);
382}
383
384struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size)
385{
386 u64 cb_handle;
387 struct hl_cb *cb;
388 int rc;
389
390 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
391 HL_KERNEL_ASID_ID);
392 if (rc) {
393 dev_err(hdev->dev, "Failed to allocate CB for KMD %d\n", rc);
394 return NULL;
395 }
396
397 cb_handle >>= PAGE_SHIFT;
398 cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
399 /* hl_cb_get should never fail here so use kernel WARN */
400 WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle);
401 if (!cb)
402 goto destroy_cb;
403
404 return cb;
405
406destroy_cb:
407 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);
408
409 return NULL;
410}
411
412int hl_cb_pool_init(struct hl_device *hdev)
413{
414 struct hl_cb *cb;
415 int i;
416
417 INIT_LIST_HEAD(&hdev->cb_pool);
418 spin_lock_init(&hdev->cb_pool_lock);
419
420 for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
421 cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
422 HL_KERNEL_ASID_ID);
423 if (cb) {
424 cb->is_pool = true;
425 list_add(&cb->pool_list, &hdev->cb_pool);
426 } else {
427 hl_cb_pool_fini(hdev);
428 return -ENOMEM;
429 }
430 }
431
432 return 0;
433}
434
435int hl_cb_pool_fini(struct hl_device *hdev)
436{
437 struct hl_cb *cb, *tmp;
438
439 list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
440 list_del(&cb->pool_list);
441 cb_fini(hdev, cb);
442 }
443
444 return 0;
445}
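hl_cb_create() builds the user-visible handle by OR-ing the 32-bit IDR id with HL_MMAP_CB_MASK (a mask above bit 31, per the comment in that function) and shifting left by PAGE_SHIFT; hl_cb_destroy() recovers the id by shifting back and truncating to 32 bits, and hl_cb_mmap() receives the page-shifted value via vma->vm_pgoff. A standalone sketch of that round trip, with an assumed mask value and page shift purely for illustration:

#include <assert.h>
#include <stdint.h>

#define EX_PAGE_SHIFT	12
#define EX_MMAP_CB_MASK	(0x8000000000000000ull >> EX_PAGE_SHIFT)	/* assumed value */

int main(void)
{
	uint32_t id = 5;	/* value handed back by idr_alloc() */
	uint64_t handle = ((uint64_t)id | EX_MMAP_CB_MASK) << EX_PAGE_SHIFT;

	/* the destroy path shifts right and keeps only the low 32 bits */
	assert((uint32_t)(handle >> EX_PAGE_SHIFT) == id);
	return 0;
}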
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
new file mode 100644
index 000000000000..3525236ed8d9
--- /dev/null
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -0,0 +1,780 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include <uapi/misc/habanalabs.h>
9#include "habanalabs.h"
10
11#include <linux/uaccess.h>
12#include <linux/slab.h>
13
14static void job_wq_completion(struct work_struct *work);
15static long _hl_cs_wait_ioctl(struct hl_device *hdev,
16 struct hl_ctx *ctx, u64 timeout_us, u64 seq);
17static void cs_do_release(struct kref *ref);
18
19static const char *hl_fence_get_driver_name(struct dma_fence *fence)
20{
21 return "HabanaLabs";
22}
23
24static const char *hl_fence_get_timeline_name(struct dma_fence *fence)
25{
26 struct hl_dma_fence *hl_fence =
27 container_of(fence, struct hl_dma_fence, base_fence);
28
29 return dev_name(hl_fence->hdev->dev);
30}
31
32static bool hl_fence_enable_signaling(struct dma_fence *fence)
33{
34 return true;
35}
36
37static void hl_fence_release(struct dma_fence *fence)
38{
39 struct hl_dma_fence *hl_fence =
40 container_of(fence, struct hl_dma_fence, base_fence);
41
42 kfree_rcu(hl_fence, base_fence.rcu);
43}
44
45static const struct dma_fence_ops hl_fence_ops = {
46 .get_driver_name = hl_fence_get_driver_name,
47 .get_timeline_name = hl_fence_get_timeline_name,
48 .enable_signaling = hl_fence_enable_signaling,
49 .wait = dma_fence_default_wait,
50 .release = hl_fence_release
51};
52
53static void cs_get(struct hl_cs *cs)
54{
55 kref_get(&cs->refcount);
56}
57
58static int cs_get_unless_zero(struct hl_cs *cs)
59{
60 return kref_get_unless_zero(&cs->refcount);
61}
62
63static void cs_put(struct hl_cs *cs)
64{
65 kref_put(&cs->refcount, cs_do_release);
66}
67
68/*
69 * cs_parser - parse the user command submission
70 *
71 * @hpriv : pointer to the private data of the fd
72 * @job : pointer to the job that holds the command submission info
73 *
74 * The function parses the command submission of the user. It calls the
75 * ASIC specific parser, which returns a list of memory blocks to send
76 * to the device as different command buffers
77 *
78 */
79static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
80{
81 struct hl_device *hdev = hpriv->hdev;
82 struct hl_cs_parser parser;
83 int rc;
84
85 parser.ctx_id = job->cs->ctx->asid;
86 parser.cs_sequence = job->cs->sequence;
87 parser.job_id = job->id;
88
89 parser.hw_queue_id = job->hw_queue_id;
90 parser.job_userptr_list = &job->userptr_list;
91 parser.patched_cb = NULL;
92 parser.user_cb = job->user_cb;
93 parser.user_cb_size = job->user_cb_size;
94 parser.ext_queue = job->ext_queue;
95 job->patched_cb = NULL;
96 parser.use_virt_addr = hdev->mmu_enable;
97
98 rc = hdev->asic_funcs->cs_parser(hdev, &parser);
99 if (job->ext_queue) {
100 if (!rc) {
101 job->patched_cb = parser.patched_cb;
102 job->job_cb_size = parser.patched_cb_size;
103
104 spin_lock(&job->patched_cb->lock);
105 job->patched_cb->cs_cnt++;
106 spin_unlock(&job->patched_cb->lock);
107 }
108
109 /*
110 * Whether the parsing worked or not, we don't need the
111 * original CB anymore because it was already parsed and
112 * won't be accessed again for this CS
113 */
114 spin_lock(&job->user_cb->lock);
115 job->user_cb->cs_cnt--;
116 spin_unlock(&job->user_cb->lock);
117 hl_cb_put(job->user_cb);
118 job->user_cb = NULL;
119 }
120
121 return rc;
122}
123
124static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
125{
126 struct hl_cs *cs = job->cs;
127
128 if (job->ext_queue) {
129 hl_userptr_delete_list(hdev, &job->userptr_list);
130
131 /*
132 * We might arrive here from rollback and patched CB wasn't
133 * created, so we need to check it's not NULL
134 */
135 if (job->patched_cb) {
136 spin_lock(&job->patched_cb->lock);
137 job->patched_cb->cs_cnt--;
138 spin_unlock(&job->patched_cb->lock);
139
140 hl_cb_put(job->patched_cb);
141 }
142 }
143
144 /*
145 * This is the only place where there can be multiple threads
146 * modifying the list at the same time
147 */
148 spin_lock(&cs->job_lock);
149 list_del(&job->cs_node);
150 spin_unlock(&cs->job_lock);
151
152 hl_debugfs_remove_job(hdev, job);
153
154 if (job->ext_queue)
155 cs_put(cs);
156
157 kfree(job);
158}
159
160static void cs_do_release(struct kref *ref)
161{
162 struct hl_cs *cs = container_of(ref, struct hl_cs,
163 refcount);
164 struct hl_device *hdev = cs->ctx->hdev;
165 struct hl_cs_job *job, *tmp;
166
167 cs->completed = true;
168
169 /*
170 * Although if we reached here it means that all external jobs have
171 * finished, because each one of them took refcnt to CS, we still
172 * need to go over the internal jobs and free them. Otherwise, we
173 * will have leaked memory and what's worse, the CS object (and
174 * potentially the CTX object) could be released, while the JOB
175 * still holds a pointer to them (but no reference).
176 */
177 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
178 free_job(hdev, job);
179
180 /* We also need to update CI for internal queues */
181 if (cs->submitted) {
182 hl_int_hw_queue_update_ci(cs);
183
184 spin_lock(&hdev->hw_queues_mirror_lock);
185 /* remove CS from hw_queues mirror list */
186 list_del_init(&cs->mirror_node);
187 spin_unlock(&hdev->hw_queues_mirror_lock);
188
189 /*
190 * Don't cancel TDR in case this CS was timedout because we
191 * might be running from the TDR context
192 */
193 if ((!cs->timedout) &&
194 (hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) {
195 struct hl_cs *next;
196
197 if (cs->tdr_active)
198 cancel_delayed_work_sync(&cs->work_tdr);
199
200 spin_lock(&hdev->hw_queues_mirror_lock);
201
202 /* queue TDR for next CS */
203 next = list_first_entry_or_null(
204 &hdev->hw_queues_mirror_list,
205 struct hl_cs, mirror_node);
206
207 if ((next) && (!next->tdr_active)) {
208 next->tdr_active = true;
209 schedule_delayed_work(&next->work_tdr,
210 hdev->timeout_jiffies);
211 }
212
213 spin_unlock(&hdev->hw_queues_mirror_lock);
214 }
215 }
216
217 /*
218 * Must be called before hl_ctx_put because inside we use ctx to get
219 * the device
220 */
221 hl_debugfs_remove_cs(cs);
222
223 hl_ctx_put(cs->ctx);
224
225 if (cs->timedout)
226 dma_fence_set_error(cs->fence, -ETIMEDOUT);
227 else if (cs->aborted)
228 dma_fence_set_error(cs->fence, -EIO);
229
230 dma_fence_signal(cs->fence);
231 dma_fence_put(cs->fence);
232
233 kfree(cs);
234}
235
236static void cs_timedout(struct work_struct *work)
237{
238 struct hl_device *hdev;
239 int ctx_asid, rc;
240 struct hl_cs *cs = container_of(work, struct hl_cs,
241 work_tdr.work);
242 rc = cs_get_unless_zero(cs);
243 if (!rc)
244 return;
245
246 if ((!cs->submitted) || (cs->completed)) {
247 cs_put(cs);
248 return;
249 }
250
251 /* Mark the CS is timed out so we won't try to cancel its TDR */
252 cs->timedout = true;
253
254 hdev = cs->ctx->hdev;
255 ctx_asid = cs->ctx->asid;
256
257 /* TODO: add information about last signaled seq and last emitted seq */
258 dev_err(hdev->dev, "CS %d.%llu got stuck!\n", ctx_asid, cs->sequence);
259
260 cs_put(cs);
261
262 if (hdev->reset_on_lockup)
263 hl_device_reset(hdev, false, false);
264}
265
266static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
267 struct hl_cs **cs_new)
268{
269 struct hl_dma_fence *fence;
270 struct dma_fence *other = NULL;
271 struct hl_cs *cs;
272 int rc;
273
274 cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
275 if (!cs)
276 return -ENOMEM;
277
278 cs->ctx = ctx;
279 cs->submitted = false;
280 cs->completed = false;
281 INIT_LIST_HEAD(&cs->job_list);
282 INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
283 kref_init(&cs->refcount);
284 spin_lock_init(&cs->job_lock);
285
286 fence = kmalloc(sizeof(*fence), GFP_ATOMIC);
287 if (!fence) {
288 rc = -ENOMEM;
289 goto free_cs;
290 }
291
292 fence->hdev = hdev;
293 spin_lock_init(&fence->lock);
294 cs->fence = &fence->base_fence;
295
296 spin_lock(&ctx->cs_lock);
297
298 fence->cs_seq = ctx->cs_sequence;
299 other = ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)];
300 if ((other) && (!dma_fence_is_signaled(other))) {
301 spin_unlock(&ctx->cs_lock);
302 rc = -EAGAIN;
303 goto free_fence;
304 }
305
306 dma_fence_init(&fence->base_fence, &hl_fence_ops, &fence->lock,
307 ctx->asid, ctx->cs_sequence);
308
309 cs->sequence = fence->cs_seq;
310
311 ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)] =
312 &fence->base_fence;
313 ctx->cs_sequence++;
314
315 dma_fence_get(&fence->base_fence);
316
317 dma_fence_put(other);
318
319 spin_unlock(&ctx->cs_lock);
320
321 *cs_new = cs;
322
323 return 0;
324
325free_fence:
326 kfree(fence);
327free_cs:
328 kfree(cs);
329 return rc;
330}
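
As a side note on the cs_pending ring used above: a sequence number and the number that is HL_MAX_PENDING_CS later share the same slot, which is why allocate_cs() returns -EAGAIN when the older fence in that slot has not been signaled yet. A minimal stand-alone sketch of that slot arithmetic follows; the value of HL_MAX_PENDING_CS here is an assumption, the real one is defined in habanalabs.h.

#include <stdio.h>

#define HL_MAX_PENDING_CS 64	/* assumed power-of-two value for this sketch */

int main(void)
{
	unsigned long long seq = 5;

	/* seq and seq + HL_MAX_PENDING_CS land in the same ring slot, so
	 * the older fence must be signaled before the slot is reused;
	 * otherwise allocate_cs() bails out with -EAGAIN.
	 */
	printf("slot of %llu: %llu\n", seq, seq & (HL_MAX_PENDING_CS - 1));
	printf("slot of %llu: %llu\n", seq + HL_MAX_PENDING_CS,
	       (seq + HL_MAX_PENDING_CS) & (HL_MAX_PENDING_CS - 1));
	return 0;
}
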
331
332static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
333{
334 struct hl_cs_job *job, *tmp;
335
336 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
337 free_job(hdev, job);
338}
339
340void hl_cs_rollback_all(struct hl_device *hdev)
341{
342 struct hl_cs *cs, *tmp;
343
344 /* flush all completions */
345 flush_workqueue(hdev->cq_wq);
346
347 /* Make sure we don't have leftovers in the H/W queues mirror list */
348 list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
349 mirror_node) {
350 cs_get(cs);
351 cs->aborted = true;
352 dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
353 cs->ctx->asid, cs->sequence);
354 cs_rollback(hdev, cs);
355 cs_put(cs);
356 }
357}
358
359static void job_wq_completion(struct work_struct *work)
360{
361 struct hl_cs_job *job = container_of(work, struct hl_cs_job,
362 finish_work);
363 struct hl_cs *cs = job->cs;
364 struct hl_device *hdev = cs->ctx->hdev;
365
366 /* job is no longer needed */
367 free_job(hdev, job);
368}
369
370static struct hl_cb *validate_queue_index(struct hl_device *hdev,
371 struct hl_cb_mgr *cb_mgr,
372 struct hl_cs_chunk *chunk,
373 bool *ext_queue)
374{
375 struct asic_fixed_properties *asic = &hdev->asic_prop;
376 struct hw_queue_properties *hw_queue_prop;
377 u32 cb_handle;
378 struct hl_cb *cb;
379
380 /* Assume external queue */
381 *ext_queue = true;
382
383 hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
384
385 if ((chunk->queue_index >= HL_MAX_QUEUES) ||
386 (hw_queue_prop->type == QUEUE_TYPE_NA)) {
387 dev_err(hdev->dev, "Queue index %d is invalid\n",
388 chunk->queue_index);
389 return NULL;
390 }
391
392 if (hw_queue_prop->kmd_only) {
393 dev_err(hdev->dev, "Queue index %d is restricted for KMD\n",
394 chunk->queue_index);
395 return NULL;
396 } else if (hw_queue_prop->type == QUEUE_TYPE_INT) {
397 *ext_queue = false;
398 return (struct hl_cb *) (uintptr_t) chunk->cb_handle;
399 }
400
401 /* Retrieve CB object */
402 cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
403
404 cb = hl_cb_get(hdev, cb_mgr, cb_handle);
405 if (!cb) {
406 dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
407 return NULL;
408 }
409
410 if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
411 dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
412 goto release_cb;
413 }
414
415 spin_lock(&cb->lock);
416 cb->cs_cnt++;
417 spin_unlock(&cb->lock);
418
419 return cb;
420
421release_cb:
422 hl_cb_put(cb);
423 return NULL;
424}
425
426struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
427{
428 struct hl_cs_job *job;
429
430 job = kzalloc(sizeof(*job), GFP_ATOMIC);
431 if (!job)
432 return NULL;
433
434 job->ext_queue = ext_queue;
435
436 if (job->ext_queue) {
437 INIT_LIST_HEAD(&job->userptr_list);
438 INIT_WORK(&job->finish_work, job_wq_completion);
439 }
440
441 return job;
442}
443
444static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
445 u32 num_chunks, u64 *cs_seq)
446{
447 struct hl_device *hdev = hpriv->hdev;
448 struct hl_cs_chunk *cs_chunk_array;
449 struct hl_cs_job *job;
450 struct hl_cs *cs;
451 struct hl_cb *cb;
452 bool ext_queue_present = false;
453 u32 size_to_copy;
454 int rc, i, parse_cnt;
455
456 *cs_seq = ULLONG_MAX;
457
458 if (num_chunks > HL_MAX_JOBS_PER_CS) {
459 dev_err(hdev->dev,
460 "Number of chunks can NOT be larger than %d\n",
461 HL_MAX_JOBS_PER_CS);
462 rc = -EINVAL;
463 goto out;
464 }
465
466 cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
467 GFP_ATOMIC);
468 if (!cs_chunk_array) {
469 rc = -ENOMEM;
470 goto out;
471 }
472
473 size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
474 if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
475 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
476 rc = -EFAULT;
477 goto free_cs_chunk_array;
478 }
479
480 /* increment refcnt for context */
481 hl_ctx_get(hdev, hpriv->ctx);
482
483 rc = allocate_cs(hdev, hpriv->ctx, &cs);
484 if (rc) {
485 hl_ctx_put(hpriv->ctx);
486 goto free_cs_chunk_array;
487 }
488
489 *cs_seq = cs->sequence;
490
491 hl_debugfs_add_cs(cs);
492
493 /* Validate ALL the CS chunks before submitting the CS */
494 for (i = 0, parse_cnt = 0 ; i < num_chunks ; i++, parse_cnt++) {
495 struct hl_cs_chunk *chunk = &cs_chunk_array[i];
496 bool ext_queue;
497
498 cb = validate_queue_index(hdev, &hpriv->cb_mgr, chunk,
499 &ext_queue);
500 if (ext_queue) {
501 ext_queue_present = true;
502 if (!cb) {
503 rc = -EINVAL;
504 goto free_cs_object;
505 }
506 }
507
508 job = hl_cs_allocate_job(hdev, ext_queue);
509 if (!job) {
510 dev_err(hdev->dev, "Failed to allocate a new job\n");
511 rc = -ENOMEM;
512 if (ext_queue)
513 goto release_cb;
514 else
515 goto free_cs_object;
516 }
517
518 job->id = i + 1;
519 job->cs = cs;
520 job->user_cb = cb;
521 job->user_cb_size = chunk->cb_size;
522 if (job->ext_queue)
523 job->job_cb_size = cb->size;
524 else
525 job->job_cb_size = chunk->cb_size;
526 job->hw_queue_id = chunk->queue_index;
527
528 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
529
530 list_add_tail(&job->cs_node, &cs->job_list);
531
532 /*
533		 * Increment the CS reference. When the CS refcount reaches 0,
534		 * the CS is done and can be signaled to the user and all its
535		 * resources freed. Only increment for JOBs on external queues,
536		 * because only for those JOBs do we get a completion.
537 */
538 if (job->ext_queue)
539 cs_get(cs);
540
541 hl_debugfs_add_job(hdev, job);
542
543 rc = cs_parser(hpriv, job);
544 if (rc) {
545 dev_err(hdev->dev,
546 "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
547 cs->ctx->asid, cs->sequence, job->id, rc);
548 goto free_cs_object;
549 }
550 }
551
552 if (!ext_queue_present) {
553 dev_err(hdev->dev,
554 "Reject CS %d.%llu because no external queues jobs\n",
555 cs->ctx->asid, cs->sequence);
556 rc = -EINVAL;
557 goto free_cs_object;
558 }
559
560 rc = hl_hw_queue_schedule_cs(cs);
561 if (rc) {
562 dev_err(hdev->dev,
563 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
564 cs->ctx->asid, cs->sequence, rc);
565 goto free_cs_object;
566 }
567
568 rc = HL_CS_STATUS_SUCCESS;
569 goto put_cs;
570
571release_cb:
572 spin_lock(&cb->lock);
573 cb->cs_cnt--;
574 spin_unlock(&cb->lock);
575 hl_cb_put(cb);
576free_cs_object:
577 cs_rollback(hdev, cs);
578 *cs_seq = ULLONG_MAX;
579 /* The path below is both for good and erroneous exits */
580put_cs:
581 /* We finished with the CS in this function, so put the ref */
582 cs_put(cs);
583free_cs_chunk_array:
584 kfree(cs_chunk_array);
585out:
586 return rc;
587}
588
589int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
590{
591 struct hl_device *hdev = hpriv->hdev;
592 union hl_cs_args *args = data;
593 struct hl_ctx *ctx = hpriv->ctx;
594 void __user *chunks;
595 u32 num_chunks;
596	u64 cs_seq = ULLONG_MAX;
597 int rc, do_restore;
598 bool need_soft_reset = false;
599
600 if (hl_device_disabled_or_in_reset(hdev)) {
601 dev_warn(hdev->dev,
602 "Device is %s. Can't submit new CS\n",
603 atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
604 rc = -EBUSY;
605 goto out;
606 }
607
608 do_restore = atomic_cmpxchg(&ctx->thread_restore_token, 1, 0);
609
610 if (do_restore || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
611 long ret;
612
613 chunks = (void __user *)(uintptr_t)args->in.chunks_restore;
614 num_chunks = args->in.num_chunks_restore;
615
616 mutex_lock(&hpriv->restore_phase_mutex);
617
618 if (do_restore) {
619 rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
620 if (rc) {
621 dev_err_ratelimited(hdev->dev,
622 "Failed to switch to context %d, rejecting CS! %d\n",
623 ctx->asid, rc);
624 /*
625				 * If we timed out, or if the device is not IDLE
626				 * while we want to do a context switch (-EBUSY),
627				 * we need to soft-reset because the QMAN is
628				 * probably stuck. However, we can't call the
629				 * reset directly from here because of a deadlock,
630				 * so we need to do it at the very end of this
631				 * function
632 */
633 if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
634 need_soft_reset = true;
635 mutex_unlock(&hpriv->restore_phase_mutex);
636 goto out;
637 }
638 }
639
640 hdev->asic_funcs->restore_phase_topology(hdev);
641
642 if (num_chunks == 0) {
643 dev_dbg(hdev->dev,
644 "Need to run restore phase but restore CS is empty\n");
645 rc = 0;
646 } else {
647 rc = _hl_cs_ioctl(hpriv, chunks, num_chunks,
648 &cs_seq);
649 }
650
651 mutex_unlock(&hpriv->restore_phase_mutex);
652
653 if (rc) {
654 dev_err(hdev->dev,
655 "Failed to submit restore CS for context %d (%d)\n",
656 ctx->asid, rc);
657 goto out;
658 }
659
660 /* Need to wait for restore completion before execution phase */
661 if (num_chunks > 0) {
662 ret = _hl_cs_wait_ioctl(hdev, ctx,
663 jiffies_to_usecs(hdev->timeout_jiffies),
664 cs_seq);
665 if (ret <= 0) {
666 dev_err(hdev->dev,
667 "Restore CS for context %d failed to complete %ld\n",
668 ctx->asid, ret);
669 rc = -ENOEXEC;
670 goto out;
671 }
672 }
673
674 ctx->thread_restore_wait_token = 1;
675 } else if (!ctx->thread_restore_wait_token) {
676 u32 tmp;
677
678 rc = hl_poll_timeout_memory(hdev,
679 (u64) (uintptr_t) &ctx->thread_restore_wait_token,
680 jiffies_to_usecs(hdev->timeout_jiffies),
681 &tmp);
682
683 if (rc || !tmp) {
684 dev_err(hdev->dev,
685 "restore phase hasn't finished in time\n");
686 rc = -ETIMEDOUT;
687 goto out;
688 }
689 }
690
691 chunks = (void __user *)(uintptr_t)args->in.chunks_execute;
692 num_chunks = args->in.num_chunks_execute;
693
694 if (num_chunks == 0) {
695 dev_err(hdev->dev,
696 "Got execute CS with 0 chunks, context %d\n",
697 ctx->asid);
698 rc = -EINVAL;
699 goto out;
700 }
701
702 rc = _hl_cs_ioctl(hpriv, chunks, num_chunks, &cs_seq);
703
704out:
705 if (rc != -EAGAIN) {
706 memset(args, 0, sizeof(*args));
707 args->out.status = rc;
708 args->out.seq = cs_seq;
709 }
710
711 if (((rc == -ETIMEDOUT) || (rc == -EBUSY)) && (need_soft_reset))
712 hl_device_reset(hdev, false, false);
713
714 return rc;
715}
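
For orientation, here is a hedged user-space sketch of a caller of this ioctl. It assumes the uapi header is installed as <misc/habanalabs.h> and exposes HL_IOCTL_CS, union hl_cs_args and struct hl_cs_chunk with the fields dereferenced above (chunks_execute, num_chunks_execute, out.status, out.seq); it is an illustration, not the reference API documentation.

/* Hedged illustration only, not the reference API documentation. */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* assumed install path of the uapi header */

static int submit_one_chunk(int fd, uint64_t cb_handle, uint32_t cb_size,
			    uint32_t queue_index, uint64_t *seq)
{
	struct hl_cs_chunk chunk;
	union hl_cs_args args;

	memset(&chunk, 0, sizeof(chunk));
	memset(&args, 0, sizeof(args));

	chunk.cb_handle = cb_handle;	/* handle obtained from the CB ioctl */
	chunk.queue_index = queue_index;
	chunk.cb_size = cb_size;

	args.in.chunks_execute = (uint64_t) (uintptr_t) &chunk;
	args.in.num_chunks_execute = 1;

	if (ioctl(fd, HL_IOCTL_CS, &args) < 0)
		return -errno;

	*seq = args.out.seq;	/* sequence number to pass to the wait ioctl */
	return args.out.status;	/* HL_CS_STATUS_SUCCESS on success */
}

A caller would open the character device created by device_setup_cdev() further below (for example /dev/hl0, the name being an assumption) and pass its fd here.
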
716
717static long _hl_cs_wait_ioctl(struct hl_device *hdev,
718 struct hl_ctx *ctx, u64 timeout_us, u64 seq)
719{
720 struct dma_fence *fence;
721 unsigned long timeout;
722 long rc;
723
724 if (timeout_us == MAX_SCHEDULE_TIMEOUT)
725 timeout = timeout_us;
726 else
727 timeout = usecs_to_jiffies(timeout_us);
728
729 hl_ctx_get(hdev, ctx);
730
731 fence = hl_ctx_get_fence(ctx, seq);
732 if (IS_ERR(fence)) {
733 rc = PTR_ERR(fence);
734 } else if (fence) {
735 rc = dma_fence_wait_timeout(fence, true, timeout);
736 if (fence->error == -ETIMEDOUT)
737 rc = -ETIMEDOUT;
738 else if (fence->error == -EIO)
739 rc = -EIO;
740 dma_fence_put(fence);
741 } else
742 rc = 1;
743
744 hl_ctx_put(ctx);
745
746 return rc;
747}
748
749int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
750{
751 struct hl_device *hdev = hpriv->hdev;
752 union hl_wait_cs_args *args = data;
753 u64 seq = args->in.seq;
754 long rc;
755
756 rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq);
757
758 memset(args, 0, sizeof(*args));
759
760 if (rc < 0) {
761 dev_err(hdev->dev, "Error %ld on waiting for CS handle %llu\n",
762 rc, seq);
763 if (rc == -ERESTARTSYS) {
764 args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
765 rc = -EINTR;
766 } else if (rc == -ETIMEDOUT) {
767 args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
768 } else if (rc == -EIO) {
769 args->out.status = HL_WAIT_CS_STATUS_ABORTED;
770 }
771 return rc;
772 }
773
774 if (rc == 0)
775 args->out.status = HL_WAIT_CS_STATUS_BUSY;
776 else
777 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
778
779 return 0;
780}
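
The wait side can be sketched the same way, under the same assumptions about the uapi header; the fields mirror the ones this handler reads and fills.

/* Hedged sketch: block until a CS sequence number retires or times out. */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* assumed install path of the uapi header */

static int wait_for_cs(int fd, uint64_t seq, uint64_t timeout_us)
{
	union hl_wait_cs_args args;

	memset(&args, 0, sizeof(args));
	args.in.seq = seq;
	args.in.timeout_us = timeout_us;

	if (ioctl(fd, HL_IOCTL_WAIT_CS, &args) < 0)
		return -errno;	/* EINTR, ETIMEDOUT or EIO per the mapping above */

	/* HL_WAIT_CS_STATUS_COMPLETED or HL_WAIT_CS_STATUS_BUSY */
	return args.out.status;
}
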
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
new file mode 100644
index 000000000000..619ace1c4ef7
--- /dev/null
+++ b/drivers/misc/habanalabs/context.c
@@ -0,0 +1,215 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "habanalabs.h"
9
10#include <linux/slab.h>
11
12static void hl_ctx_fini(struct hl_ctx *ctx)
13{
14 struct hl_device *hdev = ctx->hdev;
15 int i;
16
17 /*
18	 * If we arrived here, there are no jobs waiting for this context
19	 * on its queues, so we can safely remove it.
20	 * This is because each CS increments the context's refcount and
21	 * every finished CS decrements it, so we can't arrive at this
22	 * function unless the refcount is 0
23 */
24
25 for (i = 0 ; i < HL_MAX_PENDING_CS ; i++)
26 dma_fence_put(ctx->cs_pending[i]);
27
28 if (ctx->asid != HL_KERNEL_ASID_ID) {
29 hl_vm_ctx_fini(ctx);
30 hl_asid_free(hdev, ctx->asid);
31 }
32}
33
34void hl_ctx_do_release(struct kref *ref)
35{
36 struct hl_ctx *ctx;
37
38 ctx = container_of(ref, struct hl_ctx, refcount);
39
40 hl_ctx_fini(ctx);
41
42 if (ctx->hpriv)
43 hl_hpriv_put(ctx->hpriv);
44
45 kfree(ctx);
46}
47
48int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
49{
50 struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
51 struct hl_ctx *ctx;
52 int rc;
53
54 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
55 if (!ctx) {
56 rc = -ENOMEM;
57 goto out_err;
58 }
59
60 rc = hl_ctx_init(hdev, ctx, false);
61 if (rc)
62 goto free_ctx;
63
64 hl_hpriv_get(hpriv);
65 ctx->hpriv = hpriv;
66
67 /* TODO: remove for multiple contexts */
68 hpriv->ctx = ctx;
69 hdev->user_ctx = ctx;
70
71 mutex_lock(&mgr->ctx_lock);
72 rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
73 mutex_unlock(&mgr->ctx_lock);
74
75 if (rc < 0) {
76 dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
77 hl_ctx_free(hdev, ctx);
78 goto out_err;
79 }
80
81 return 0;
82
83free_ctx:
84 kfree(ctx);
85out_err:
86 return rc;
87}
88
89void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
90{
91 if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
92 return;
93
94 dev_warn(hdev->dev,
95 "Context %d closed or terminated but its CS are executing\n",
96 ctx->asid);
97}
98
99int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
100{
101 int rc = 0;
102
103 ctx->hdev = hdev;
104
105 kref_init(&ctx->refcount);
106
107 ctx->cs_sequence = 1;
108 spin_lock_init(&ctx->cs_lock);
109 atomic_set(&ctx->thread_restore_token, 1);
110 ctx->thread_restore_wait_token = 0;
111
112 if (is_kernel_ctx) {
113 ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
114 } else {
115 ctx->asid = hl_asid_alloc(hdev);
116 if (!ctx->asid) {
117 dev_err(hdev->dev, "No free ASID, failed to create context\n");
118 return -ENOMEM;
119 }
120
121 rc = hl_vm_ctx_init(ctx);
122 if (rc) {
123 dev_err(hdev->dev, "Failed to init mem ctx module\n");
124 rc = -ENOMEM;
125 goto mem_ctx_err;
126 }
127 }
128
129 return 0;
130
131mem_ctx_err:
132 if (ctx->asid != HL_KERNEL_ASID_ID)
133 hl_asid_free(hdev, ctx->asid);
134
135 return rc;
136}
137
138void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
139{
140 kref_get(&ctx->refcount);
141}
142
143int hl_ctx_put(struct hl_ctx *ctx)
144{
145 return kref_put(&ctx->refcount, hl_ctx_do_release);
146}
147
148struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
149{
150 struct hl_device *hdev = ctx->hdev;
151 struct dma_fence *fence;
152
153 spin_lock(&ctx->cs_lock);
154
155 if (seq >= ctx->cs_sequence) {
156 dev_notice(hdev->dev,
157 "Can't wait on seq %llu because current CS is at seq %llu\n",
158 seq, ctx->cs_sequence);
159 spin_unlock(&ctx->cs_lock);
160 return ERR_PTR(-EINVAL);
161 }
162
163
164 if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) {
165 dev_dbg(hdev->dev,
166 "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
167 seq, ctx->cs_sequence);
168 spin_unlock(&ctx->cs_lock);
169 return NULL;
170 }
171
172 fence = dma_fence_get(
173 ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)]);
174 spin_unlock(&ctx->cs_lock);
175
176 return fence;
177}
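
The three branches above define a sliding window of sequence numbers whose fence can still be retrieved. A small stand-alone sketch of the same check follows; HL_MAX_PENDING_CS is assumed to match the ring size used by allocate_cs().

#include <stdbool.h>
#include <stdint.h>

#define HL_MAX_PENDING_CS 64	/* assumed ring size, must match the driver */

/* Returns true only for sequence numbers whose fence is still retrievable:
 * seq must already have been allocated (seq < cs_sequence) and must not be
 * older than the ring depth, otherwise its slot has been reused.
 */
static bool seq_fence_retrievable(uint64_t seq, uint64_t cs_sequence)
{
	if (seq >= cs_sequence)				/* not submitted yet -> -EINVAL */
		return false;
	if (seq + HL_MAX_PENDING_CS < cs_sequence)	/* too old -> NULL (treated as done) */
		return false;
	return true;
}
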
178
179/*
180 * hl_ctx_mgr_init - initialize the context manager
181 *
182 * @mgr: pointer to context manager structure
183 *
184 * This manager is an object inside the hpriv object of the user process.
185 * The function is called when a user process opens the FD.
186 */
187void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
188{
189 mutex_init(&mgr->ctx_lock);
190 idr_init(&mgr->ctx_handles);
191}
192
193/*
194 * hl_ctx_mgr_fini - finalize the context manager
195 *
196 * @hdev: pointer to device structure
197 * @mgr: pointer to context manager structure
198 *
199 * This function goes over all the contexts in the manager and frees them.
200 * It is called when a process closes the FD.
201 */
202void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
203{
204 struct hl_ctx *ctx;
205 struct idr *idp;
206 u32 id;
207
208 idp = &mgr->ctx_handles;
209
210 idr_for_each_entry(idp, ctx, id)
211 hl_ctx_free(hdev, ctx);
212
213 idr_destroy(&mgr->ctx_handles);
214 mutex_destroy(&mgr->ctx_lock);
215}
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
new file mode 100644
index 000000000000..a53c12aff6ad
--- /dev/null
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -0,0 +1,1077 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "habanalabs.h"
9#include "include/hw_ip/mmu/mmu_general.h"
10
11#include <linux/pci.h>
12#include <linux/debugfs.h>
13#include <linux/uaccess.h>
14
15#define MMU_ADDR_BUF_SIZE 40
16#define MMU_ASID_BUF_SIZE 10
17#define MMU_KBUF_SIZE (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
18
19static struct dentry *hl_debug_root;
20
21static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
22 u8 i2c_reg, u32 *val)
23{
24 struct armcp_packet pkt;
25 int rc;
26
27 if (hl_device_disabled_or_in_reset(hdev))
28 return -EBUSY;
29
30 memset(&pkt, 0, sizeof(pkt));
31
32 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_I2C_RD <<
33 ARMCP_PKT_CTL_OPCODE_SHIFT);
34 pkt.i2c_bus = i2c_bus;
35 pkt.i2c_addr = i2c_addr;
36 pkt.i2c_reg = i2c_reg;
37
38 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
39 HL_DEVICE_TIMEOUT_USEC, (long *) val);
40
41 if (rc)
42 dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
43
44 return rc;
45}
46
47static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
48 u8 i2c_reg, u32 val)
49{
50 struct armcp_packet pkt;
51 int rc;
52
53 if (hl_device_disabled_or_in_reset(hdev))
54 return -EBUSY;
55
56 memset(&pkt, 0, sizeof(pkt));
57
58 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_I2C_WR <<
59 ARMCP_PKT_CTL_OPCODE_SHIFT);
60 pkt.i2c_bus = i2c_bus;
61 pkt.i2c_addr = i2c_addr;
62 pkt.i2c_reg = i2c_reg;
63 pkt.value = __cpu_to_le64(val);
64
65 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
66 HL_DEVICE_TIMEOUT_USEC, NULL);
67
68 if (rc)
69 dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);
70
71 return rc;
72}
73
74static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
75{
76 struct armcp_packet pkt;
77 int rc;
78
79 if (hl_device_disabled_or_in_reset(hdev))
80 return;
81
82 memset(&pkt, 0, sizeof(pkt));
83
84 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_LED_SET <<
85 ARMCP_PKT_CTL_OPCODE_SHIFT);
86 pkt.led_index = __cpu_to_le32(led);
87 pkt.value = __cpu_to_le64(state);
88
89 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
90 HL_DEVICE_TIMEOUT_USEC, NULL);
91
92 if (rc)
93 dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
94}
95
96static int command_buffers_show(struct seq_file *s, void *data)
97{
98 struct hl_debugfs_entry *entry = s->private;
99 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
100 struct hl_cb *cb;
101 bool first = true;
102
103 spin_lock(&dev_entry->cb_spinlock);
104
105 list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
106 if (first) {
107 first = false;
108 seq_puts(s, "\n");
109 seq_puts(s, " CB ID CTX ID CB size CB RefCnt mmap? CS counter\n");
110 seq_puts(s, "---------------------------------------------------------------\n");
111 }
112 seq_printf(s,
113 " %03d %d 0x%08x %d %d %d\n",
114 cb->id, cb->ctx_id, cb->size,
115 kref_read(&cb->refcount),
116 cb->mmap, cb->cs_cnt);
117 }
118
119 spin_unlock(&dev_entry->cb_spinlock);
120
121 if (!first)
122 seq_puts(s, "\n");
123
124 return 0;
125}
126
127static int command_submission_show(struct seq_file *s, void *data)
128{
129 struct hl_debugfs_entry *entry = s->private;
130 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
131 struct hl_cs *cs;
132 bool first = true;
133
134 spin_lock(&dev_entry->cs_spinlock);
135
136 list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
137 if (first) {
138 first = false;
139 seq_puts(s, "\n");
140 seq_puts(s, " CS ID CTX ASID CS RefCnt Submitted Completed\n");
141 seq_puts(s, "------------------------------------------------------\n");
142 }
143 seq_printf(s,
144 " %llu %d %d %d %d\n",
145 cs->sequence, cs->ctx->asid,
146 kref_read(&cs->refcount),
147 cs->submitted, cs->completed);
148 }
149
150 spin_unlock(&dev_entry->cs_spinlock);
151
152 if (!first)
153 seq_puts(s, "\n");
154
155 return 0;
156}
157
158static int command_submission_jobs_show(struct seq_file *s, void *data)
159{
160 struct hl_debugfs_entry *entry = s->private;
161 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
162 struct hl_cs_job *job;
163 bool first = true;
164
165 spin_lock(&dev_entry->cs_job_spinlock);
166
167 list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
168 if (first) {
169 first = false;
170 seq_puts(s, "\n");
171 seq_puts(s, " JOB ID CS ID CTX ASID H/W Queue\n");
172 seq_puts(s, "---------------------------------------\n");
173 }
174 if (job->cs)
175 seq_printf(s,
176 " %02d %llu %d %d\n",
177 job->id, job->cs->sequence, job->cs->ctx->asid,
178 job->hw_queue_id);
179 else
180 seq_printf(s,
181 " %02d 0 %d %d\n",
182 job->id, HL_KERNEL_ASID_ID, job->hw_queue_id);
183 }
184
185 spin_unlock(&dev_entry->cs_job_spinlock);
186
187 if (!first)
188 seq_puts(s, "\n");
189
190 return 0;
191}
192
193static int userptr_show(struct seq_file *s, void *data)
194{
195 struct hl_debugfs_entry *entry = s->private;
196 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
197 struct hl_userptr *userptr;
198 char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
199 "DMA_FROM_DEVICE", "DMA_NONE"};
200 bool first = true;
201
202 spin_lock(&dev_entry->userptr_spinlock);
203
204 list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
205 if (first) {
206 first = false;
207 seq_puts(s, "\n");
208 seq_puts(s, " user virtual address size dma dir\n");
209 seq_puts(s, "----------------------------------------------------------\n");
210 }
211 seq_printf(s,
212 " 0x%-14llx %-10u %-30s\n",
213 userptr->addr, userptr->size, dma_dir[userptr->dir]);
214 }
215
216 spin_unlock(&dev_entry->userptr_spinlock);
217
218 if (!first)
219 seq_puts(s, "\n");
220
221 return 0;
222}
223
224static int vm_show(struct seq_file *s, void *data)
225{
226 struct hl_debugfs_entry *entry = s->private;
227 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
228 struct hl_ctx *ctx;
229 struct hl_vm *vm;
230 struct hl_vm_hash_node *hnode;
231 struct hl_userptr *userptr;
232 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
233 enum vm_type_t *vm_type;
234 bool once = true;
235	int i, j;
236
237 if (!dev_entry->hdev->mmu_enable)
238 return 0;
239
240 spin_lock(&dev_entry->ctx_mem_hash_spinlock);
241
242 list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
243 once = false;
244 seq_puts(s, "\n\n----------------------------------------------------");
245 seq_puts(s, "\n----------------------------------------------------\n\n");
246 seq_printf(s, "ctx asid: %u\n", ctx->asid);
247
248 seq_puts(s, "\nmappings:\n\n");
249 seq_puts(s, " virtual address size handle\n");
250 seq_puts(s, "----------------------------------------------------\n");
251 mutex_lock(&ctx->mem_hash_lock);
252 hash_for_each(ctx->mem_hash, i, hnode, node) {
253 vm_type = hnode->ptr;
254
255 if (*vm_type == VM_TYPE_USERPTR) {
256 userptr = hnode->ptr;
257 seq_printf(s,
258 " 0x%-14llx %-10u\n",
259 hnode->vaddr, userptr->size);
260 } else {
261 phys_pg_pack = hnode->ptr;
262 seq_printf(s,
263 " 0x%-14llx %-10u %-4u\n",
264 hnode->vaddr, phys_pg_pack->total_size,
265 phys_pg_pack->handle);
266 }
267 }
268 mutex_unlock(&ctx->mem_hash_lock);
269
270 vm = &ctx->hdev->vm;
271 spin_lock(&vm->idr_lock);
272
273 if (!idr_is_empty(&vm->phys_pg_pack_handles))
274 seq_puts(s, "\n\nallocations:\n");
275
276 idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
277 if (phys_pg_pack->asid != ctx->asid)
278 continue;
279
280 seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle);
281 seq_printf(s, "page size: %u\n\n",
282 phys_pg_pack->page_size);
283 seq_puts(s, " physical address\n");
284 seq_puts(s, "---------------------\n");
285			for (j = 0 ; j < phys_pg_pack->npages ; j++) {
286 seq_printf(s, " 0x%-14llx\n",
287						phys_pg_pack->pages[j]);
288 }
289 }
290 spin_unlock(&vm->idr_lock);
291
292 }
293
294 spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
295
296 if (!once)
297 seq_puts(s, "\n");
298
299 return 0;
300}
301
302/* these inline functions are copied from mmu.c */
303static inline u64 get_hop0_addr(struct hl_ctx *ctx)
304{
305 return ctx->hdev->asic_prop.mmu_pgt_addr +
306 (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
307}
308
309static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
310 u64 virt_addr)
311{
312 return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
313 ((virt_addr & HOP0_MASK) >> HOP0_SHIFT);
314}
315
316static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
317 u64 virt_addr)
318{
319 return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
320 ((virt_addr & HOP1_MASK) >> HOP1_SHIFT);
321}
322
323static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
324 u64 virt_addr)
325{
326 return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
327 ((virt_addr & HOP2_MASK) >> HOP2_SHIFT);
328}
329
330static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
331 u64 virt_addr)
332{
333 return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
334 ((virt_addr & HOP3_MASK) >> HOP3_SHIFT);
335}
336
337static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
338 u64 virt_addr)
339{
340 return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
341 ((virt_addr & HOP4_MASK) >> HOP4_SHIFT);
342}
343
344static inline u64 get_next_hop_addr(u64 curr_pte)
345{
346 if (curr_pte & PAGE_PRESENT_MASK)
347 return curr_pte & PHYS_ADDR_MASK;
348 else
349 return ULLONG_MAX;
350}
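
Each of these helpers computes the address of a PTE as the hop-table base plus the index extracted from the virtual address times the PTE size. A small illustrative calculation follows; every constant in it is invented for the example, since the real masks, shifts and PTE size come from mmu_general.h and asic_prop.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* All values below are made up for illustration only */
	const uint64_t hop_addr = 0x1000000;			/* base of one hop table */
	const uint64_t pte_size = 8;				/* e.g. asic_prop.mmu_pte_size */
	const uint64_t hop_mask = 0x0000ff8000000000ULL;	/* stand-in for HOP0_MASK */
	const unsigned int hop_shift = 39;			/* stand-in for HOP0_SHIFT */
	const uint64_t virt_addr = 0x0000123456789000ULL;

	uint64_t pte_addr = hop_addr +
			    pte_size * ((virt_addr & hop_mask) >> hop_shift);

	printf("PTE for this hop sits at 0x%llx\n",
	       (unsigned long long) pte_addr);
	return 0;
}
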
351
352static int mmu_show(struct seq_file *s, void *data)
353{
354 struct hl_debugfs_entry *entry = s->private;
355 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
356 struct hl_device *hdev = dev_entry->hdev;
357 struct hl_ctx *ctx = hdev->user_ctx;
358
359 u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0,
360 hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0,
361 hop2_addr = 0, hop2_pte_addr = 0, hop2_pte = 0,
362 hop3_addr = 0, hop3_pte_addr = 0, hop3_pte = 0,
363 hop4_addr = 0, hop4_pte_addr = 0, hop4_pte = 0,
364 virt_addr = dev_entry->mmu_addr;
365
366 if (!hdev->mmu_enable)
367 return 0;
368
369 if (!ctx) {
370 dev_err(hdev->dev, "no ctx available\n");
371 return 0;
372 }
373
374 mutex_lock(&ctx->mmu_lock);
375
376 /* the following lookup is copied from unmap() in mmu.c */
377
378 hop0_addr = get_hop0_addr(ctx);
379 hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
380 hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
381 hop1_addr = get_next_hop_addr(hop0_pte);
382
383 if (hop1_addr == ULLONG_MAX)
384 goto not_mapped;
385
386 hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
387 hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
388 hop2_addr = get_next_hop_addr(hop1_pte);
389
390 if (hop2_addr == ULLONG_MAX)
391 goto not_mapped;
392
393 hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
394 hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
395 hop3_addr = get_next_hop_addr(hop2_pte);
396
397 if (hop3_addr == ULLONG_MAX)
398 goto not_mapped;
399
400 hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
401 hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
402
403 if (!(hop3_pte & LAST_MASK)) {
404 hop4_addr = get_next_hop_addr(hop3_pte);
405
406 if (hop4_addr == ULLONG_MAX)
407 goto not_mapped;
408
409 hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
410 hop4_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
411 if (!(hop4_pte & PAGE_PRESENT_MASK))
412 goto not_mapped;
413 } else {
414 if (!(hop3_pte & PAGE_PRESENT_MASK))
415 goto not_mapped;
416 }
417
418 seq_printf(s, "asid: %u, virt_addr: 0x%llx\n",
419 dev_entry->mmu_asid, dev_entry->mmu_addr);
420
421 seq_printf(s, "hop0_addr: 0x%llx\n", hop0_addr);
422 seq_printf(s, "hop0_pte_addr: 0x%llx\n", hop0_pte_addr);
423 seq_printf(s, "hop0_pte: 0x%llx\n", hop0_pte);
424
425 seq_printf(s, "hop1_addr: 0x%llx\n", hop1_addr);
426 seq_printf(s, "hop1_pte_addr: 0x%llx\n", hop1_pte_addr);
427 seq_printf(s, "hop1_pte: 0x%llx\n", hop1_pte);
428
429 seq_printf(s, "hop2_addr: 0x%llx\n", hop2_addr);
430 seq_printf(s, "hop2_pte_addr: 0x%llx\n", hop2_pte_addr);
431 seq_printf(s, "hop2_pte: 0x%llx\n", hop2_pte);
432
433 seq_printf(s, "hop3_addr: 0x%llx\n", hop3_addr);
434 seq_printf(s, "hop3_pte_addr: 0x%llx\n", hop3_pte_addr);
435 seq_printf(s, "hop3_pte: 0x%llx\n", hop3_pte);
436
437 if (!(hop3_pte & LAST_MASK)) {
438 seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr);
439 seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr);
440 seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte);
441 }
442
443 goto out;
444
445not_mapped:
446 dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
447 virt_addr);
448out:
449 mutex_unlock(&ctx->mmu_lock);
450
451 return 0;
452}
453
454static ssize_t mmu_write(struct file *file, const char __user *buf,
455 size_t count, loff_t *f_pos)
456{
457 struct seq_file *s = file->private_data;
458 struct hl_debugfs_entry *entry = s->private;
459 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
460 struct hl_device *hdev = dev_entry->hdev;
461 char kbuf[MMU_KBUF_SIZE], asid_kbuf[MMU_ASID_BUF_SIZE],
462 addr_kbuf[MMU_ADDR_BUF_SIZE];
463 char *c;
464 ssize_t rc;
465
466 if (!hdev->mmu_enable)
467 return count;
468
469 memset(kbuf, 0, sizeof(kbuf));
470 memset(asid_kbuf, 0, sizeof(asid_kbuf));
471 memset(addr_kbuf, 0, sizeof(addr_kbuf));
472
473	if (count > sizeof(kbuf) - 1 || copy_from_user(kbuf, buf, count))
474 goto err;
475
476 kbuf[MMU_KBUF_SIZE - 1] = 0;
477
478 c = strchr(kbuf, ' ');
479 if (!c)
480 goto err;
481
482 memcpy(asid_kbuf, kbuf, c - kbuf);
483
484 rc = kstrtouint(asid_kbuf, 10, &dev_entry->mmu_asid);
485 if (rc)
486 goto err;
487
488 c = strstr(kbuf, " 0x");
489 if (!c)
490 goto err;
491
492 c += 3;
493 memcpy(addr_kbuf, c, (kbuf + count) - c);
494
495 rc = kstrtoull(addr_kbuf, 16, &dev_entry->mmu_addr);
496 if (rc)
497 goto err;
498
499 return count;
500
501err:
502 dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n");
503
504 return -EINVAL;
505}
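
To exercise this entry from user space, something like the following hedged sketch can be used; the debugfs mount point, the per-device directory name ("hl0", derived from dev_name() in hl_debugfs_add_device() below) and the device virtual address are all assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* "hl0" is an assumed device directory name; adjust to the real one */
	const char *path = "/sys/kernel/debug/habanalabs/hl0/mmu";
	const char *query = "0 0x20000000\n";	/* "<asid> <0xaddr>", address is hypothetical */
	char dump[4096];
	ssize_t n;
	int fd;

	fd = open(path, O_WRONLY);	/* mirrors "echo <asid> <0xaddr> > mmu" */
	if (fd < 0)
		return 1;
	if (write(fd, query, strlen(query)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);

	fd = open(path, O_RDONLY);	/* mirrors "cat mmu" to get the hop dump */
	if (fd < 0)
		return 1;
	n = read(fd, dump, sizeof(dump) - 1);
	if (n > 0) {
		dump[n] = '\0';
		fputs(dump, stdout);	/* hop addresses and PTEs printed by mmu_show() */
	}
	close(fd);
	return 0;
}
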
506
507static ssize_t hl_data_read32(struct file *f, char __user *buf,
508 size_t count, loff_t *ppos)
509{
510 struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
511 struct hl_device *hdev = entry->hdev;
512 char tmp_buf[32];
513 u32 val;
514 ssize_t rc;
515
516 if (*ppos)
517 return 0;
518
519 rc = hdev->asic_funcs->debugfs_read32(hdev, entry->addr, &val);
520 if (rc) {
521 dev_err(hdev->dev, "Failed to read from 0x%010llx\n",
522 entry->addr);
523 return rc;
524 }
525
526 sprintf(tmp_buf, "0x%08x\n", val);
527 rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
528 strlen(tmp_buf) + 1);
529
530 return rc;
531}
532
533static ssize_t hl_data_write32(struct file *f, const char __user *buf,
534 size_t count, loff_t *ppos)
535{
536 struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
537 struct hl_device *hdev = entry->hdev;
538 u32 value;
539 ssize_t rc;
540
541 rc = kstrtouint_from_user(buf, count, 16, &value);
542 if (rc)
543 return rc;
544
545 rc = hdev->asic_funcs->debugfs_write32(hdev, entry->addr, value);
546 if (rc) {
547 dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
548 value, entry->addr);
549 return rc;
550 }
551
552 return count;
553}
554
555static ssize_t hl_get_power_state(struct file *f, char __user *buf,
556 size_t count, loff_t *ppos)
557{
558 struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
559 struct hl_device *hdev = entry->hdev;
560 char tmp_buf[200];
561 ssize_t rc;
562 int i;
563
564 if (*ppos)
565 return 0;
566
567 if (hdev->pdev->current_state == PCI_D0)
568 i = 1;
569 else if (hdev->pdev->current_state == PCI_D3hot)
570 i = 2;
571 else
572 i = 3;
573
574 sprintf(tmp_buf,
575 "current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
576 rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
577 strlen(tmp_buf) + 1);
578
579 return rc;
580}
581
582static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
583 size_t count, loff_t *ppos)
584{
585 struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
586 struct hl_device *hdev = entry->hdev;
587 u32 value;
588 ssize_t rc;
589
590 rc = kstrtouint_from_user(buf, count, 10, &value);
591 if (rc)
592 return rc;
593
594 if (value == 1) {
595 pci_set_power_state(hdev->pdev, PCI_D0);
596 pci_restore_state(hdev->pdev);
597 rc = pci_enable_device(hdev->pdev);
598 } else if (value == 2) {
599 pci_save_state(hdev->pdev);
600 pci_disable_device(hdev->pdev);
601 pci_set_power_state(hdev->pdev, PCI_D3hot);
602 } else {
603 dev_dbg(hdev->dev, "invalid power state value %u\n", value);
604 return -EINVAL;
605 }
606
607 return count;
608}
609
610static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
611 size_t count, loff_t *ppos)
612{
613 struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
614 struct hl_device *hdev = entry->hdev;
615 char tmp_buf[32];
616 u32 val;
617 ssize_t rc;
618
619 if (*ppos)
620 return 0;
621
622 rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
623 entry->i2c_reg, &val);
624 if (rc) {
625 dev_err(hdev->dev,
626 "Failed to read from I2C bus %d, addr %d, reg %d\n",
627 entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
628 return rc;
629 }
630
631 sprintf(tmp_buf, "0x%02x\n", val);
632 rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
633 strlen(tmp_buf) + 1);
634
635 return rc;
636}
637
638static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
639 size_t count, loff_t *ppos)
640{
641 struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
642 struct hl_device *hdev = entry->hdev;
643 u32 value;
644 ssize_t rc;
645
646 rc = kstrtouint_from_user(buf, count, 16, &value);
647 if (rc)
648 return rc;
649
650 rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
651 entry->i2c_reg, value);
652 if (rc) {
653 dev_err(hdev->dev,
654 "Failed to write 0x%02x to I2C bus %d, addr %d, reg %d\n",
655 value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
656 return rc;
657 }
658
659 return count;
660}
661
662static ssize_t hl_led0_write(struct file *f, const char __user *buf,
663 size_t count, loff_t *ppos)
664{
665 struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
666 struct hl_device *hdev = entry->hdev;
667 u32 value;
668 ssize_t rc;
669
670 rc = kstrtouint_from_user(buf, count, 10, &value);
671 if (rc)
672 return rc;
673
674 value = value ? 1 : 0;
675
676 hl_debugfs_led_set(hdev, 0, value);
677
678 return count;
679}
680
681static ssize_t hl_led1_write(struct file *f, const char __user *buf,
682 size_t count, loff_t *ppos)
683{
684 struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
685 struct hl_device *hdev = entry->hdev;
686 u32 value;
687 ssize_t rc;
688
689 rc = kstrtouint_from_user(buf, count, 10, &value);
690 if (rc)
691 return rc;
692
693 value = value ? 1 : 0;
694
695 hl_debugfs_led_set(hdev, 1, value);
696
697 return count;
698}
699
700static ssize_t hl_led2_write(struct file *f, const char __user *buf,
701 size_t count, loff_t *ppos)
702{
703 struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
704 struct hl_device *hdev = entry->hdev;
705 u32 value;
706 ssize_t rc;
707
708 rc = kstrtouint_from_user(buf, count, 10, &value);
709 if (rc)
710 return rc;
711
712 value = value ? 1 : 0;
713
714 hl_debugfs_led_set(hdev, 2, value);
715
716 return count;
717}
718
719static ssize_t hl_device_read(struct file *f, char __user *buf,
720 size_t count, loff_t *ppos)
721{
722 char tmp_buf[200];
723 ssize_t rc;
724
725 if (*ppos)
726 return 0;
727
728 sprintf(tmp_buf,
729 "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
730 rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
731 strlen(tmp_buf) + 1);
732
733 return rc;
734}
735
736static ssize_t hl_device_write(struct file *f, const char __user *buf,
737 size_t count, loff_t *ppos)
738{
739 struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
740 struct hl_device *hdev = entry->hdev;
741 char data[30];
742
743 /* don't allow partial writes */
744 if (*ppos != 0)
745 return 0;
746
747 simple_write_to_buffer(data, 29, ppos, buf, count);
748
749 if (strncmp("disable", data, strlen("disable")) == 0) {
750 hdev->disabled = true;
751 } else if (strncmp("enable", data, strlen("enable")) == 0) {
752 hdev->disabled = false;
753 } else if (strncmp("suspend", data, strlen("suspend")) == 0) {
754 hdev->asic_funcs->suspend(hdev);
755 } else if (strncmp("resume", data, strlen("resume")) == 0) {
756 hdev->asic_funcs->resume(hdev);
757 } else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) {
758 hdev->device_cpu_disabled = true;
759 } else {
760 dev_err(hdev->dev,
761 "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
762 count = -EINVAL;
763 }
764
765 return count;
766}
767
768static const struct file_operations hl_data32b_fops = {
769 .owner = THIS_MODULE,
770 .read = hl_data_read32,
771 .write = hl_data_write32
772};
773
774static const struct file_operations hl_i2c_data_fops = {
775 .owner = THIS_MODULE,
776 .read = hl_i2c_data_read,
777 .write = hl_i2c_data_write
778};
779
780static const struct file_operations hl_power_fops = {
781 .owner = THIS_MODULE,
782 .read = hl_get_power_state,
783 .write = hl_set_power_state
784};
785
786static const struct file_operations hl_led0_fops = {
787 .owner = THIS_MODULE,
788 .write = hl_led0_write
789};
790
791static const struct file_operations hl_led1_fops = {
792 .owner = THIS_MODULE,
793 .write = hl_led1_write
794};
795
796static const struct file_operations hl_led2_fops = {
797 .owner = THIS_MODULE,
798 .write = hl_led2_write
799};
800
801static const struct file_operations hl_device_fops = {
802 .owner = THIS_MODULE,
803 .read = hl_device_read,
804 .write = hl_device_write
805};
806
807static const struct hl_info_list hl_debugfs_list[] = {
808 {"command_buffers", command_buffers_show, NULL},
809 {"command_submission", command_submission_show, NULL},
810 {"command_submission_jobs", command_submission_jobs_show, NULL},
811 {"userptr", userptr_show, NULL},
812 {"vm", vm_show, NULL},
813 {"mmu", mmu_show, mmu_write},
814};
815
816static int hl_debugfs_open(struct inode *inode, struct file *file)
817{
818 struct hl_debugfs_entry *node = inode->i_private;
819
820 return single_open(file, node->info_ent->show, node);
821}
822
823static ssize_t hl_debugfs_write(struct file *file, const char __user *buf,
824 size_t count, loff_t *f_pos)
825{
826 struct hl_debugfs_entry *node = file->f_inode->i_private;
827
828 if (node->info_ent->write)
829 return node->info_ent->write(file, buf, count, f_pos);
830 else
831 return -EINVAL;
832
833}
834
835static const struct file_operations hl_debugfs_fops = {
836 .owner = THIS_MODULE,
837 .open = hl_debugfs_open,
838 .read = seq_read,
839 .write = hl_debugfs_write,
840 .llseek = seq_lseek,
841 .release = single_release,
842};
843
844void hl_debugfs_add_device(struct hl_device *hdev)
845{
846 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
847 int count = ARRAY_SIZE(hl_debugfs_list);
848 struct hl_debugfs_entry *entry;
849 struct dentry *ent;
850 int i;
851
852 dev_entry->hdev = hdev;
853 dev_entry->entry_arr = kmalloc_array(count,
854 sizeof(struct hl_debugfs_entry),
855 GFP_KERNEL);
856 if (!dev_entry->entry_arr)
857 return;
858
859 INIT_LIST_HEAD(&dev_entry->file_list);
860 INIT_LIST_HEAD(&dev_entry->cb_list);
861 INIT_LIST_HEAD(&dev_entry->cs_list);
862 INIT_LIST_HEAD(&dev_entry->cs_job_list);
863 INIT_LIST_HEAD(&dev_entry->userptr_list);
864 INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
865 mutex_init(&dev_entry->file_mutex);
866 spin_lock_init(&dev_entry->cb_spinlock);
867 spin_lock_init(&dev_entry->cs_spinlock);
868 spin_lock_init(&dev_entry->cs_job_spinlock);
869 spin_lock_init(&dev_entry->userptr_spinlock);
870 spin_lock_init(&dev_entry->ctx_mem_hash_spinlock);
871
872 dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
873 hl_debug_root);
874
875 debugfs_create_x64("addr",
876 0644,
877 dev_entry->root,
878 &dev_entry->addr);
879
880 debugfs_create_file("data32",
881 0644,
882 dev_entry->root,
883 dev_entry,
884 &hl_data32b_fops);
885
886 debugfs_create_file("set_power_state",
887 0200,
888 dev_entry->root,
889 dev_entry,
890 &hl_power_fops);
891
892 debugfs_create_u8("i2c_bus",
893 0644,
894 dev_entry->root,
895 &dev_entry->i2c_bus);
896
897 debugfs_create_u8("i2c_addr",
898 0644,
899 dev_entry->root,
900 &dev_entry->i2c_addr);
901
902 debugfs_create_u8("i2c_reg",
903 0644,
904 dev_entry->root,
905 &dev_entry->i2c_reg);
906
907 debugfs_create_file("i2c_data",
908 0644,
909 dev_entry->root,
910 dev_entry,
911 &hl_i2c_data_fops);
912
913 debugfs_create_file("led0",
914 0200,
915 dev_entry->root,
916 dev_entry,
917 &hl_led0_fops);
918
919 debugfs_create_file("led1",
920 0200,
921 dev_entry->root,
922 dev_entry,
923 &hl_led1_fops);
924
925 debugfs_create_file("led2",
926 0200,
927 dev_entry->root,
928 dev_entry,
929 &hl_led2_fops);
930
931 debugfs_create_file("device",
932 0200,
933 dev_entry->root,
934 dev_entry,
935 &hl_device_fops);
936
937 for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
938
939 ent = debugfs_create_file(hl_debugfs_list[i].name,
940 0444,
941 dev_entry->root,
942 entry,
943 &hl_debugfs_fops);
944 entry->dent = ent;
945 entry->info_ent = &hl_debugfs_list[i];
946 entry->dev_entry = dev_entry;
947 }
948}
949
950void hl_debugfs_remove_device(struct hl_device *hdev)
951{
952 struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
953
954 debugfs_remove_recursive(entry->root);
955
956 mutex_destroy(&entry->file_mutex);
957 kfree(entry->entry_arr);
958}
959
960void hl_debugfs_add_file(struct hl_fpriv *hpriv)
961{
962 struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
963
964 mutex_lock(&dev_entry->file_mutex);
965 list_add(&hpriv->debugfs_list, &dev_entry->file_list);
966 mutex_unlock(&dev_entry->file_mutex);
967}
968
969void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
970{
971 struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
972
973 mutex_lock(&dev_entry->file_mutex);
974 list_del(&hpriv->debugfs_list);
975 mutex_unlock(&dev_entry->file_mutex);
976}
977
978void hl_debugfs_add_cb(struct hl_cb *cb)
979{
980 struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
981
982 spin_lock(&dev_entry->cb_spinlock);
983 list_add(&cb->debugfs_list, &dev_entry->cb_list);
984 spin_unlock(&dev_entry->cb_spinlock);
985}
986
987void hl_debugfs_remove_cb(struct hl_cb *cb)
988{
989 struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
990
991 spin_lock(&dev_entry->cb_spinlock);
992 list_del(&cb->debugfs_list);
993 spin_unlock(&dev_entry->cb_spinlock);
994}
995
996void hl_debugfs_add_cs(struct hl_cs *cs)
997{
998 struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
999
1000 spin_lock(&dev_entry->cs_spinlock);
1001 list_add(&cs->debugfs_list, &dev_entry->cs_list);
1002 spin_unlock(&dev_entry->cs_spinlock);
1003}
1004
1005void hl_debugfs_remove_cs(struct hl_cs *cs)
1006{
1007 struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
1008
1009 spin_lock(&dev_entry->cs_spinlock);
1010 list_del(&cs->debugfs_list);
1011 spin_unlock(&dev_entry->cs_spinlock);
1012}
1013
1014void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job)
1015{
1016 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1017
1018 spin_lock(&dev_entry->cs_job_spinlock);
1019 list_add(&job->debugfs_list, &dev_entry->cs_job_list);
1020 spin_unlock(&dev_entry->cs_job_spinlock);
1021}
1022
1023void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job)
1024{
1025 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1026
1027 spin_lock(&dev_entry->cs_job_spinlock);
1028 list_del(&job->debugfs_list);
1029 spin_unlock(&dev_entry->cs_job_spinlock);
1030}
1031
1032void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
1033{
1034 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1035
1036 spin_lock(&dev_entry->userptr_spinlock);
1037 list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
1038 spin_unlock(&dev_entry->userptr_spinlock);
1039}
1040
1041void hl_debugfs_remove_userptr(struct hl_device *hdev,
1042 struct hl_userptr *userptr)
1043{
1044 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1045
1046 spin_lock(&dev_entry->userptr_spinlock);
1047 list_del(&userptr->debugfs_list);
1048 spin_unlock(&dev_entry->userptr_spinlock);
1049}
1050
1051void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
1052{
1053 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1054
1055 spin_lock(&dev_entry->ctx_mem_hash_spinlock);
1056 list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
1057 spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
1058}
1059
1060void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
1061{
1062 struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1063
1064 spin_lock(&dev_entry->ctx_mem_hash_spinlock);
1065 list_del(&ctx->debugfs_list);
1066 spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
1067}
1068
1069void __init hl_debugfs_init(void)
1070{
1071 hl_debug_root = debugfs_create_dir("habanalabs", NULL);
1072}
1073
1074void hl_debugfs_fini(void)
1075{
1076 debugfs_remove_recursive(hl_debug_root);
1077}
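
As a usage illustration of the addr/data32 pair created in hl_debugfs_add_device() above ("addr" latches the target address, reading "data32" performs the access), here is a hedged user-space sketch; the directory path passed in is an assumption such as /sys/kernel/debug/habanalabs/hl0.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hedged sketch: read one 32-bit word through the addr/data32 pair. */
static int peek32(const char *dev_dir, unsigned long long addr,
		  unsigned int *val)
{
	char path[256], buf[32];
	ssize_t n;
	int fd;

	/* 1. latch the target address into the "addr" x64 attribute */
	snprintf(path, sizeof(path), "%s/addr", dev_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	n = snprintf(buf, sizeof(buf), "0x%llx\n", addr);
	if (write(fd, buf, n) != n) {
		close(fd);
		return -1;
	}
	close(fd);

	/* 2. read "data32", which goes through hl_data_read32() above */
	snprintf(path, sizeof(path), "%s/data32", dev_dir);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n <= 0)
		return -1;
	buf[n] = '\0';

	return sscanf(buf, "0x%x", val) == 1 ? 0 : -1;
}

Calling peek32("/sys/kernel/debug/habanalabs/hl0", reg_addr, &val) then performs the same sequence as echoing the address into addr and catting data32.
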
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
new file mode 100644
index 000000000000..de46aa6ed154
--- /dev/null
+++ b/drivers/misc/habanalabs/device.c
@@ -0,0 +1,1140 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "habanalabs.h"
9
10#include <linux/pci.h>
11#include <linux/sched/signal.h>
12#include <linux/hwmon.h>
13
14bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
15{
16 if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
17 return true;
18 else
19 return false;
20}
21
22static void hpriv_release(struct kref *ref)
23{
24 struct hl_fpriv *hpriv;
25 struct hl_device *hdev;
26
27 hpriv = container_of(ref, struct hl_fpriv, refcount);
28
29 hdev = hpriv->hdev;
30
31 put_pid(hpriv->taskpid);
32
33 hl_debugfs_remove_file(hpriv);
34
35 mutex_destroy(&hpriv->restore_phase_mutex);
36
37 kfree(hpriv);
38
39 /* Now the FD is really closed */
40 atomic_dec(&hdev->fd_open_cnt);
41
42 /* This allows a new user context to open the device */
43 hdev->user_ctx = NULL;
44}
45
46void hl_hpriv_get(struct hl_fpriv *hpriv)
47{
48 kref_get(&hpriv->refcount);
49}
50
51void hl_hpriv_put(struct hl_fpriv *hpriv)
52{
53 kref_put(&hpriv->refcount, hpriv_release);
54}
55
56/*
57 * hl_device_release - release function for habanalabs device
58 *
59 * @inode: pointer to inode structure
60 * @filp: pointer to file structure
61 *
62 * Called when a process closes a habanalabs device
63 */
64static int hl_device_release(struct inode *inode, struct file *filp)
65{
66 struct hl_fpriv *hpriv = filp->private_data;
67
68 hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
69 hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
70
71 filp->private_data = NULL;
72
73 hl_hpriv_put(hpriv);
74
75 return 0;
76}
77
78/*
79 * hl_mmap - mmap function for habanalabs device
80 *
81 * @*filp: pointer to file structure
82 * @*vma: pointer to vm_area_struct of the process
83 *
84 * Called when a process does an mmap on the habanalabs device. Call the device's mmap
85 * function at the end of the common code.
86 */
87static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
88{
89 struct hl_fpriv *hpriv = filp->private_data;
90
91 if ((vma->vm_pgoff & HL_MMAP_CB_MASK) == HL_MMAP_CB_MASK) {
92 vma->vm_pgoff ^= HL_MMAP_CB_MASK;
93 return hl_cb_mmap(hpriv, vma);
94 }
95
96 return -EINVAL;
97}
98
99static const struct file_operations hl_ops = {
100 .owner = THIS_MODULE,
101 .open = hl_device_open,
102 .release = hl_device_release,
103 .mmap = hl_mmap,
104 .unlocked_ioctl = hl_ioctl,
105 .compat_ioctl = hl_ioctl
106};
107
108/*
109 * device_setup_cdev - set up cdev and device for habanalabs device
110 *
111 * @hdev: pointer to habanalabs device structure
112 * @hclass: pointer to the class object of the device
113 * @minor: minor number of the specific device
114 * @fops: file operations to install for this device
115 *
116 * Create a cdev and a Linux device for the habanalabs device. This needs to
117 * be called at the end of the habanalabs device initialization process,
118 * because this function exposes the device to the user
119 */
120static int device_setup_cdev(struct hl_device *hdev, struct class *hclass,
121 int minor, const struct file_operations *fops)
122{
123 int err, devno = MKDEV(hdev->major, minor);
124 struct cdev *hdev_cdev = &hdev->cdev;
125 char *name;
126
127 name = kasprintf(GFP_KERNEL, "hl%d", hdev->id);
128 if (!name)
129 return -ENOMEM;
130
131 cdev_init(hdev_cdev, fops);
132 hdev_cdev->owner = THIS_MODULE;
133 err = cdev_add(hdev_cdev, devno, 1);
134 if (err) {
135 pr_err("Failed to add char device %s\n", name);
136 goto err_cdev_add;
137 }
138
139 hdev->dev = device_create(hclass, NULL, devno, NULL, "%s", name);
140 if (IS_ERR(hdev->dev)) {
141 pr_err("Failed to create device %s\n", name);
142 err = PTR_ERR(hdev->dev);
143 goto err_device_create;
144 }
145
146 dev_set_drvdata(hdev->dev, hdev);
147
148 kfree(name);
149
150 return 0;
151
152err_device_create:
153 cdev_del(hdev_cdev);
154err_cdev_add:
155 kfree(name);
156 return err;
157}
158
159/*
160 * device_early_init - do some early initialization for the habanalabs device
161 *
162 * @hdev: pointer to habanalabs device structure
163 *
164 * Install the relevant function pointers and call the early_init function,
165 * if such a function exists
166 */
167static int device_early_init(struct hl_device *hdev)
168{
169 int rc;
170
171 switch (hdev->asic_type) {
172 case ASIC_GOYA:
173 goya_set_asic_funcs(hdev);
174 strlcpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
175 break;
176 default:
177 dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
178 hdev->asic_type);
179 return -EINVAL;
180 }
181
182 rc = hdev->asic_funcs->early_init(hdev);
183 if (rc)
184 return rc;
185
186 rc = hl_asid_init(hdev);
187 if (rc)
188 goto early_fini;
189
190 hdev->cq_wq = alloc_workqueue("hl-free-jobs", WQ_UNBOUND, 0);
191 if (hdev->cq_wq == NULL) {
192 dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
193 rc = -ENOMEM;
194 goto asid_fini;
195 }
196
197 hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
198 if (hdev->eq_wq == NULL) {
199 dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
200 rc = -ENOMEM;
201 goto free_cq_wq;
202 }
203
204 hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
205 GFP_KERNEL);
206 if (!hdev->hl_chip_info) {
207 rc = -ENOMEM;
208 goto free_eq_wq;
209 }
210
211 hl_cb_mgr_init(&hdev->kernel_cb_mgr);
212
213 mutex_init(&hdev->fd_open_cnt_lock);
214 mutex_init(&hdev->send_cpu_message_lock);
215 INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
216 spin_lock_init(&hdev->hw_queues_mirror_lock);
217 atomic_set(&hdev->in_reset, 0);
218 atomic_set(&hdev->fd_open_cnt, 0);
219
220 return 0;
221
222free_eq_wq:
223 destroy_workqueue(hdev->eq_wq);
224free_cq_wq:
225 destroy_workqueue(hdev->cq_wq);
226asid_fini:
227 hl_asid_fini(hdev);
228early_fini:
229 if (hdev->asic_funcs->early_fini)
230 hdev->asic_funcs->early_fini(hdev);
231
232 return rc;
233}
234
235/*
236 * device_early_fini - finalize all that was done in device_early_init
237 *
238 * @hdev: pointer to habanalabs device structure
239 *
240 */
241static void device_early_fini(struct hl_device *hdev)
242{
243 mutex_destroy(&hdev->send_cpu_message_lock);
244
245 hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
246
247 kfree(hdev->hl_chip_info);
248
249 destroy_workqueue(hdev->eq_wq);
250 destroy_workqueue(hdev->cq_wq);
251
252 hl_asid_fini(hdev);
253
254 if (hdev->asic_funcs->early_fini)
255 hdev->asic_funcs->early_fini(hdev);
256
257 mutex_destroy(&hdev->fd_open_cnt_lock);
258}
259
260static void set_freq_to_low_job(struct work_struct *work)
261{
262 struct hl_device *hdev = container_of(work, struct hl_device,
263 work_freq.work);
264
265 if (atomic_read(&hdev->fd_open_cnt) == 0)
266 hl_device_set_frequency(hdev, PLL_LOW);
267
268 schedule_delayed_work(&hdev->work_freq,
269 usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
270}
271
272static void hl_device_heartbeat(struct work_struct *work)
273{
274 struct hl_device *hdev = container_of(work, struct hl_device,
275 work_heartbeat.work);
276
277 if (hl_device_disabled_or_in_reset(hdev))
278 goto reschedule;
279
280 if (!hdev->asic_funcs->send_heartbeat(hdev))
281 goto reschedule;
282
283 dev_err(hdev->dev, "Device heartbeat failed!\n");
284 hl_device_reset(hdev, true, false);
285
286 return;
287
288reschedule:
289 schedule_delayed_work(&hdev->work_heartbeat,
290 usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
291}
292
293/*
294 * device_late_init - do late initialization for the habanalabs device
295 *
296 * @hdev: pointer to habanalabs device structure
297 *
298 * Do initialization steps that either need the device H/W queues to be active
299 * or need to happen after all the rest of the initialization has finished
300 */
301static int device_late_init(struct hl_device *hdev)
302{
303 int rc;
304
305 INIT_DELAYED_WORK(&hdev->work_freq, set_freq_to_low_job);
306 hdev->high_pll = hdev->asic_prop.high_pll;
307
308 /* force setting to low frequency */
309 atomic_set(&hdev->curr_pll_profile, PLL_LOW);
310
311 if (hdev->pm_mng_profile == PM_AUTO)
312 hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
313 else
314 hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST);
315
316 if (hdev->asic_funcs->late_init) {
317 rc = hdev->asic_funcs->late_init(hdev);
318 if (rc) {
319 dev_err(hdev->dev,
320 "failed late initialization for the H/W\n");
321 return rc;
322 }
323 }
324
325 schedule_delayed_work(&hdev->work_freq,
326 usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
327
328 if (hdev->heartbeat) {
329 INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
330 schedule_delayed_work(&hdev->work_heartbeat,
331 usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
332 }
333
334 hdev->late_init_done = true;
335
336 return 0;
337}
338
339/*
340 * device_late_fini - finalize all that was done in device_late_init
341 *
342 * @hdev: pointer to habanalabs device structure
343 *
344 */
345static void device_late_fini(struct hl_device *hdev)
346{
347 if (!hdev->late_init_done)
348 return;
349
350 cancel_delayed_work_sync(&hdev->work_freq);
351 if (hdev->heartbeat)
352 cancel_delayed_work_sync(&hdev->work_heartbeat);
353
354 if (hdev->asic_funcs->late_fini)
355 hdev->asic_funcs->late_fini(hdev);
356
357 hdev->late_init_done = false;
358}
359
360/*
361 * hl_device_set_frequency - set the frequency of the device
362 *
363 * @hdev: pointer to habanalabs device structure
364 * @freq: the new frequency value
365 *
366 * Change the frequency if needed.
367 * We allow setting the PLL to low only if there is no user process
368 * Returns 0 if no change was done, otherwise returns 1.
369 */
370int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
371{
372 enum hl_pll_frequency old_freq =
373 (freq == PLL_HIGH) ? PLL_LOW : PLL_HIGH;
374 int ret;
375
376 if (hdev->pm_mng_profile == PM_MANUAL)
377 return 0;
378
379 ret = atomic_cmpxchg(&hdev->curr_pll_profile, old_freq, freq);
380 if (ret == freq)
381 return 0;
382
383 /*
384 * in case we want to lower the frequency, check that the device is not
385 * opened. We must check here to work around a race condition with
386 * hl_device_open
387 */
388 if ((freq == PLL_LOW) && (atomic_read(&hdev->fd_open_cnt) > 0)) {
389 atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
390 return 0;
391 }
392
393 dev_dbg(hdev->dev, "Changing device frequency to %s\n",
394 freq == PLL_HIGH ? "high" : "low");
395
396 hdev->asic_funcs->set_pll_profile(hdev, freq);
397
398 return 1;
399}
400
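The 0/1 return convention above matters to callers that want to act only on an actual profile change. As a rough sketch (the helper name below is hypothetical and not part of this patch), a caller raising the profile when a user opens the device could look like:

	/* Hypothetical caller sketch: raise the PLL profile on device open and
	 * log only when hl_device_set_frequency() actually changed it
	 * (i.e. it returned 1).
	 */
	static void example_on_device_open(struct hl_device *hdev)
	{
		if (hl_device_set_frequency(hdev, PLL_HIGH))
			dev_dbg(hdev->dev, "PLL profile raised to high\n");
	}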
401/*
402 * hl_device_suspend - initiate device suspend
403 *
404 * @hdev: pointer to habanalabs device structure
405 *
406 * Puts the hw in the suspend state (all asics).
407 * Returns 0 for success or an error on failure.
408 * Called at driver suspend.
409 */
410int hl_device_suspend(struct hl_device *hdev)
411{
412 int rc;
413
414 pci_save_state(hdev->pdev);
415
416 rc = hdev->asic_funcs->suspend(hdev);
417 if (rc)
418 dev_err(hdev->dev,
419 "Failed to disable PCI access of device CPU\n");
420
421 /* Shut down the device */
422 pci_disable_device(hdev->pdev);
423 pci_set_power_state(hdev->pdev, PCI_D3hot);
424
425 return 0;
426}
427
428/*
429 * hl_device_resume - initiate device resume
430 *
431 * @hdev: pointer to habanalabs device structure
432 *
433 * Bring the hw back to operating state (all asics).
434 * Returns 0 for success or an error on failure.
435 * Called at driver resume.
436 */
437int hl_device_resume(struct hl_device *hdev)
438{
439 int rc;
440
441 pci_set_power_state(hdev->pdev, PCI_D0);
442 pci_restore_state(hdev->pdev);
443 rc = pci_enable_device(hdev->pdev);
444 if (rc) {
445 dev_err(hdev->dev,
446 "Failed to enable PCI device in resume\n");
447 return rc;
448 }
449
450 rc = hdev->asic_funcs->resume(hdev);
451 if (rc) {
452 dev_err(hdev->dev,
453 "Failed to enable PCI access from device CPU\n");
454 return rc;
455 }
456
457 return 0;
458}
459
460static void hl_device_hard_reset_pending(struct work_struct *work)
461{
462 struct hl_device_reset_work *device_reset_work =
463 container_of(work, struct hl_device_reset_work, reset_work);
464 struct hl_device *hdev = device_reset_work->hdev;
465 u16 pending_cnt = HL_PENDING_RESET_PER_SEC;
466 struct task_struct *task = NULL;
467
468 /* Flush all processes that are inside hl_open */
469 mutex_lock(&hdev->fd_open_cnt_lock);
470
471 while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
472
473 pending_cnt--;
474
475 dev_info(hdev->dev,
476 "Can't HARD reset, waiting for user to close FD\n");
477 ssleep(1);
478 }
479
480 if (atomic_read(&hdev->fd_open_cnt)) {
481 task = get_pid_task(hdev->user_ctx->hpriv->taskpid,
482 PIDTYPE_PID);
483 if (task) {
484 dev_info(hdev->dev, "Killing user processes\n");
485 send_sig(SIGKILL, task, 1);
486 msleep(100);
487
488 put_task_struct(task);
489 }
490 }
491
492 mutex_unlock(&hdev->fd_open_cnt_lock);
493
494 hl_device_reset(hdev, true, true);
495
496 kfree(device_reset_work);
497}
498
499/*
500 * hl_device_reset - reset the device
501 *
502 * @hdev: pointer to habanalabs device structure
503 * @hard_reset: should we do hard reset to all engines or just reset the
504 * compute/dma engines
505 *
506 * Block future CS and wait for pending CS to be enqueued
507 * Call ASIC H/W fini
508 * Flush all completions
509 * Re-initialize all internal data structures
510 * Call ASIC H/W init, late_init
511 * Test queues
512 * Enable device
513 *
514 * Returns 0 for success or an error on failure.
515 */
516int hl_device_reset(struct hl_device *hdev, bool hard_reset,
517 bool from_hard_reset_thread)
518{
519 int i, rc;
520
521 if (!hdev->init_done) {
522 dev_err(hdev->dev,
523 "Can't reset before initialization is done\n");
524 return 0;
525 }
526
527 /*
528 * Prevent concurrency in this function - only one reset should be
529 * done at any given time. We only need to do this if we weren't
530 * called from the dedicated hard reset thread
531 */
532 if (!from_hard_reset_thread) {
533 /* Block future CS/VM/JOB completion operations */
534 rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
535 if (rc)
536 return 0;
537
538 /* This also blocks future CS/VM/JOB completion operations */
539 hdev->disabled = true;
540
541 /*
542 * Flush anyone that is inside the critical section of enqueue
543 * jobs to the H/W
544 */
545 hdev->asic_funcs->hw_queues_lock(hdev);
546 hdev->asic_funcs->hw_queues_unlock(hdev);
547
548 dev_err(hdev->dev, "Going to RESET device!\n");
549 }
550
551again:
552 if ((hard_reset) && (!from_hard_reset_thread)) {
553 struct hl_device_reset_work *device_reset_work;
554
555 if (!hdev->pdev) {
556 dev_err(hdev->dev,
557 "Reset action is NOT supported in simulator\n");
558 rc = -EINVAL;
559 goto out_err;
560 }
561
562 hdev->hard_reset_pending = true;
563
564 device_reset_work = kzalloc(sizeof(*device_reset_work),
565 GFP_ATOMIC);
566 if (!device_reset_work) {
567 rc = -ENOMEM;
568 goto out_err;
569 }
570
571 /*
572 * Because the reset function can't run from interrupt or
573 * from heartbeat work, we need to call the reset function
574 * from a dedicated work
575 */
576 INIT_WORK(&device_reset_work->reset_work,
577 hl_device_hard_reset_pending);
578 device_reset_work->hdev = hdev;
579 schedule_work(&device_reset_work->reset_work);
580
581 return 0;
582 }
583
584 if (hard_reset) {
585 device_late_fini(hdev);
586
587 /*
588 * Now that the heartbeat thread is closed, flush processes
589 * which are sending messages to CPU
590 */
591 mutex_lock(&hdev->send_cpu_message_lock);
592 mutex_unlock(&hdev->send_cpu_message_lock);
593 }
594
595 /*
596 * Halt the engines and disable interrupts so we won't get any more
597 * completions from H/W and we won't have any accesses from the
598 * H/W to the host machine
599 */
600 hdev->asic_funcs->halt_engines(hdev, hard_reset);
601
602 /* Go over all the queues, release all CS and their jobs */
603 hl_cs_rollback_all(hdev);
604
605 if (hard_reset) {
606 /* Release kernel context */
607 if (hl_ctx_put(hdev->kernel_ctx) != 1) {
608 dev_err(hdev->dev,
609 "kernel ctx is alive during hard reset\n");
610 rc = -EBUSY;
611 goto out_err;
612 }
613
614 hdev->kernel_ctx = NULL;
615 }
616
617 /* Reset the H/W. It will be in idle state after this returns */
618 hdev->asic_funcs->hw_fini(hdev, hard_reset);
619
620 if (hard_reset) {
621 hl_vm_fini(hdev);
622 hl_eq_reset(hdev, &hdev->event_queue);
623 }
624
625 /* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */
626 hl_hw_queue_reset(hdev, hard_reset);
627 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
628 hl_cq_reset(hdev, &hdev->completion_queue[i]);
629
630 /* Make sure the setup phase for the user context will run again */
631 if (hdev->user_ctx) {
632 atomic_set(&hdev->user_ctx->thread_restore_token, 1);
633 hdev->user_ctx->thread_restore_wait_token = 0;
634 }
635
636 /* Finished tear-down, starting to re-initialize */
637
638 if (hard_reset) {
639 hdev->device_cpu_disabled = false;
640
641 /* Allocate the kernel context */
642 hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
643 GFP_KERNEL);
644 if (!hdev->kernel_ctx) {
645 rc = -ENOMEM;
646 goto out_err;
647 }
648
649 hdev->user_ctx = NULL;
650
651 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
652 if (rc) {
653 dev_err(hdev->dev,
654 "failed to init kernel ctx in hard reset\n");
655 kfree(hdev->kernel_ctx);
656 hdev->kernel_ctx = NULL;
657 goto out_err;
658 }
659 }
660
661 rc = hdev->asic_funcs->hw_init(hdev);
662 if (rc) {
663 dev_err(hdev->dev,
664 "failed to initialize the H/W after reset\n");
665 goto out_err;
666 }
667
668 hdev->disabled = false;
669
670 /* Check that the communication with the device is working */
671 rc = hdev->asic_funcs->test_queues(hdev);
672 if (rc) {
673 dev_err(hdev->dev,
674 "Failed to detect if device is alive after reset\n");
675 goto out_err;
676 }
677
678 if (hard_reset) {
679 rc = device_late_init(hdev);
680 if (rc) {
681 dev_err(hdev->dev,
682 "Failed late init after hard reset\n");
683 goto out_err;
684 }
685
686 rc = hl_vm_init(hdev);
687 if (rc) {
688 dev_err(hdev->dev,
689 "Failed to init memory module after hard reset\n");
690 goto out_err;
691 }
692
693 hl_set_max_power(hdev, hdev->max_power);
694
695 hdev->hard_reset_pending = false;
696 } else {
697 rc = hdev->asic_funcs->soft_reset_late_init(hdev);
698 if (rc) {
699 dev_err(hdev->dev,
700 "Failed late init after soft reset\n");
701 goto out_err;
702 }
703 }
704
705 atomic_set(&hdev->in_reset, 0);
706
707 if (hard_reset)
708 hdev->hard_reset_cnt++;
709 else
710 hdev->soft_reset_cnt++;
711
712 return 0;
713
714out_err:
715 hdev->disabled = true;
716
717 if (hard_reset) {
718 dev_err(hdev->dev,
719 "Failed to reset! Device is NOT usable\n");
720 hdev->hard_reset_cnt++;
721 } else {
722 dev_err(hdev->dev,
723 "Failed to do soft-reset, trying hard reset\n");
724 hdev->soft_reset_cnt++;
725 hard_reset = true;
726 goto again;
727 }
728
729 atomic_set(&hdev->in_reset, 0);
730
731 return rc;
732}
733
734/*
735 * hl_device_init - main initialization function for habanalabs device
736 *
737 * @hdev: pointer to habanalabs device structure
738 *
739 * Allocate an id for the device, do early initialization and then call the
740 * ASIC specific initialization functions. Finally, create the cdev and the
741 * Linux device to expose it to the user
742 */
743int hl_device_init(struct hl_device *hdev, struct class *hclass)
744{
745 int i, rc, cq_ready_cnt;
746
747 /* Create device */
748 rc = device_setup_cdev(hdev, hclass, hdev->id, &hl_ops);
749
750 if (rc)
751 goto out_disabled;
752
753 /* Initialize ASIC function pointers and perform early init */
754 rc = device_early_init(hdev);
755 if (rc)
756 goto release_device;
757
758 /*
759 * Start calling ASIC initialization. First S/W then H/W and finally
760 * late init
761 */
762 rc = hdev->asic_funcs->sw_init(hdev);
763 if (rc)
764 goto early_fini;
765
766 /*
767 * Initialize the H/W queues. Must be done before hw_init, because
768 * there the addresses of the kernel queue are being written to the
769 * registers of the device
770 */
771 rc = hl_hw_queues_create(hdev);
772 if (rc) {
773 dev_err(hdev->dev, "failed to initialize kernel queues\n");
774 goto sw_fini;
775 }
776
777 /*
778 * Initialize the completion queues. Must be done before hw_init,
779 * because there the addresses of the completion queues are being
780 * passed as arguments to request_irq
781 */
782 hdev->completion_queue =
783 kcalloc(hdev->asic_prop.completion_queues_count,
784 sizeof(*hdev->completion_queue), GFP_KERNEL);
785
786 if (!hdev->completion_queue) {
787 dev_err(hdev->dev, "failed to allocate completion queues\n");
788 rc = -ENOMEM;
789 goto hw_queues_destroy;
790 }
791
792 for (i = 0, cq_ready_cnt = 0;
793 i < hdev->asic_prop.completion_queues_count;
794 i++, cq_ready_cnt++) {
795 rc = hl_cq_init(hdev, &hdev->completion_queue[i], i);
796 if (rc) {
797 dev_err(hdev->dev,
798 "failed to initialize completion queue\n");
799 goto cq_fini;
800 }
801 }
802
803 /*
804 * Initialize the event queue. Must be done before hw_init,
805 * because there the address of the event queue is being
806 * passed as argument to request_irq
807 */
808 rc = hl_eq_init(hdev, &hdev->event_queue);
809 if (rc) {
810 dev_err(hdev->dev, "failed to initialize event queue\n");
811 goto cq_fini;
812 }
813
814 /* Allocate the kernel context */
815 hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
816 if (!hdev->kernel_ctx) {
817 rc = -ENOMEM;
818 goto eq_fini;
819 }
820
821 hdev->user_ctx = NULL;
822
823 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
824 if (rc) {
825 dev_err(hdev->dev, "failed to initialize kernel context\n");
826 goto free_ctx;
827 }
828
829 rc = hl_cb_pool_init(hdev);
830 if (rc) {
831 dev_err(hdev->dev, "failed to initialize CB pool\n");
832 goto release_ctx;
833 }
834
835 rc = hl_sysfs_init(hdev);
836 if (rc) {
837 dev_err(hdev->dev, "failed to initialize sysfs\n");
838 goto free_cb_pool;
839 }
840
841 hl_debugfs_add_device(hdev);
842
843 if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
844 dev_info(hdev->dev,
845 "H/W state is dirty, must reset before initializing\n");
846 hdev->asic_funcs->hw_fini(hdev, true);
847 }
848
849 rc = hdev->asic_funcs->hw_init(hdev);
850 if (rc) {
851 dev_err(hdev->dev, "failed to initialize the H/W\n");
852 rc = 0;
853 goto out_disabled;
854 }
855
856 hdev->disabled = false;
857
858 /* Check that the communication with the device is working */
859 rc = hdev->asic_funcs->test_queues(hdev);
860 if (rc) {
861 dev_err(hdev->dev, "Failed to detect if device is alive\n");
862 rc = 0;
863 goto out_disabled;
864 }
865
866 /* After test_queues, KMD can start sending messages to device CPU */
867
868 rc = device_late_init(hdev);
869 if (rc) {
870 dev_err(hdev->dev, "Failed late initialization\n");
871 rc = 0;
872 goto out_disabled;
873 }
874
875 dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
876 hdev->asic_name,
877 hdev->asic_prop.dram_size / 1024 / 1024 / 1024);
878
879 rc = hl_vm_init(hdev);
880 if (rc) {
881 dev_err(hdev->dev, "Failed to initialize memory module\n");
882 rc = 0;
883 goto out_disabled;
884 }
885
886 /*
887 * hl_hwmon_init must be called after device_late_init, because only
888 * there we get the information from the device about which
889 * hwmon-related sensors the device supports
890 */
891 rc = hl_hwmon_init(hdev);
892 if (rc) {
893 dev_err(hdev->dev, "Failed to initialize hwmon\n");
894 rc = 0;
895 goto out_disabled;
896 }
897
898 dev_notice(hdev->dev,
899 "Successfully added device to habanalabs driver\n");
900
901 hdev->init_done = true;
902
903 return 0;
904
905free_cb_pool:
906 hl_cb_pool_fini(hdev);
907release_ctx:
908 if (hl_ctx_put(hdev->kernel_ctx) != 1)
909 dev_err(hdev->dev,
910 "kernel ctx is still alive on initialization failure\n");
911free_ctx:
912 kfree(hdev->kernel_ctx);
913eq_fini:
914 hl_eq_fini(hdev, &hdev->event_queue);
915cq_fini:
916 for (i = 0 ; i < cq_ready_cnt ; i++)
917 hl_cq_fini(hdev, &hdev->completion_queue[i]);
918 kfree(hdev->completion_queue);
919hw_queues_destroy:
920 hl_hw_queues_destroy(hdev);
921sw_fini:
922 hdev->asic_funcs->sw_fini(hdev);
923early_fini:
924 device_early_fini(hdev);
925release_device:
926 device_destroy(hclass, hdev->dev->devt);
927 cdev_del(&hdev->cdev);
928out_disabled:
929 hdev->disabled = true;
930 if (hdev->pdev)
931 dev_err(&hdev->pdev->dev,
932 "Failed to initialize hl%d. Device is NOT usable !\n",
933 hdev->id);
934 else
935 pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
936 hdev->id);
937
938 return rc;
939}
940
941/*
942 * hl_device_fini - main tear-down function for habanalabs device
943 *
944 * @hdev: pointer to habanalabs device structure
945 *
946 * Destroy the device, call ASIC fini functions and release the id
947 */
948void hl_device_fini(struct hl_device *hdev)
949{
950 int i, rc;
951 ktime_t timeout;
952
953 dev_info(hdev->dev, "Removing device\n");
954
955 /*
956 * This function competes with the reset function, so try to take the
957 * reset atomic and, if we are already in the middle of a reset, wait
958 * until the reset function is finished. The reset function is designed
959 * to always finish (it could take up to a few seconds in the worst case).
960 */
961
962 timeout = ktime_add_us(ktime_get(),
963 HL_PENDING_RESET_PER_SEC * 1000 * 1000 * 4);
964 rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
965 while (rc) {
966 usleep_range(50, 200);
967 rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
968 if (ktime_compare(ktime_get(), timeout) > 0) {
969 WARN(1, "Failed to remove device because reset function did not finish\n");
970 return;
971 }
972 };
973
974 /* Mark device as disabled */
975 hdev->disabled = true;
976
977 hl_hwmon_fini(hdev);
978
979 device_late_fini(hdev);
980
981 hl_debugfs_remove_device(hdev);
982
983 hl_sysfs_fini(hdev);
984
985 /*
986 * Halt the engines and disable interrupts so we won't get any more
987 * completions from H/W and we won't have any accesses from the
988 * H/W to the host machine
989 */
990 hdev->asic_funcs->halt_engines(hdev, true);
991
992 /* Go over all the queues, release all CS and their jobs */
993 hl_cs_rollback_all(hdev);
994
995 hl_cb_pool_fini(hdev);
996
997 /* Release kernel context */
998 if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
999 dev_err(hdev->dev, "kernel ctx is still alive\n");
1000
1001 /* Reset the H/W. It will be in idle state after this returns */
1002 hdev->asic_funcs->hw_fini(hdev, true);
1003
1004 hl_vm_fini(hdev);
1005
1006 hl_eq_fini(hdev, &hdev->event_queue);
1007
1008 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1009 hl_cq_fini(hdev, &hdev->completion_queue[i]);
1010 kfree(hdev->completion_queue);
1011
1012 hl_hw_queues_destroy(hdev);
1013
1014 /* Call ASIC S/W finalize function */
1015 hdev->asic_funcs->sw_fini(hdev);
1016
1017 device_early_fini(hdev);
1018
1019 /* Hide device from user */
1020 device_destroy(hdev->dev->class, hdev->dev->devt);
1021 cdev_del(&hdev->cdev);
1022
1023 pr_info("removed device successfully\n");
1024}
1025
1026/*
1027 * hl_poll_timeout_memory - Periodically poll a host memory address
1028 * until it is not zero or a timeout occurs
1029 * @hdev: pointer to habanalabs device structure
1030 * @addr: Address to poll
1031 * @timeout_us: timeout in us
1032 * @val: Variable to read the value into
1033 *
1034 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
1035 * case, the last read value at @addr is stored in @val. Must not
1036 * be called from atomic context if sleep_us or timeout_us are used.
1037 *
1038 * The function sleeps for 100us with timeout value of
1039 * timeout_us
1040 */
1041int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr,
1042 u32 timeout_us, u32 *val)
1043{
1044 /*
1045 * address in this function points always to a memory location in the
1046 * host's (server's) memory. That location is updated asynchronously
1047 * either by the direct access of the device or by another core
1048 */
1049 u32 *paddr = (u32 *) (uintptr_t) addr;
1050 ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
1051
1052 might_sleep();
1053
1054 for (;;) {
1055 /*
1056 * Flush CPU read/write buffers to make sure we read updates
1057 * done by other cores or by the device
1058 */
1059 mb();
1060 *val = *paddr;
1061 if (*val)
1062 break;
1063 if (ktime_compare(ktime_get(), timeout) > 0) {
1064 *val = *paddr;
1065 break;
1066 }
1067 usleep_range((100 >> 2) + 1, 100);
1068 }
1069
1070 return *val ? 0 : -ETIMEDOUT;
1071}
1072
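A typical user of this helper passes the kernel virtual address of a host-resident word that the device (or another core) updates asynchronously. A minimal usage sketch, assuming a hypothetical fence buffer that is already mapped for the device:

	/* Hypothetical usage sketch: wait up to 100ms for the device to write a
	 * non-zero fence value into the host-resident word at fence_kernel_addr.
	 */
	static int example_wait_for_fence(struct hl_device *hdev,
						void *fence_kernel_addr)
	{
		u32 fence_val;
		int rc;

		rc = hl_poll_timeout_memory(hdev,
						(u64) (uintptr_t) fence_kernel_addr,
						100000, &fence_val);
		if (rc == -ETIMEDOUT)
			dev_err(hdev->dev,
				"device did not signal fence in time\n");

		return rc;
	}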
1073/*
1074 * hl_poll_timeout_device_memory - Periodically poll a device memory address
1075 * until it is not zero or a timeout occurs
1076 * @hdev: pointer to habanalabs device structure
1077 * @addr: Device address to poll
1078 * @timeout_us: timeout in us
1079 * @val: Variable to read the value into
1080 *
1081 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
1082 * case, the last read value at @addr is stored in @val. Must not
1083 * be called from atomic context.
1084 *
1085 * The function sleeps in intervals of up to 100us until the value
1086 * becomes non-zero or timeout_us elapses.
1087 */
1088int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr,
1089 u32 timeout_us, u32 *val)
1090{
1091 ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
1092
1093 might_sleep();
1094
1095 for (;;) {
1096 *val = readl(addr);
1097 if (*val)
1098 break;
1099 if (ktime_compare(ktime_get(), timeout) > 0) {
1100 *val = readl(addr);
1101 break;
1102 }
1103 usleep_range((100 >> 2) + 1, 100);
1104 }
1105
1106 return *val ? 0 : -ETIMEDOUT;
1107}
1108
1109/*
1110 * MMIO register access helper functions.
1111 */
1112
1113/*
1114 * hl_rreg - Read an MMIO register
1115 *
1116 * @hdev: pointer to habanalabs device structure
1117 * @reg: MMIO register offset (in bytes)
1118 *
1119 * Returns the value of the MMIO register we are asked to read
1120 *
1121 */
1122inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
1123{
1124 return readl(hdev->rmmio + reg);
1125}
1126
1127/*
1128 * hl_wreg - Write to an MMIO register
1129 *
1130 * @hdev: pointer to habanalabs device structure
1131 * @reg: MMIO register offset (in bytes)
1132 * @val: 32-bit value
1133 *
1134 * Writes the 32-bit value into the MMIO register
1135 *
1136 */
1137inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
1138{
1139 writel(val, hdev->rmmio + reg);
1140}
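The rest of the driver, including the Goya code added below, accesses registers through RREG32()/WREG32() macros rather than calling these helpers directly. Assuming those macros are thin wrappers with an implicit hdev taken from the caller's scope (their actual definitions live in the driver's common header, not in this hunk), they would be roughly equivalent to:

	/* Sketch of the convenience wrappers; 'hdev' is expected to exist in
	 * the caller's scope. This mirrors how the macros are used throughout
	 * goya.c below.
	 */
	#define RREG32(reg)		hl_rreg(hdev, (reg))
	#define WREG32(reg, val)	hl_wreg(hdev, (reg), (val))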
diff --git a/drivers/misc/habanalabs/goya/Makefile b/drivers/misc/habanalabs/goya/Makefile
new file mode 100644
index 000000000000..e458e5ba500b
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/Makefile
@@ -0,0 +1,3 @@
1subdir-ccflags-y += -I$(src)
2
3HL_GOYA_FILES := goya/goya.o goya/goya_security.o goya/goya_hwmgr.o
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
new file mode 100644
index 000000000000..238dd57c541b
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -0,0 +1,5391 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "goyaP.h"
9#include "include/hw_ip/mmu/mmu_general.h"
10#include "include/hw_ip/mmu/mmu_v1_0.h"
11#include "include/goya/asic_reg/goya_masks.h"
12
13#include <linux/pci.h>
14#include <linux/genalloc.h>
15#include <linux/firmware.h>
16#include <linux/hwmon.h>
17#include <linux/io-64-nonatomic-lo-hi.h>
18#include <linux/io-64-nonatomic-hi-lo.h>
19
20/*
21 * GOYA security scheme:
22 *
23 * 1. Host is protected by:
24 * - Range registers (When MMU is enabled, DMA RR does NOT protect host)
25 * - MMU
26 *
27 * 2. DRAM is protected by:
28 * - Range registers (protect the first 512MB)
29 * - MMU (isolation between users)
30 *
31 * 3. Configuration is protected by:
32 * - Range registers
33 * - Protection bits
34 *
35 * When MMU is disabled:
36 *
37 * QMAN DMA: PQ, CQ, CP, DMA are secured.
38 * PQ, CB and the data are on the host.
39 *
40 * QMAN TPC/MME:
41 * PQ, CQ and CP are not secured.
42 * PQ, CB and the data are on the SRAM/DRAM.
43 *
44 * Since QMAN DMA is secured, KMD is parsing the DMA CB:
45 * - KMD checks DMA pointer
46 * - WREG, MSG_PROT are not allowed.
47 * - MSG_LONG/SHORT are allowed.
48 *
49 * A read/write transaction by the QMAN to a protected area will succeed if
50 * and only if the QMAN's CP is secured and MSG_PROT is used
51 *
52 *
53 * When MMU is enabled:
54 *
55 * QMAN DMA: PQ, CQ and CP are secured.
56 * MMU is set to bypass on the Secure props register of the QMAN.
57 * The reasons we don't enable MMU for PQ, CQ and CP are:
58 * - PQ entry is in kernel address space and KMD doesn't map it.
59 * - CP writes to MSIX register and to kernel address space (completion
60 * queue).
61 *
62 * DMA is not secured, but because CP is secured, KMD still needs to parse
63 * the CB; it just doesn't need to check the DMA addresses.
64 *
65 * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD
66 * doesn't map memory in MMU.
67 *
68 * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode)
69 *
70 * DMA RR does NOT protect host because DMA is not secured
71 *
72 */
73
74#define GOYA_MMU_REGS_NUM 61
75
76#define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
77
78#define GOYA_RESET_TIMEOUT_MSEC 500 /* 500ms */
79#define GOYA_PLDM_RESET_TIMEOUT_MSEC 20000 /* 20s */
80#define GOYA_RESET_WAIT_MSEC 1 /* 1ms */
81#define GOYA_CPU_RESET_WAIT_MSEC 100 /* 100ms */
82#define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
83#define GOYA_CPU_TIMEOUT_USEC 10000000 /* 10s */
84#define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
85#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
86#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
87
88#define GOYA_QMAN0_FENCE_VAL 0xD169B243
89
90#define GOYA_MAX_INITIATORS 20
91
92#define GOYA_MAX_STRING_LEN 20
93
94#define GOYA_CB_POOL_CB_CNT 512
95#define GOYA_CB_POOL_CB_SIZE 0x20000 /* 128KB */
96
97static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
98 "goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
99 "goya cq 4", "goya cpu eq"
100};
101
102static u16 goya_packet_sizes[MAX_PACKET_ID] = {
103 [PACKET_WREG_32] = sizeof(struct packet_wreg32),
104 [PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk),
105 [PACKET_MSG_LONG] = sizeof(struct packet_msg_long),
106 [PACKET_MSG_SHORT] = sizeof(struct packet_msg_short),
107 [PACKET_CP_DMA] = sizeof(struct packet_cp_dma),
108 [PACKET_MSG_PROT] = sizeof(struct packet_msg_prot),
109 [PACKET_FENCE] = sizeof(struct packet_fence),
110 [PACKET_LIN_DMA] = sizeof(struct packet_lin_dma),
111 [PACKET_NOP] = sizeof(struct packet_nop),
112 [PACKET_STOP] = sizeof(struct packet_stop)
113};
114
115static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
116 mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
117 mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
118 mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
119 mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
120 mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
121 mmTPC0_QM_GLBL_SECURE_PROPS,
122 mmTPC0_QM_GLBL_NON_SECURE_PROPS,
123 mmTPC0_CMDQ_GLBL_SECURE_PROPS,
124 mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
125 mmTPC0_CFG_ARUSER,
126 mmTPC0_CFG_AWUSER,
127 mmTPC1_QM_GLBL_SECURE_PROPS,
128 mmTPC1_QM_GLBL_NON_SECURE_PROPS,
129 mmTPC1_CMDQ_GLBL_SECURE_PROPS,
130 mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
131 mmTPC1_CFG_ARUSER,
132 mmTPC1_CFG_AWUSER,
133 mmTPC2_QM_GLBL_SECURE_PROPS,
134 mmTPC2_QM_GLBL_NON_SECURE_PROPS,
135 mmTPC2_CMDQ_GLBL_SECURE_PROPS,
136 mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
137 mmTPC2_CFG_ARUSER,
138 mmTPC2_CFG_AWUSER,
139 mmTPC3_QM_GLBL_SECURE_PROPS,
140 mmTPC3_QM_GLBL_NON_SECURE_PROPS,
141 mmTPC3_CMDQ_GLBL_SECURE_PROPS,
142 mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
143 mmTPC3_CFG_ARUSER,
144 mmTPC3_CFG_AWUSER,
145 mmTPC4_QM_GLBL_SECURE_PROPS,
146 mmTPC4_QM_GLBL_NON_SECURE_PROPS,
147 mmTPC4_CMDQ_GLBL_SECURE_PROPS,
148 mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
149 mmTPC4_CFG_ARUSER,
150 mmTPC4_CFG_AWUSER,
151 mmTPC5_QM_GLBL_SECURE_PROPS,
152 mmTPC5_QM_GLBL_NON_SECURE_PROPS,
153 mmTPC5_CMDQ_GLBL_SECURE_PROPS,
154 mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
155 mmTPC5_CFG_ARUSER,
156 mmTPC5_CFG_AWUSER,
157 mmTPC6_QM_GLBL_SECURE_PROPS,
158 mmTPC6_QM_GLBL_NON_SECURE_PROPS,
159 mmTPC6_CMDQ_GLBL_SECURE_PROPS,
160 mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
161 mmTPC6_CFG_ARUSER,
162 mmTPC6_CFG_AWUSER,
163 mmTPC7_QM_GLBL_SECURE_PROPS,
164 mmTPC7_QM_GLBL_NON_SECURE_PROPS,
165 mmTPC7_CMDQ_GLBL_SECURE_PROPS,
166 mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
167 mmTPC7_CFG_ARUSER,
168 mmTPC7_CFG_AWUSER,
169 mmMME_QM_GLBL_SECURE_PROPS,
170 mmMME_QM_GLBL_NON_SECURE_PROPS,
171 mmMME_CMDQ_GLBL_SECURE_PROPS,
172 mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
173 mmMME_SBA_CONTROL_DATA,
174 mmMME_SBB_CONTROL_DATA,
175 mmMME_SBC_CONTROL_DATA,
176 mmMME_WBC_CONTROL_DATA
177};
178
179#define GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE 121
180
181static u32 goya_non_fatal_events[GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE] = {
182 GOYA_ASYNC_EVENT_ID_PCIE_IF,
183 GOYA_ASYNC_EVENT_ID_TPC0_ECC,
184 GOYA_ASYNC_EVENT_ID_TPC1_ECC,
185 GOYA_ASYNC_EVENT_ID_TPC2_ECC,
186 GOYA_ASYNC_EVENT_ID_TPC3_ECC,
187 GOYA_ASYNC_EVENT_ID_TPC4_ECC,
188 GOYA_ASYNC_EVENT_ID_TPC5_ECC,
189 GOYA_ASYNC_EVENT_ID_TPC6_ECC,
190 GOYA_ASYNC_EVENT_ID_TPC7_ECC,
191 GOYA_ASYNC_EVENT_ID_MME_ECC,
192 GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
193 GOYA_ASYNC_EVENT_ID_MMU_ECC,
194 GOYA_ASYNC_EVENT_ID_DMA_MACRO,
195 GOYA_ASYNC_EVENT_ID_DMA_ECC,
196 GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
197 GOYA_ASYNC_EVENT_ID_PSOC_MEM,
198 GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
199 GOYA_ASYNC_EVENT_ID_SRAM0,
200 GOYA_ASYNC_EVENT_ID_SRAM1,
201 GOYA_ASYNC_EVENT_ID_SRAM2,
202 GOYA_ASYNC_EVENT_ID_SRAM3,
203 GOYA_ASYNC_EVENT_ID_SRAM4,
204 GOYA_ASYNC_EVENT_ID_SRAM5,
205 GOYA_ASYNC_EVENT_ID_SRAM6,
206 GOYA_ASYNC_EVENT_ID_SRAM7,
207 GOYA_ASYNC_EVENT_ID_SRAM8,
208 GOYA_ASYNC_EVENT_ID_SRAM9,
209 GOYA_ASYNC_EVENT_ID_SRAM10,
210 GOYA_ASYNC_EVENT_ID_SRAM11,
211 GOYA_ASYNC_EVENT_ID_SRAM12,
212 GOYA_ASYNC_EVENT_ID_SRAM13,
213 GOYA_ASYNC_EVENT_ID_SRAM14,
214 GOYA_ASYNC_EVENT_ID_SRAM15,
215 GOYA_ASYNC_EVENT_ID_SRAM16,
216 GOYA_ASYNC_EVENT_ID_SRAM17,
217 GOYA_ASYNC_EVENT_ID_SRAM18,
218 GOYA_ASYNC_EVENT_ID_SRAM19,
219 GOYA_ASYNC_EVENT_ID_SRAM20,
220 GOYA_ASYNC_EVENT_ID_SRAM21,
221 GOYA_ASYNC_EVENT_ID_SRAM22,
222 GOYA_ASYNC_EVENT_ID_SRAM23,
223 GOYA_ASYNC_EVENT_ID_SRAM24,
224 GOYA_ASYNC_EVENT_ID_SRAM25,
225 GOYA_ASYNC_EVENT_ID_SRAM26,
226 GOYA_ASYNC_EVENT_ID_SRAM27,
227 GOYA_ASYNC_EVENT_ID_SRAM28,
228 GOYA_ASYNC_EVENT_ID_SRAM29,
229 GOYA_ASYNC_EVENT_ID_GIC500,
230 GOYA_ASYNC_EVENT_ID_PLL0,
231 GOYA_ASYNC_EVENT_ID_PLL1,
232 GOYA_ASYNC_EVENT_ID_PLL3,
233 GOYA_ASYNC_EVENT_ID_PLL4,
234 GOYA_ASYNC_EVENT_ID_PLL5,
235 GOYA_ASYNC_EVENT_ID_PLL6,
236 GOYA_ASYNC_EVENT_ID_AXI_ECC,
237 GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
238 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
239 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
240 GOYA_ASYNC_EVENT_ID_PCIE_DEC,
241 GOYA_ASYNC_EVENT_ID_TPC0_DEC,
242 GOYA_ASYNC_EVENT_ID_TPC1_DEC,
243 GOYA_ASYNC_EVENT_ID_TPC2_DEC,
244 GOYA_ASYNC_EVENT_ID_TPC3_DEC,
245 GOYA_ASYNC_EVENT_ID_TPC4_DEC,
246 GOYA_ASYNC_EVENT_ID_TPC5_DEC,
247 GOYA_ASYNC_EVENT_ID_TPC6_DEC,
248 GOYA_ASYNC_EVENT_ID_TPC7_DEC,
249 GOYA_ASYNC_EVENT_ID_MME_WACS,
250 GOYA_ASYNC_EVENT_ID_MME_WACSD,
251 GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
252 GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
253 GOYA_ASYNC_EVENT_ID_PSOC,
254 GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
255 GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
256 GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
257 GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
258 GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
259 GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
260 GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
261 GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
262 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
263 GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
264 GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
265 GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
266 GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
267 GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
268 GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
269 GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
270 GOYA_ASYNC_EVENT_ID_TPC0_QM,
271 GOYA_ASYNC_EVENT_ID_TPC1_QM,
272 GOYA_ASYNC_EVENT_ID_TPC2_QM,
273 GOYA_ASYNC_EVENT_ID_TPC3_QM,
274 GOYA_ASYNC_EVENT_ID_TPC4_QM,
275 GOYA_ASYNC_EVENT_ID_TPC5_QM,
276 GOYA_ASYNC_EVENT_ID_TPC6_QM,
277 GOYA_ASYNC_EVENT_ID_TPC7_QM,
278 GOYA_ASYNC_EVENT_ID_MME_QM,
279 GOYA_ASYNC_EVENT_ID_MME_CMDQ,
280 GOYA_ASYNC_EVENT_ID_DMA0_QM,
281 GOYA_ASYNC_EVENT_ID_DMA1_QM,
282 GOYA_ASYNC_EVENT_ID_DMA2_QM,
283 GOYA_ASYNC_EVENT_ID_DMA3_QM,
284 GOYA_ASYNC_EVENT_ID_DMA4_QM,
285 GOYA_ASYNC_EVENT_ID_DMA0_CH,
286 GOYA_ASYNC_EVENT_ID_DMA1_CH,
287 GOYA_ASYNC_EVENT_ID_DMA2_CH,
288 GOYA_ASYNC_EVENT_ID_DMA3_CH,
289 GOYA_ASYNC_EVENT_ID_DMA4_CH,
290 GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
291 GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
292 GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
293 GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
294 GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
295 GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
296 GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
297 GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
298 GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
299 GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
300 GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
301 GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
302 GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
303};
304
305static int goya_armcp_info_get(struct hl_device *hdev);
306static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
307static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
308static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
309static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
310 u64 phys_addr);
311
312static void goya_get_fixed_properties(struct hl_device *hdev)
313{
314 struct asic_fixed_properties *prop = &hdev->asic_prop;
315 int i;
316
317 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
318 prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
319 prop->hw_queues_props[i].kmd_only = 0;
320 }
321
322 for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
323 prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
324 prop->hw_queues_props[i].kmd_only = 1;
325 }
326
327 for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
328 NUMBER_OF_INT_HW_QUEUES; i++) {
329 prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
330 prop->hw_queues_props[i].kmd_only = 0;
331 }
332
333 for (; i < HL_MAX_QUEUES; i++)
334 prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
335
336 prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
337
338 prop->dram_base_address = DRAM_PHYS_BASE;
339 prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
340 prop->dram_end_address = prop->dram_base_address + prop->dram_size;
341 prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
342
343 prop->sram_base_address = SRAM_BASE_ADDR;
344 prop->sram_size = SRAM_SIZE;
345 prop->sram_end_address = prop->sram_base_address + prop->sram_size;
346 prop->sram_user_base_address = prop->sram_base_address +
347 SRAM_USER_BASE_OFFSET;
348
349 prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
350 prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
351 if (hdev->pldm)
352 prop->mmu_pgt_size = 0x800000; /* 8MB */
353 else
354 prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
355 prop->mmu_pte_size = HL_PTE_SIZE;
356 prop->mmu_hop_table_size = HOP_TABLE_SIZE;
357 prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
358 prop->dram_page_size = PAGE_SIZE_2MB;
359
360 prop->host_phys_base_address = HOST_PHYS_BASE;
361 prop->va_space_host_start_address = VA_HOST_SPACE_START;
362 prop->va_space_host_end_address = VA_HOST_SPACE_END;
363 prop->va_space_dram_start_address = VA_DDR_SPACE_START;
364 prop->va_space_dram_end_address = VA_DDR_SPACE_END;
365 prop->dram_size_for_default_page_mapping =
366 prop->va_space_dram_end_address;
367 prop->cfg_size = CFG_SIZE;
368 prop->max_asid = MAX_ASID;
369 prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
370 prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
371 prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
372 prop->max_power_default = MAX_POWER_DEFAULT;
373 prop->tpc_enabled_mask = TPC_ENABLED_MASK;
374
375 prop->high_pll = PLL_HIGH_DEFAULT;
376}
377
378int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
379{
380 struct armcp_packet pkt;
381
382 memset(&pkt, 0, sizeof(pkt));
383
384 pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
385
386 return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
387 sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
388}
389
390/*
391 * goya_pci_bars_map - Map PCI BARS of Goya device
392 *
393 * @hdev: pointer to hl_device structure
394 *
395 * Request PCI regions and map them to kernel virtual addresses.
396 * Returns 0 on success
397 *
398 */
399static int goya_pci_bars_map(struct hl_device *hdev)
400{
401 struct pci_dev *pdev = hdev->pdev;
402 int rc;
403
404 rc = pci_request_regions(pdev, HL_NAME);
405 if (rc) {
406 dev_err(hdev->dev, "Cannot obtain PCI resources\n");
407 return rc;
408 }
409
410 hdev->pcie_bar[SRAM_CFG_BAR_ID] =
411 pci_ioremap_bar(pdev, SRAM_CFG_BAR_ID);
412 if (!hdev->pcie_bar[SRAM_CFG_BAR_ID]) {
413 dev_err(hdev->dev, "pci_ioremap_bar failed for CFG\n");
414 rc = -ENODEV;
415 goto err_release_regions;
416 }
417
418 hdev->pcie_bar[MSIX_BAR_ID] = pci_ioremap_bar(pdev, MSIX_BAR_ID);
419 if (!hdev->pcie_bar[MSIX_BAR_ID]) {
420 dev_err(hdev->dev, "pci_ioremap_bar failed for MSIX\n");
421 rc = -ENODEV;
422 goto err_unmap_sram_cfg;
423 }
424
425 hdev->pcie_bar[DDR_BAR_ID] = pci_ioremap_wc_bar(pdev, DDR_BAR_ID);
426 if (!hdev->pcie_bar[DDR_BAR_ID]) {
427 dev_err(hdev->dev, "pci_ioremap_bar failed for DDR\n");
428 rc = -ENODEV;
429 goto err_unmap_msix;
430 }
431
432 hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
433 (CFG_BASE - SRAM_BASE_ADDR);
434
435 return 0;
436
437err_unmap_msix:
438 iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
439err_unmap_sram_cfg:
440 iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
441err_release_regions:
442 pci_release_regions(pdev);
443
444 return rc;
445}
446
447/*
448 * goya_pci_bars_unmap - Unmap PCI BARS of Goya device
449 *
450 * @hdev: pointer to hl_device structure
451 *
452 * Release all PCI BARS and unmap their virtual addresses
453 *
454 */
455static void goya_pci_bars_unmap(struct hl_device *hdev)
456{
457 struct pci_dev *pdev = hdev->pdev;
458
459 iounmap(hdev->pcie_bar[DDR_BAR_ID]);
460 iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
461 iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
462 pci_release_regions(pdev);
463}
464
465/*
466 * goya_elbi_write - Write through the ELBI interface
467 *
468 * @hdev: pointer to hl_device structure
469 *
470 * Returns 0 on success, a negative error code on failure
471 *
472 */
473static int goya_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
474{
475 struct pci_dev *pdev = hdev->pdev;
476 ktime_t timeout;
477 u32 val;
478
479 /* Clear previous status */
480 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);
481
482 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
483 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
484 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
485 PCI_CONFIG_ELBI_CTRL_WRITE);
486
487 timeout = ktime_add_ms(ktime_get(), 10);
488 for (;;) {
489 pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
490 if (val & PCI_CONFIG_ELBI_STS_MASK)
491 break;
492 if (ktime_compare(ktime_get(), timeout) > 0) {
493 pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
494 &val);
495 break;
496 }
497 usleep_range(300, 500);
498 }
499
500 if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
501 return 0;
502
503 if (val & PCI_CONFIG_ELBI_STS_ERR) {
504 dev_err(hdev->dev, "Error writing to ELBI\n");
505 return -EIO;
506 }
507
508 if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
509 dev_err(hdev->dev, "ELBI write didn't finish in time\n");
510 return -EIO;
511 }
512
513 dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
514 return -EIO;
515}
516
517/*
518 * goya_iatu_write - iATU write routine
519 *
520 * @hdev: pointer to hl_device structure
521 *
522 */
523static int goya_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
524{
525 u32 dbi_offset;
526 int rc;
527
528 dbi_offset = addr & 0xFFF;
529
530 rc = goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0x00300000);
531 rc |= goya_elbi_write(hdev, mmPCIE_DBI_BASE + dbi_offset, data);
532
533 if (rc)
534 return -EIO;
535
536 return 0;
537}
538
539static void goya_reset_link_through_bridge(struct hl_device *hdev)
540{
541 struct pci_dev *pdev = hdev->pdev;
542 struct pci_dev *parent_port;
543 u16 val;
544
545 parent_port = pdev->bus->self;
546 pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
547 val |= PCI_BRIDGE_CTL_BUS_RESET;
548 pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
549 ssleep(1);
550
551 val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
552 pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
553 ssleep(3);
554}
555
556/*
557 * goya_set_ddr_bar_base - set DDR bar to map specific device address
558 *
559 * @hdev: pointer to hl_device structure
560 * @addr: address in DDR. Must be aligned to DDR bar size
561 *
562 * This function configures the iATU so that the DDR bar will start at the
563 * specified addr.
564 *
565 */
566static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
567{
568 struct goya_device *goya = hdev->asic_specific;
569 int rc;
570
571 if ((goya) && (goya->ddr_bar_cur_addr == addr))
572 return 0;
573
574 /* Inbound Region 1 - Bar 4 - Point to DDR */
575 rc = goya_iatu_write(hdev, 0x314, lower_32_bits(addr));
576 rc |= goya_iatu_write(hdev, 0x318, upper_32_bits(addr));
577 rc |= goya_iatu_write(hdev, 0x300, 0);
578 /* Enable + Bar match + match enable + Bar 4 */
579 rc |= goya_iatu_write(hdev, 0x304, 0xC0080400);
580
581 /* Return the DBI window to the default location */
582 rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
583 rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);
584
585 if (rc) {
586 dev_err(hdev->dev, "failed to map DDR bar to 0x%08llx\n", addr);
587 return -EIO;
588 }
589
590 if (goya)
591 goya->ddr_bar_cur_addr = addr;
592
593 return 0;
594}
595
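Because the DDR BAR exposes only a window of dram_pci_bar_size bytes at a time, accessing an arbitrary DRAM address means moving the window first and then using the offset inside the mapped BAR. A minimal sketch of such a helper (hypothetical, assuming the BAR size is a power of two and addr lies inside DRAM):

	/* Hypothetical helper: read one 32-bit word from device DRAM through
	 * the DDR BAR window by re-pointing the window at a BAR-size-aligned
	 * base and reading at the remaining offset.
	 */
	static u32 example_read_dram_u32(struct hl_device *hdev, u64 addr)
	{
		u64 bar_size = hdev->asic_prop.dram_pci_bar_size;
		u64 bar_base = addr & ~(bar_size - 1);

		if (goya_set_ddr_bar_base(hdev, bar_base))
			return 0;

		return readl(hdev->pcie_bar[DDR_BAR_ID] + (addr - bar_base));
	}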
596/*
597 * goya_init_iatu - Initialize the iATU unit inside the PCI controller
598 *
599 * @hdev: pointer to hl_device structure
600 *
601 * This is needed in case the firmware doesn't initialize the iATU
602 *
603 */
604static int goya_init_iatu(struct hl_device *hdev)
605{
606 int rc;
607
608 /* Inbound Region 0 - Bar 0 - Point to SRAM_BASE_ADDR */
609 rc = goya_iatu_write(hdev, 0x114, lower_32_bits(SRAM_BASE_ADDR));
610 rc |= goya_iatu_write(hdev, 0x118, upper_32_bits(SRAM_BASE_ADDR));
611 rc |= goya_iatu_write(hdev, 0x100, 0);
612 /* Enable + Bar match + match enable */
613 rc |= goya_iatu_write(hdev, 0x104, 0xC0080000);
614
615 /* Inbound Region 1 - Bar 4 - Point to DDR */
616 rc |= goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
617
618 /* Outbound Region 0 - Point to Host */
619 rc |= goya_iatu_write(hdev, 0x008, lower_32_bits(HOST_PHYS_BASE));
620 rc |= goya_iatu_write(hdev, 0x00C, upper_32_bits(HOST_PHYS_BASE));
621 rc |= goya_iatu_write(hdev, 0x010,
622 lower_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
623 rc |= goya_iatu_write(hdev, 0x014, 0);
624 rc |= goya_iatu_write(hdev, 0x018, 0);
625 rc |= goya_iatu_write(hdev, 0x020,
626 upper_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
627 /* Increase region size */
628 rc |= goya_iatu_write(hdev, 0x000, 0x00002000);
629 /* Enable */
630 rc |= goya_iatu_write(hdev, 0x004, 0x80000000);
631
632 /* Return the DBI window to the default location */
633 rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
634 rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);
635
636 if (rc)
637 return -EIO;
638
639 return 0;
640}
641
642/*
643 * goya_early_init - GOYA early initialization code
644 *
645 * @hdev: pointer to hl_device structure
646 *
647 * Verify PCI bars
648 * Set DMA masks
649 * PCI controller initialization
650 * Map PCI bars
651 *
652 */
653static int goya_early_init(struct hl_device *hdev)
654{
655 struct asic_fixed_properties *prop = &hdev->asic_prop;
656 struct pci_dev *pdev = hdev->pdev;
657 u32 val;
658 int rc;
659
660 goya_get_fixed_properties(hdev);
661
662 /* Check BAR sizes */
663 if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
664 dev_err(hdev->dev,
665 "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
666 SRAM_CFG_BAR_ID,
667 (unsigned long long) pci_resource_len(pdev,
668 SRAM_CFG_BAR_ID),
669 CFG_BAR_SIZE);
670 return -ENODEV;
671 }
672
673 if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
674 dev_err(hdev->dev,
675 "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
676 MSIX_BAR_ID,
677 (unsigned long long) pci_resource_len(pdev,
678 MSIX_BAR_ID),
679 MSIX_BAR_SIZE);
680 return -ENODEV;
681 }
682
683 prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
684
685 /* set DMA mask for GOYA */
686 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
687 if (rc) {
688 dev_warn(hdev->dev, "Unable to set pci dma mask to 39 bits\n");
689 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
690 if (rc) {
691 dev_err(hdev->dev,
692 "Unable to set pci dma mask to 32 bits\n");
693 return rc;
694 }
695 }
696
697 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
698 if (rc) {
699 dev_warn(hdev->dev,
700 "Unable to set pci consistent dma mask to 39 bits\n");
701 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
702 if (rc) {
703 dev_err(hdev->dev,
704 "Unable to set pci consistent dma mask to 32 bits\n");
705 return rc;
706 }
707 }
708
709 if (hdev->reset_pcilink)
710 goya_reset_link_through_bridge(hdev);
711
712 rc = pci_enable_device_mem(pdev);
713 if (rc) {
714 dev_err(hdev->dev, "can't enable PCI device\n");
715 return rc;
716 }
717
718 pci_set_master(pdev);
719
720 rc = goya_init_iatu(hdev);
721 if (rc) {
722 dev_err(hdev->dev, "Failed to initialize iATU\n");
723 goto disable_device;
724 }
725
726 rc = goya_pci_bars_map(hdev);
727 if (rc) {
728 dev_err(hdev->dev, "Failed to initialize PCI BARS\n");
729 goto disable_device;
730 }
731
732 if (!hdev->pldm) {
733 val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
734 if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
735 dev_warn(hdev->dev,
736 "PCI strap is not configured correctly, PCI bus errors may occur\n");
737 }
738
739 return 0;
740
741disable_device:
742 pci_clear_master(pdev);
743 pci_disable_device(pdev);
744
745 return rc;
746}
747
748/*
749 * goya_early_fini - GOYA early finalization code
750 *
751 * @hdev: pointer to hl_device structure
752 *
753 * Unmap PCI bars
754 *
755 */
756static int goya_early_fini(struct hl_device *hdev)
757{
758 goya_pci_bars_unmap(hdev);
759
760 pci_clear_master(hdev->pdev);
761 pci_disable_device(hdev->pdev);
762
763 return 0;
764}
765
766/*
767 * goya_fetch_psoc_frequency - Fetch PSOC frequency values
768 *
769 * @hdev: pointer to hl_device structure
770 *
771 */
772static void goya_fetch_psoc_frequency(struct hl_device *hdev)
773{
774 struct asic_fixed_properties *prop = &hdev->asic_prop;
775
776 prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
777 prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
778 prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
779 prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
780}
781
782/*
783 * goya_late_init - GOYA late initialization code
784 *
785 * @hdev: pointer to hl_device structure
786 *
787 * Get ArmCP info and send message to CPU to enable PCI access
788 */
789static int goya_late_init(struct hl_device *hdev)
790{
791 struct asic_fixed_properties *prop = &hdev->asic_prop;
792 struct goya_device *goya = hdev->asic_specific;
793 int rc;
794
795 rc = goya->armcp_info_get(hdev);
796 if (rc) {
797 dev_err(hdev->dev, "Failed to get armcp info\n");
798 return rc;
799 }
800
801 /* Now that we have the DRAM size in ASIC prop, we need to check
802 * its size and configure the DMA_IF DDR wrap protection (which is in
803 * the MMU block) accordingly. The value is the log2 of the DRAM size
804 */
805 WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
806
807 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
808 if (rc) {
809 dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
810 return rc;
811 }
812
813 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
814 GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
815
816 goya_fetch_psoc_frequency(hdev);
817
818 rc = goya_mmu_clear_pgt_range(hdev);
819 if (rc) {
820 dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
821 goto disable_pci_access;
822 }
823
824 rc = goya_mmu_set_dram_default_page(hdev);
825 if (rc) {
826 dev_err(hdev->dev, "Failed to set DRAM default page\n");
827 goto disable_pci_access;
828 }
829
830 return 0;
831
832disable_pci_access:
833 goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
834
835 return rc;
836}
837
838/*
839 * goya_late_fini - GOYA late tear-down code
840 *
841 * @hdev: pointer to hl_device structure
842 *
843 * Free sensors allocated structures
844 */
845void goya_late_fini(struct hl_device *hdev)
846{
847 const struct hwmon_channel_info **channel_info_arr;
848 int i = 0;
849
850 if (!hdev->hl_chip_info->info)
851 return;
852
853 channel_info_arr = hdev->hl_chip_info->info;
854
855 while (channel_info_arr[i]) {
856 kfree(channel_info_arr[i]->config);
857 kfree(channel_info_arr[i]);
858 i++;
859 }
860
861 kfree(channel_info_arr);
862
863 hdev->hl_chip_info->info = NULL;
864}
865
866/*
867 * goya_sw_init - Goya software initialization code
868 *
869 * @hdev: pointer to hl_device structure
870 *
871 */
872static int goya_sw_init(struct hl_device *hdev)
873{
874 struct goya_device *goya;
875 int rc;
876
877 /* Allocate device structure */
878 goya = kzalloc(sizeof(*goya), GFP_KERNEL);
879 if (!goya)
880 return -ENOMEM;
881
882 goya->test_cpu_queue = goya_test_cpu_queue;
883 goya->armcp_info_get = goya_armcp_info_get;
884
885 /* according to goya_init_iatu */
886 goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;
887
888 goya->mme_clk = GOYA_PLL_FREQ_LOW;
889 goya->tpc_clk = GOYA_PLL_FREQ_LOW;
890 goya->ic_clk = GOYA_PLL_FREQ_LOW;
891
892 hdev->asic_specific = goya;
893
894 /* Create DMA pool for small allocations */
895 hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
896 &hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
897 if (!hdev->dma_pool) {
898 dev_err(hdev->dev, "failed to create DMA pool\n");
899 rc = -ENOMEM;
900 goto free_goya_device;
901 }
902
903 hdev->cpu_accessible_dma_mem =
904 hdev->asic_funcs->dma_alloc_coherent(hdev,
905 CPU_ACCESSIBLE_MEM_SIZE,
906 &hdev->cpu_accessible_dma_address,
907 GFP_KERNEL | __GFP_ZERO);
908
909 if (!hdev->cpu_accessible_dma_mem) {
910 dev_err(hdev->dev,
911 "failed to allocate %d of dma memory for CPU accessible memory space\n",
912 CPU_ACCESSIBLE_MEM_SIZE);
913 rc = -ENOMEM;
914 goto free_dma_pool;
915 }
916
917 hdev->cpu_accessible_dma_pool = gen_pool_create(CPU_PKT_SHIFT, -1);
918 if (!hdev->cpu_accessible_dma_pool) {
919 dev_err(hdev->dev,
920 "Failed to create CPU accessible DMA pool\n");
921 rc = -ENOMEM;
922 goto free_cpu_pq_dma_mem;
923 }
924
925 rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
926 (uintptr_t) hdev->cpu_accessible_dma_mem,
927 CPU_ACCESSIBLE_MEM_SIZE, -1);
928 if (rc) {
929 dev_err(hdev->dev,
930 "Failed to add memory to CPU accessible DMA pool\n");
931 rc = -EFAULT;
932 goto free_cpu_pq_pool;
933 }
934
935 spin_lock_init(&goya->hw_queues_lock);
936
937 return 0;
938
939free_cpu_pq_pool:
940 gen_pool_destroy(hdev->cpu_accessible_dma_pool);
941free_cpu_pq_dma_mem:
942 hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
943 hdev->cpu_accessible_dma_mem,
944 hdev->cpu_accessible_dma_address);
945free_dma_pool:
946 dma_pool_destroy(hdev->dma_pool);
947free_goya_device:
948 kfree(goya);
949
950 return rc;
951}
952
953/*
954 * goya_sw_fini - Goya software tear-down code
955 *
956 * @hdev: pointer to hl_device structure
957 *
958 */
959static int goya_sw_fini(struct hl_device *hdev)
960{
961 struct goya_device *goya = hdev->asic_specific;
962
963 gen_pool_destroy(hdev->cpu_accessible_dma_pool);
964
965 hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
966 hdev->cpu_accessible_dma_mem,
967 hdev->cpu_accessible_dma_address);
968
969 dma_pool_destroy(hdev->dma_pool);
970
971 kfree(goya);
972
973 return 0;
974}
975
976static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
977 dma_addr_t bus_address)
978{
979 struct goya_device *goya = hdev->asic_specific;
980 u32 mtr_base_lo, mtr_base_hi;
981 u32 so_base_lo, so_base_hi;
982 u32 gic_base_lo, gic_base_hi;
983 u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);
984
985 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
986 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
987 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
988 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
989
990 gic_base_lo =
991 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
992 gic_base_hi =
993 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
994
995 WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
996 WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));
997
998 WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
999 WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
1000 WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);
1001
1002 WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1003 WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1004 WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1005 WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1006 WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1007 WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1008 WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
1009 GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);
1010
1011 /* PQ has buffer of 2 cache lines, while CQ has 8 lines */
1012 WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
1013 WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);
1014
1015 if (goya->hw_cap_initialized & HW_CAP_MMU)
1016 WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
1017 else
1018 WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);
1019
1020 WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, QMAN_DMA_ERR_MSG_EN);
1021 WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
1022}
1023
1024static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
1025{
1026 u32 gic_base_lo, gic_base_hi;
1027 u64 sob_addr;
1028 u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);
1029
1030 gic_base_lo =
1031 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1032 gic_base_hi =
1033 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1034
1035 WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
1036 WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
1037 WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
1038 GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);
1039
1040 if (dma_id)
1041 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
1042 (dma_id - 1) * 4;
1043 else
1044 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
1045
1046 WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + reg_off, lower_32_bits(sob_addr));
1047 WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
1048 WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
1049}
1050
1051/*
1052 * goya_init_dma_qmans - Initialize QMAN DMA registers
1053 *
1054 * @hdev: pointer to hl_device structure
1055 *
1056 * Initialize the H/W registers of the QMAN DMA channels
1057 *
1058 */
1059static void goya_init_dma_qmans(struct hl_device *hdev)
1060{
1061 struct goya_device *goya = hdev->asic_specific;
1062 struct hl_hw_queue *q;
1063 dma_addr_t bus_address;
1064 int i;
1065
1066 if (goya->hw_cap_initialized & HW_CAP_DMA)
1067 return;
1068
1069 q = &hdev->kernel_queues[0];
1070
1071 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
1072 bus_address = q->bus_address +
1073 hdev->asic_prop.host_phys_base_address;
1074
1075 goya_init_dma_qman(hdev, i, bus_address);
1076 goya_init_dma_ch(hdev, i);
1077 }
1078
1079 goya->hw_cap_initialized |= HW_CAP_DMA;
1080}
1081
1082/*
1083 * goya_disable_external_queues - Disable external queues
1084 *
1085 * @hdev: pointer to hl_device structure
1086 *
1087 */
1088static void goya_disable_external_queues(struct hl_device *hdev)
1089{
1090 WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
1091 WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
1092 WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
1093 WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
1094 WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
1095}
1096
1097static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
1098 u32 cp_sts_reg, u32 glbl_sts0_reg)
1099{
1100 int rc;
1101 u32 status;
1102
1103 /* use the values of TPC0 as they are all the same */
1104
1105 WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
1106
1107 status = RREG32(cp_sts_reg);
1108 if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
1109 rc = hl_poll_timeout(
1110 hdev,
1111 cp_sts_reg,
1112 status,
1113 !(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
1114 1000,
1115 QMAN_FENCE_TIMEOUT_USEC);
1116
1117 /* if QMAN is stuck in fence no need to check for stop */
1118 if (rc)
1119 return 0;
1120 }
1121
1122 rc = hl_poll_timeout(
1123 hdev,
1124 glbl_sts0_reg,
1125 status,
1126 (status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
1127 1000,
1128 QMAN_STOP_TIMEOUT_USEC);
1129
1130 if (rc) {
1131 dev_err(hdev->dev,
1132 "Timeout while waiting for QMAN to stop\n");
1133 return -EINVAL;
1134 }
1135
1136 return 0;
1137}
1138
1139/*
1140 * goya_stop_external_queues - Stop external queues
1141 *
1142 * @hdev: pointer to hl_device structure
1143 *
1144 * Returns 0 on success
1145 *
1146 */
1147static int goya_stop_external_queues(struct hl_device *hdev)
1148{
1149 int rc, retval = 0;
1150
1151 rc = goya_stop_queue(hdev,
1152 mmDMA_QM_0_GLBL_CFG1,
1153 mmDMA_QM_0_CP_STS,
1154 mmDMA_QM_0_GLBL_STS0);
1155
1156 if (rc) {
1157 dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
1158 retval = -EIO;
1159 }
1160
1161 rc = goya_stop_queue(hdev,
1162 mmDMA_QM_1_GLBL_CFG1,
1163 mmDMA_QM_1_CP_STS,
1164 mmDMA_QM_1_GLBL_STS0);
1165
1166 if (rc) {
1167 dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
1168 retval = -EIO;
1169 }
1170
1171 rc = goya_stop_queue(hdev,
1172 mmDMA_QM_2_GLBL_CFG1,
1173 mmDMA_QM_2_CP_STS,
1174 mmDMA_QM_2_GLBL_STS0);
1175
1176 if (rc) {
1177 dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
1178 retval = -EIO;
1179 }
1180
1181 rc = goya_stop_queue(hdev,
1182 mmDMA_QM_3_GLBL_CFG1,
1183 mmDMA_QM_3_CP_STS,
1184 mmDMA_QM_3_GLBL_STS0);
1185
1186 if (rc) {
1187 dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
1188 retval = -EIO;
1189 }
1190
1191 rc = goya_stop_queue(hdev,
1192 mmDMA_QM_4_GLBL_CFG1,
1193 mmDMA_QM_4_CP_STS,
1194 mmDMA_QM_4_GLBL_STS0);
1195
1196 if (rc) {
1197 dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
1198 retval = -EIO;
1199 }
1200
1201 return retval;
1202}
1203
1204static void goya_resume_external_queues(struct hl_device *hdev)
1205{
1206 WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
1207 WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
1208 WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
1209 WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
1210 WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
1211}
1212
1213/*
1214 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
1215 *
1216 * @hdev: pointer to hl_device structure
1217 *
1218 * Returns 0 on success
1219 *
1220 */
1221static int goya_init_cpu_queues(struct hl_device *hdev)
1222{
1223 struct goya_device *goya = hdev->asic_specific;
1224 struct hl_eq *eq;
1225 dma_addr_t bus_address;
1226 u32 status;
1227 struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
1228 int err;
1229
1230 if (!hdev->cpu_queues_enable)
1231 return 0;
1232
1233 if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
1234 return 0;
1235
1236 eq = &hdev->event_queue;
1237
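	/*
	 * Pass the host addresses and sizes to the embedded CPU through the
	 * PSOC scratchpad registers: CPU PQ base (0/1), EQ base (2/3), CPU
	 * accessible memory base (8/9), and the corresponding sizes (5/4/10)
	 */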
1238 bus_address = cpu_pq->bus_address +
1239 hdev->asic_prop.host_phys_base_address;
1240 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0, lower_32_bits(bus_address));
1241 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1, upper_32_bits(bus_address));
1242
1243 bus_address = eq->bus_address + hdev->asic_prop.host_phys_base_address;
1244 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(bus_address));
1245 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(bus_address));
1246
1247 bus_address = hdev->cpu_accessible_dma_address +
1248 hdev->asic_prop.host_phys_base_address;
1249 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8, lower_32_bits(bus_address));
1250 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9, upper_32_bits(bus_address));
1251
1252 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
1253 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
1254 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, CPU_ACCESSIBLE_MEM_SIZE);
1255
1256 /* Used for EQ CI */
1257 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);
1258
1259 WREG32(mmCPU_IF_PF_PQ_PI, 0);
1260
1261 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP);
1262
1263 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
1264 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
1265
1266 err = hl_poll_timeout(
1267 hdev,
1268 mmPSOC_GLOBAL_CONF_SCRATCHPAD_7,
1269 status,
1270 (status == PQ_INIT_STATUS_READY_FOR_HOST),
1271 1000,
1272 GOYA_CPU_TIMEOUT_USEC);
1273
1274 if (err) {
1275 dev_err(hdev->dev,
1276 "Failed to communicate with ARM CPU (ArmCP timeout)\n");
1277 return -EIO;
1278 }
1279
1280 goya->hw_cap_initialized |= HW_CAP_CPU_Q;
1281 return 0;
1282}
1283
1284static void goya_set_pll_refclk(struct hl_device *hdev)
1285{
1286 WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
1287 WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
1288 WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
1289 WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);
1290
1291 WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
1292 WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
1293 WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
1294 WREG32(mmIC_PLL_DIV_SEL_3, 0x0);
1295
1296 WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
1297 WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
1298 WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
1299 WREG32(mmMC_PLL_DIV_SEL_3, 0x0);
1300
1301 WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
1302 WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
1303 WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
1304 WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);
1305
1306 WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
1307 WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
1308 WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
1309 WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);
1310
1311 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
1312 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
1313 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
1314 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);
1315
1316 WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
1317 WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
1318 WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
1319 WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
1320}
1321
1322static void goya_disable_clk_rlx(struct hl_device *hdev)
1323{
1324 WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
1325 WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
1326}
1327
1328static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
1329{
1330 u64 tpc_eml_address;
1331 u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
1332 int err, slm_index;
1333
1334 tpc_offset = tpc_id * 0x40000;
1335 tpc_eml_offset = tpc_id * 0x200000;
1336 tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
1337 tpc_slm_offset = tpc_eml_address + 0x100000;
1338
1339	/*
1340	 * Workaround for H2 #2443:
1341	 * "TPC SB is not initialized on chip reset"
1342	 */
1343
1344 val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
1345 if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
1346 dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
1347 tpc_id);
1348
1349 WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);
1350
1351 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
1352 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
1353 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
1354 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
1355 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
1356 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
1357 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
1358 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
1359 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
1360 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);
1361
1362 WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
1363 1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);
1364
1365 err = hl_poll_timeout(
1366 hdev,
1367 mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
1368 val,
1369 (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
1370 1000,
1371 HL_DEVICE_TIMEOUT_USEC);
1372
1373 if (err)
1374 dev_err(hdev->dev,
1375 "Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);
1376
1377 WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
1378 1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);
1379
1380 msleep(GOYA_RESET_WAIT_MSEC);
1381
1382 WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
1383 ~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));
1384
1385 msleep(GOYA_RESET_WAIT_MSEC);
1386
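	/*
	 * Zero the first 256 words of the TPC SLM; the read-back afterwards is
	 * presumably there to flush the posted writes
	 */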
1387 for (slm_index = 0 ; slm_index < 256 ; slm_index++)
1388 WREG32(tpc_slm_offset + (slm_index << 2), 0);
1389
1390 val = RREG32(tpc_slm_offset);
1391}
1392
1393static void goya_tpc_mbist_workaround(struct hl_device *hdev)
1394{
1395 struct goya_device *goya = hdev->asic_specific;
1396 int i;
1397
1398 if (hdev->pldm)
1399 return;
1400
1401 if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
1402 return;
1403
1404 /* Workaround for H2 #2443 */
1405
1406 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1407 _goya_tpc_mbist_workaround(hdev, i);
1408
1409 goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
1410}
1411
1412/*
1413 * goya_init_golden_registers - Initialize golden registers
1414 *
1415 * @hdev: pointer to hl_device structure
1416 *
1417 * Initialize the H/W registers of the device
1418 *
1419 */
1420static void goya_init_golden_registers(struct hl_device *hdev)
1421{
1422 struct goya_device *goya = hdev->asic_specific;
1423 u32 polynom[10], tpc_intr_mask, offset;
1424 int i;
1425
1426 if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
1427 return;
1428
1429 polynom[0] = 0x00020080;
1430 polynom[1] = 0x00401000;
1431 polynom[2] = 0x00200800;
1432 polynom[3] = 0x00002000;
1433 polynom[4] = 0x00080200;
1434 polynom[5] = 0x00040100;
1435 polynom[6] = 0x00100400;
1436 polynom[7] = 0x00004000;
1437 polynom[8] = 0x00010000;
1438 polynom[9] = 0x00008000;
1439
1440 /* Mask all arithmetic interrupts from TPC */
1441 tpc_intr_mask = 0x7FFF;
1442
1443 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
1444 WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1445 WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1446 WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1447 WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1448 WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1449
1450 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
1451 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
1452 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
1453 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
1454 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);
1455
1456
1457 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
1458 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
1459 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
1460 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
1461 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);
1462
1463 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
1464 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
1465 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
1466 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
1467 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);
1468
1469 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
1470 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
1471 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
1472 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
1473 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);
1474
1475 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
1476 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
1477 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
1478 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
1479 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
1480 }
1481
1482 WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
1483 WREG32(mmMME_AGU, 0x0f0f0f10);
1484 WREG32(mmMME_SEI_MASK, ~0x0);
1485
1486 WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1487 WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1488 WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1489 WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1490 WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1491 WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
1492 WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
1493 WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
1494 WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
1495 WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1496 WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1497 WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
1498 WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1499 WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1500 WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
1501 WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
1502 WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
1503 WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
1504 WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
1505 WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
1506 WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
1507 WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
1508 WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
1509 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1510 WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1511 WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1512 WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
1513 WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
1514 WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1515 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
1516 WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1517 WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1518 WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
1519 WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
1520 WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1521 WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1522 WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1523 WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1524 WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
1525 WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
1526 WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
1527 WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
1528 WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
1529 WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
1530 WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
1531 WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
1532 WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1533 WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1534 WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1535 WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1536 WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
1537 WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
1538 WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
1539 WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
1540 WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1541 WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1542 WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1543 WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1544 WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1545 WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1546 WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1547 WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1548 WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1549 WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1550 WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1551 WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
1552 WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
1553 WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1554 WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1555 WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1556 WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1557 WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1558 WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1559 WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1560 WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
1561 WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
1562 WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
1563 WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
1564 WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1565 WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1566 WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1567 WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1568 WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1569 WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1570
1571 WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1572 WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1573 WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
1574 WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
1575 WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1576 WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
1577 WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1578 WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1579 WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1580 WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1581 WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1582 WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);
1583
1584 WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1585 WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
1586 WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
1587 WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
1588 WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
1589 WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
1590 WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1591 WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1592 WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1593 WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1594 WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1595 WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);
1596
1597 WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1598 WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1599 WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
1600 WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
1601 WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
1602 WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
1603 WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
1604 WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
1605 WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
1606 WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1607 WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1608 WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);
1609
1610 WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1611 WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1612 WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
1613 WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1614 WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
1615 WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
1616 WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
1617 WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
1618 WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
1619 WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1620 WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1621 WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);
1622
1623 WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
1624 WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
1625 WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
1626 WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1627 WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
1628 WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
1629 WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
1630 WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
1631 WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1632 WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1633 WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1634 WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1635
1636 WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1637 WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1638 WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
1639 WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
1640 WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1641 WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
1642 WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
1643 WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
1644 WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1645 WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1646 WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1647 WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1648
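	/*
	 * Program the routers' split coefficients; each polynomial value is
	 * shifted right by 7 bits before being written
	 */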
1649 for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
1650 WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1651 WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1652 WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1653 WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1654 WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1655 WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1656
1657 WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1658 WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1659 WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1660 WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1661 WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1662 WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1663 WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1664 WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1665
1666 WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1667 WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1668 }
1669
1670 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
1671 WREG32(mmMME1_RTR_SCRAMB_EN + offset,
1672 1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
1673 WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
1674 1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
1675 }
1676
1677 for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
1678 /*
1679 * Workaround for Bug H2 #2441 :
1680 * "ST.NOP set trace event illegal opcode"
1681 */
1682 WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);
1683
1684 WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
1685 1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
1686 WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1687 1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1688 }
1689
1690 WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
1691 WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
1692 1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1693
1694 WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
1695 WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
1696 1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1697
1698	/*
1699	 * Workaround for H2 #HW-23 bug:
1700	 * Set the DMA maximum outstanding read requests to 240 on DMA CH 1 and
1701	 * to 16 on the KMD DMA (CH 0).
1702	 * We need to limit only these DMAs because the user can read from the
1703	 * Host only through DMA CH 1
1704	 */
1705 WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
1706 WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
1707
1708 goya->hw_cap_initialized |= HW_CAP_GOLDEN;
1709}
1710
1711static void goya_init_mme_qman(struct hl_device *hdev)
1712{
1713 u32 mtr_base_lo, mtr_base_hi;
1714 u32 so_base_lo, so_base_hi;
1715 u32 gic_base_lo, gic_base_hi;
1716 u64 qman_base_addr;
1717
1718 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1719 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1720 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1721 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1722
1723 gic_base_lo =
1724 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1725 gic_base_hi =
1726 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1727
1728 qman_base_addr = hdev->asic_prop.sram_base_address +
1729 MME_QMAN_BASE_OFFSET;
1730
1731 WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
1732 WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
1733 WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
1734 WREG32(mmMME_QM_PQ_PI, 0);
1735 WREG32(mmMME_QM_PQ_CI, 0);
1736 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
1737 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
1738 WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
1739 WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);
1740
1741 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1742 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1743 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1744 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1745
1746 /* QMAN CQ has 8 cache lines */
1747 WREG32(mmMME_QM_CQ_CFG1, 0x00080008);
1748
1749 WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
1750 WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);
1751
1752 WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);
1753
1754 WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);
1755
1756 WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);
1757
1758 WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
1759}
1760
1761static void goya_init_mme_cmdq(struct hl_device *hdev)
1762{
1763 u32 mtr_base_lo, mtr_base_hi;
1764 u32 so_base_lo, so_base_hi;
1765 u32 gic_base_lo, gic_base_hi;
1766 u64 qman_base_addr;
1767
1768 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1769 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1770 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1771 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1772
1773 gic_base_lo =
1774 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1775 gic_base_hi =
1776 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1777
1778 qman_base_addr = hdev->asic_prop.sram_base_address +
1779 MME_QMAN_BASE_OFFSET;
1780
1781 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1782 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1783 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1784 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1785
1786 /* CMDQ CQ has 20 cache lines */
1787 WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);
1788
1789 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
1790 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);
1791
1792 WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);
1793
1794 WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);
1795
1796 WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);
1797
1798 WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
1799}
1800
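/*
 * goya_init_mme_qmans - Initialize QMAN MME registers
 *
 * @hdev: pointer to hl_device structure
 *
 * Initialize the H/W registers of the MME QMAN and CMDQ
 *
 */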
1801static void goya_init_mme_qmans(struct hl_device *hdev)
1802{
1803 struct goya_device *goya = hdev->asic_specific;
1804 u32 so_base_lo, so_base_hi;
1805
1806 if (goya->hw_cap_initialized & HW_CAP_MME)
1807 return;
1808
1809 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1810 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1811
1812 WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
1813 WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);
1814
1815 goya_init_mme_qman(hdev);
1816 goya_init_mme_cmdq(hdev);
1817
1818 goya->hw_cap_initialized |= HW_CAP_MME;
1819}
1820
1821static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
1822{
1823 u32 mtr_base_lo, mtr_base_hi;
1824 u32 so_base_lo, so_base_hi;
1825 u32 gic_base_lo, gic_base_hi;
1826 u64 qman_base_addr;
1827 u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);
1828
1829 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1830 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1831 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1832 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1833
1834 gic_base_lo =
1835 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1836 gic_base_hi =
1837 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1838
1839 qman_base_addr = hdev->asic_prop.sram_base_address + base_off;
1840
1841 WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
1842 WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
1843 WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
1844 WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
1845 WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
1846 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
1847 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
1848 WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
1849 WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);
1850
1851 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1852 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1853 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1854 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1855
1856 WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);
1857
1858 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1859 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1860
1861 WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
1862 GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);
1863
1864 WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);
1865
1866 WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);
1867
1868 WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
1869}
1870
1871static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
1872{
1873 u32 mtr_base_lo, mtr_base_hi;
1874 u32 so_base_lo, so_base_hi;
1875 u32 gic_base_lo, gic_base_hi;
1876 u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);
1877
1878 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1879 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1880 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1881 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1882
1883 gic_base_lo =
1884 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1885 gic_base_hi =
1886 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1887
1888 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1889 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1890 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1891 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1892
1893 WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);
1894
1895 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1896 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1897
1898 WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
1899 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);
1900
1901 WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);
1902
1903 WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);
1904
1905 WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
1906}
1907
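/*
 * goya_init_tpc_qmans - Initialize QMAN TPC registers
 *
 * @hdev: pointer to hl_device structure
 *
 * Initialize the H/W registers of the TPC QMANs and CMDQs
 *
 */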
1908static void goya_init_tpc_qmans(struct hl_device *hdev)
1909{
1910 struct goya_device *goya = hdev->asic_specific;
1911 u32 so_base_lo, so_base_hi;
1912 u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
1913 mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
1914 int i;
1915
1916 if (goya->hw_cap_initialized & HW_CAP_TPC)
1917 return;
1918
1919 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1920 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1921
1922 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
1923 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
1924 so_base_lo);
1925 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
1926 so_base_hi);
1927 }
1928
1929 goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
1930 goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
1931 goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
1932 goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
1933 goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
1934 goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
1935 goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
1936 goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
1937
1938 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1939 goya_init_tpc_cmdq(hdev, i);
1940
1941 goya->hw_cap_initialized |= HW_CAP_TPC;
1942}
1943
1944/*
1945 * goya_disable_internal_queues - Disable internal queues
1946 *
1947 * @hdev: pointer to hl_device structure
1948 *
1949 */
1950static void goya_disable_internal_queues(struct hl_device *hdev)
1951{
1952 WREG32(mmMME_QM_GLBL_CFG0, 0);
1953 WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
1954
1955 WREG32(mmTPC0_QM_GLBL_CFG0, 0);
1956 WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
1957
1958 WREG32(mmTPC1_QM_GLBL_CFG0, 0);
1959 WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
1960
1961 WREG32(mmTPC2_QM_GLBL_CFG0, 0);
1962 WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
1963
1964 WREG32(mmTPC3_QM_GLBL_CFG0, 0);
1965 WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
1966
1967 WREG32(mmTPC4_QM_GLBL_CFG0, 0);
1968 WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
1969
1970 WREG32(mmTPC5_QM_GLBL_CFG0, 0);
1971 WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
1972
1973 WREG32(mmTPC6_QM_GLBL_CFG0, 0);
1974 WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
1975
1976 WREG32(mmTPC7_QM_GLBL_CFG0, 0);
1977 WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
1978}
1979
1980/*
1981 * goya_stop_internal_queues - Stop internal queues
1982 *
1983 * @hdev: pointer to hl_device structure
1984 *
1985 * Returns 0 on success
1986 *
1987 */
1988static int goya_stop_internal_queues(struct hl_device *hdev)
1989{
1990 int rc, retval = 0;
1991
1992	/*
1993	 * Each queue (QMAN) is a separate H/W logic block, so each QMAN can be
1994	 * stopped independently, and a failure to stop one does NOT prevent us
1995	 * from trying to stop the other QMANs
1996	 */
1997
1998 rc = goya_stop_queue(hdev,
1999 mmMME_QM_GLBL_CFG1,
2000 mmMME_QM_CP_STS,
2001 mmMME_QM_GLBL_STS0);
2002
2003 if (rc) {
2004 dev_err(hdev->dev, "failed to stop MME QMAN\n");
2005 retval = -EIO;
2006 }
2007
2008 rc = goya_stop_queue(hdev,
2009 mmMME_CMDQ_GLBL_CFG1,
2010 mmMME_CMDQ_CP_STS,
2011 mmMME_CMDQ_GLBL_STS0);
2012
2013 if (rc) {
2014 dev_err(hdev->dev, "failed to stop MME CMDQ\n");
2015 retval = -EIO;
2016 }
2017
2018 rc = goya_stop_queue(hdev,
2019 mmTPC0_QM_GLBL_CFG1,
2020 mmTPC0_QM_CP_STS,
2021 mmTPC0_QM_GLBL_STS0);
2022
2023 if (rc) {
2024 dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
2025 retval = -EIO;
2026 }
2027
2028 rc = goya_stop_queue(hdev,
2029 mmTPC0_CMDQ_GLBL_CFG1,
2030 mmTPC0_CMDQ_CP_STS,
2031 mmTPC0_CMDQ_GLBL_STS0);
2032
2033 if (rc) {
2034 dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
2035 retval = -EIO;
2036 }
2037
2038 rc = goya_stop_queue(hdev,
2039 mmTPC1_QM_GLBL_CFG1,
2040 mmTPC1_QM_CP_STS,
2041 mmTPC1_QM_GLBL_STS0);
2042
2043 if (rc) {
2044 dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
2045 retval = -EIO;
2046 }
2047
2048 rc = goya_stop_queue(hdev,
2049 mmTPC1_CMDQ_GLBL_CFG1,
2050 mmTPC1_CMDQ_CP_STS,
2051 mmTPC1_CMDQ_GLBL_STS0);
2052
2053 if (rc) {
2054 dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
2055 retval = -EIO;
2056 }
2057
2058 rc = goya_stop_queue(hdev,
2059 mmTPC2_QM_GLBL_CFG1,
2060 mmTPC2_QM_CP_STS,
2061 mmTPC2_QM_GLBL_STS0);
2062
2063 if (rc) {
2064 dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
2065 retval = -EIO;
2066 }
2067
2068 rc = goya_stop_queue(hdev,
2069 mmTPC2_CMDQ_GLBL_CFG1,
2070 mmTPC2_CMDQ_CP_STS,
2071 mmTPC2_CMDQ_GLBL_STS0);
2072
2073 if (rc) {
2074 dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
2075 retval = -EIO;
2076 }
2077
2078 rc = goya_stop_queue(hdev,
2079 mmTPC3_QM_GLBL_CFG1,
2080 mmTPC3_QM_CP_STS,
2081 mmTPC3_QM_GLBL_STS0);
2082
2083 if (rc) {
2084 dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
2085 retval = -EIO;
2086 }
2087
2088 rc = goya_stop_queue(hdev,
2089 mmTPC3_CMDQ_GLBL_CFG1,
2090 mmTPC3_CMDQ_CP_STS,
2091 mmTPC3_CMDQ_GLBL_STS0);
2092
2093 if (rc) {
2094 dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
2095 retval = -EIO;
2096 }
2097
2098 rc = goya_stop_queue(hdev,
2099 mmTPC4_QM_GLBL_CFG1,
2100 mmTPC4_QM_CP_STS,
2101 mmTPC4_QM_GLBL_STS0);
2102
2103 if (rc) {
2104 dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
2105 retval = -EIO;
2106 }
2107
2108 rc = goya_stop_queue(hdev,
2109 mmTPC4_CMDQ_GLBL_CFG1,
2110 mmTPC4_CMDQ_CP_STS,
2111 mmTPC4_CMDQ_GLBL_STS0);
2112
2113 if (rc) {
2114 dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
2115 retval = -EIO;
2116 }
2117
2118 rc = goya_stop_queue(hdev,
2119 mmTPC5_QM_GLBL_CFG1,
2120 mmTPC5_QM_CP_STS,
2121 mmTPC5_QM_GLBL_STS0);
2122
2123 if (rc) {
2124 dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
2125 retval = -EIO;
2126 }
2127
2128 rc = goya_stop_queue(hdev,
2129 mmTPC5_CMDQ_GLBL_CFG1,
2130 mmTPC5_CMDQ_CP_STS,
2131 mmTPC5_CMDQ_GLBL_STS0);
2132
2133 if (rc) {
2134 dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
2135 retval = -EIO;
2136 }
2137
2138 rc = goya_stop_queue(hdev,
2139 mmTPC6_QM_GLBL_CFG1,
2140 mmTPC6_QM_CP_STS,
2141 mmTPC6_QM_GLBL_STS0);
2142
2143 if (rc) {
2144 dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
2145 retval = -EIO;
2146 }
2147
2148 rc = goya_stop_queue(hdev,
2149 mmTPC6_CMDQ_GLBL_CFG1,
2150 mmTPC6_CMDQ_CP_STS,
2151 mmTPC6_CMDQ_GLBL_STS0);
2152
2153 if (rc) {
2154 dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
2155 retval = -EIO;
2156 }
2157
2158 rc = goya_stop_queue(hdev,
2159 mmTPC7_QM_GLBL_CFG1,
2160 mmTPC7_QM_CP_STS,
2161 mmTPC7_QM_GLBL_STS0);
2162
2163 if (rc) {
2164 dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
2165 retval = -EIO;
2166 }
2167
2168 rc = goya_stop_queue(hdev,
2169 mmTPC7_CMDQ_GLBL_CFG1,
2170 mmTPC7_CMDQ_CP_STS,
2171 mmTPC7_CMDQ_GLBL_STS0);
2172
2173 if (rc) {
2174 dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
2175 retval = -EIO;
2176 }
2177
2178 return retval;
2179}
2180
2181static void goya_resume_internal_queues(struct hl_device *hdev)
2182{
2183 WREG32(mmMME_QM_GLBL_CFG1, 0);
2184 WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
2185
2186 WREG32(mmTPC0_QM_GLBL_CFG1, 0);
2187 WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
2188
2189 WREG32(mmTPC1_QM_GLBL_CFG1, 0);
2190 WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
2191
2192 WREG32(mmTPC2_QM_GLBL_CFG1, 0);
2193 WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
2194
2195 WREG32(mmTPC3_QM_GLBL_CFG1, 0);
2196 WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
2197
2198 WREG32(mmTPC4_QM_GLBL_CFG1, 0);
2199 WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
2200
2201 WREG32(mmTPC5_QM_GLBL_CFG1, 0);
2202 WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
2203
2204 WREG32(mmTPC6_QM_GLBL_CFG1, 0);
2205 WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
2206
2207 WREG32(mmTPC7_QM_GLBL_CFG1, 0);
2208 WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
2209}
2210
2211static void goya_dma_stall(struct hl_device *hdev)
2212{
2213 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
2214 WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
2215 WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
2216 WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
2217 WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
2218}
2219
2220static void goya_tpc_stall(struct hl_device *hdev)
2221{
2222 WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
2223 WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
2224 WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
2225 WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
2226 WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
2227 WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
2228 WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
2229 WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
2230}
2231
2232static void goya_mme_stall(struct hl_device *hdev)
2233{
2234 WREG32(mmMME_STALL, 0xFFFFFFFF);
2235}
2236
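/*
 * goya_enable_msix - Enable MSI-X and request the interrupts
 *
 * @hdev: pointer to hl_device structure
 *
 * Allocate GOYA_MSIX_ENTRIES MSI-X vectors and request one IRQ per completion
 * queue plus one for the event queue
 *
 * Returns 0 on success
 *
 */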
2237static int goya_enable_msix(struct hl_device *hdev)
2238{
2239 struct goya_device *goya = hdev->asic_specific;
2240 int cq_cnt = hdev->asic_prop.completion_queues_count;
2241 int rc, i, irq_cnt_init, irq;
2242
2243 if (goya->hw_cap_initialized & HW_CAP_MSIX)
2244 return 0;
2245
2246 rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
2247 GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
2248 if (rc < 0) {
2249 dev_err(hdev->dev,
2250 "MSI-X: Failed to enable support -- %d/%d\n",
2251 GOYA_MSIX_ENTRIES, rc);
2252 return rc;
2253 }
2254
2255 for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
2256 irq = pci_irq_vector(hdev->pdev, i);
2257 rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
2258 &hdev->completion_queue[i]);
2259 if (rc) {
2260			dev_err(hdev->dev, "Failed to request IRQ %d\n", irq);
2261 goto free_irqs;
2262 }
2263 }
2264
2265 irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
2266
2267 rc = request_irq(irq, hl_irq_handler_eq, 0,
2268 goya_irq_name[EVENT_QUEUE_MSIX_IDX],
2269 &hdev->event_queue);
2270 if (rc) {
2271		dev_err(hdev->dev, "Failed to request IRQ %d\n", irq);
2272 goto free_irqs;
2273 }
2274
2275 goya->hw_cap_initialized |= HW_CAP_MSIX;
2276 return 0;
2277
2278free_irqs:
2279 for (i = 0 ; i < irq_cnt_init ; i++)
2280 free_irq(pci_irq_vector(hdev->pdev, i),
2281 &hdev->completion_queue[i]);
2282
2283 pci_free_irq_vectors(hdev->pdev);
2284 return rc;
2285}
2286
2287static void goya_sync_irqs(struct hl_device *hdev)
2288{
2289 struct goya_device *goya = hdev->asic_specific;
2290 int i;
2291
2292 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2293 return;
2294
2295	/* Wait for all pending IRQ handlers to finish */
2296 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2297 synchronize_irq(pci_irq_vector(hdev->pdev, i));
2298
2299 synchronize_irq(pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX));
2300}
2301
2302static void goya_disable_msix(struct hl_device *hdev)
2303{
2304 struct goya_device *goya = hdev->asic_specific;
2305 int i, irq;
2306
2307 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2308 return;
2309
2310 goya_sync_irqs(hdev);
2311
2312 irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
2313 free_irq(irq, &hdev->event_queue);
2314
2315 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
2316 irq = pci_irq_vector(hdev->pdev, i);
2317 free_irq(irq, &hdev->completion_queue[i]);
2318 }
2319
2320 pci_free_irq_vectors(hdev->pdev);
2321
2322 goya->hw_cap_initialized &= ~HW_CAP_MSIX;
2323}
2324
2325static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
2326{
2327 u32 wait_timeout_ms, cpu_timeout_ms;
2328
2329 dev_info(hdev->dev,
2330 "Halting compute engines and disabling interrupts\n");
2331
2332 if (hdev->pldm) {
2333 wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2334 cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2335 } else {
2336 wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
2337 cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
2338 }
2339
2340 if (hard_reset) {
2341		/*
2342		 * The state of the CPU is unknown at this point, so make sure it
2343		 * is stopped by any means necessary
2344		 */
2345 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
2346 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2347 GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
2348 msleep(cpu_timeout_ms);
2349 }
2350
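	/*
	 * Halt order: stop the external and internal queues, give the engines
	 * time to drain, stall the DMA/TPC/MME engines, and only then disable
	 * the queues
	 */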
2351 goya_stop_external_queues(hdev);
2352 goya_stop_internal_queues(hdev);
2353
2354 msleep(wait_timeout_ms);
2355
2356 goya_dma_stall(hdev);
2357 goya_tpc_stall(hdev);
2358 goya_mme_stall(hdev);
2359
2360 msleep(wait_timeout_ms);
2361
2362 goya_disable_external_queues(hdev);
2363 goya_disable_internal_queues(hdev);
2364
2365 if (hard_reset)
2366 goya_disable_msix(hdev);
2367 else
2368 goya_sync_irqs(hdev);
2369}
2370
2371/*
2372 * goya_push_fw_to_device - Push FW code to device
2373 *
2374 * @hdev: pointer to hl_device structure
2375 *
2376 * Copy fw code from firmware file to device memory.
2377 * Returns 0 on success
2378 *
2379 */
2380static int goya_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
2381 void __iomem *dst)
2382{
2383 const struct firmware *fw;
2384 const u64 *fw_data;
2385 size_t fw_size, i;
2386 int rc;
2387
2388 rc = request_firmware(&fw, fw_name, hdev->dev);
2389
2390 if (rc) {
2391 dev_err(hdev->dev, "Failed to request %s\n", fw_name);
2392 goto out;
2393 }
2394
2395 fw_size = fw->size;
2396 if ((fw_size % 4) != 0) {
2397 dev_err(hdev->dev, "illegal %s firmware size %zu\n",
2398 fw_name, fw_size);
2399 rc = -EINVAL;
2400 goto out;
2401 }
2402
2403 dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
2404
2405 fw_data = (const u64 *) fw->data;
2406
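	/*
	 * If the image size is not a multiple of 8, shrink the loop bound so
	 * that only complete 64-bit words are copied with writeq(); the
	 * trailing 32-bit word is written with writel() after the loop
	 */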
2407 if ((fw->size % 8) != 0)
2408 fw_size -= 8;
2409
2410 for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
2411 if (!(i & (0x80000 - 1))) {
2412 dev_dbg(hdev->dev,
2413 "copied so far %zu out of %zu for %s firmware",
2414 i, fw_size, fw_name);
2415 usleep_range(20, 100);
2416 }
2417
2418 writeq(*fw_data, dst);
2419 }
2420
2421 if ((fw->size % 8) != 0)
2422 writel(*(const u32 *) fw_data, dst);
2423
2424out:
2425 release_firmware(fw);
2426 return rc;
2427}
2428
2429static int goya_pldm_init_cpu(struct hl_device *hdev)
2430{
2431 char fw_name[200];
2432 void __iomem *dst;
2433 u32 val, unit_rst_val;
2434 int rc;
2435
2436 /* Must initialize SRAM scrambler before pushing u-boot to SRAM */
2437 goya_init_golden_registers(hdev);
2438
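	/*
	 * Note: the register read-backs below are presumably meant to flush
	 * the posted writes before moving to the next step
	 */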
2439 /* Put ARM cores into reset */
2440 WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
2441 val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
2442
2443 /* Reset the CA53 MACRO */
2444 unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2445 WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
2446 val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2447 WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
2448 val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2449
2450 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
2451 dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
2452 rc = goya_push_fw_to_device(hdev, fw_name, dst);
2453 if (rc)
2454 return rc;
2455
2456 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
2457 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2458 rc = goya_push_fw_to_device(hdev, fw_name, dst);
2459 if (rc)
2460 return rc;
2461
2462 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
2463 WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA);
2464
2465 WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0,
2466 lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
2467 WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0,
2468 upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
2469
2470 /* Release ARM core 0 from reset */
2471 WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
2472 CPU_RESET_CORE0_DEASSERT);
2473 val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
2474
2475 return 0;
2476}
2477
2478/*
2479 * The FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
2480 * The version string should be located at that offset.
2481 */
2482static void goya_read_device_fw_version(struct hl_device *hdev,
2483 enum goya_fw_component fwc)
2484{
2485 const char *name;
2486 u32 ver_off;
2487 char *dest;
2488
2489 switch (fwc) {
2490 case FW_COMP_UBOOT:
2491 ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29);
2492 dest = hdev->asic_prop.uboot_ver;
2493 name = "U-Boot";
2494 break;
2495 case FW_COMP_PREBOOT:
2496 ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28);
2497 dest = hdev->asic_prop.preboot_ver;
2498 name = "Preboot";
2499 break;
2500 default:
2501 dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
2502 return;
2503 }
2504
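	/* Strip the SRAM base address bits so ver_off is a pure offset into SRAM */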
2505 ver_off &= ~((u32)SRAM_BASE_ADDR);
2506
2507 if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
2508 memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off,
2509 VERSION_MAX_LEN);
2510 } else {
2511 dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
2512 name, ver_off);
2513 strcpy(dest, "unavailable");
2514 }
2515}
2516
2517static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
2518{
2519 struct goya_device *goya = hdev->asic_specific;
2520 char fw_name[200];
2521 void __iomem *dst;
2522 u32 status;
2523 int rc;
2524
2525 if (!hdev->cpu_enable)
2526 return 0;
2527
2528 if (goya->hw_cap_initialized & HW_CAP_CPU)
2529 return 0;
2530
2531	/*
2532	 * Before pushing u-boot/Linux to the device, the DDR BAR must be set
2533	 * to the base address of the DRAM
2534	 */
2535 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2536 if (rc) {
2537 dev_err(hdev->dev,
2538 "failed to map DDR bar to DRAM base address\n");
2539 return rc;
2540 }
2541
2542 if (hdev->pldm) {
2543 rc = goya_pldm_init_cpu(hdev);
2544 if (rc)
2545 return rc;
2546
2547 goto out;
2548 }
2549
2550 /* Make sure CPU boot-loader is running */
2551 rc = hl_poll_timeout(
2552 hdev,
2553 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2554 status,
2555 (status == CPU_BOOT_STATUS_DRAM_RDY) ||
2556 (status == CPU_BOOT_STATUS_SRAM_AVAIL),
2557 10000,
2558 cpu_timeout);
2559
2560 if (rc) {
2561		dev_err(hdev->dev, "Error in ARM u-boot!\n");
2562 switch (status) {
2563 case CPU_BOOT_STATUS_NA:
2564 dev_err(hdev->dev,
2565 "ARM status %d - BTL did NOT run\n", status);
2566 break;
2567 case CPU_BOOT_STATUS_IN_WFE:
2568 dev_err(hdev->dev,
2569 "ARM status %d - Inside WFE loop\n", status);
2570 break;
2571 case CPU_BOOT_STATUS_IN_BTL:
2572 dev_err(hdev->dev,
2573 "ARM status %d - Stuck in BTL\n", status);
2574 break;
2575 case CPU_BOOT_STATUS_IN_PREBOOT:
2576 dev_err(hdev->dev,
2577 "ARM status %d - Stuck in Preboot\n", status);
2578 break;
2579 case CPU_BOOT_STATUS_IN_SPL:
2580 dev_err(hdev->dev,
2581 "ARM status %d - Stuck in SPL\n", status);
2582 break;
2583 case CPU_BOOT_STATUS_IN_UBOOT:
2584 dev_err(hdev->dev,
2585 "ARM status %d - Stuck in u-boot\n", status);
2586 break;
2587 case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
2588 dev_err(hdev->dev,
2589 "ARM status %d - DDR initialization failed\n",
2590 status);
2591 break;
2592 default:
2593 dev_err(hdev->dev,
2594 "ARM status %d - Invalid status code\n",
2595 status);
2596 break;
2597 }
2598 return -EIO;
2599 }
2600
2601	/* Read the U-Boot version now, in case we fail later */
2602 goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
2603 goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
2604
2605 if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
2606 goto out;
2607
2608 if (!hdev->fw_loading) {
2609 dev_info(hdev->dev, "Skip loading FW\n");
2610 goto out;
2611 }
2612
2613 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
2614 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2615 rc = goya_push_fw_to_device(hdev, fw_name, dst);
2616 if (rc)
2617 return rc;
2618
2619 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
2620
2621 rc = hl_poll_timeout(
2622 hdev,
2623 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2624 status,
2625 (status == CPU_BOOT_STATUS_SRAM_AVAIL),
2626 10000,
2627 cpu_timeout);
2628
2629 if (rc) {
2630 if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
2631 dev_err(hdev->dev,
2632 "ARM u-boot reports FIT image is corrupted\n");
2633 else
2634 dev_err(hdev->dev,
2635 "ARM Linux failed to load, %d\n", status);
2636 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_NA);
2637 return -EIO;
2638 }
2639
2640 dev_info(hdev->dev, "Successfully loaded firmware to device\n");
2641
2642out:
2643 goya->hw_cap_initialized |= HW_CAP_CPU;
2644
2645 return 0;
2646}
2647
2648static int goya_mmu_init(struct hl_device *hdev)
2649{
2650 struct asic_fixed_properties *prop = &hdev->asic_prop;
2651 struct goya_device *goya = hdev->asic_specific;
2652 u64 hop0_addr;
2653 int rc, i;
2654
2655 if (!hdev->mmu_enable)
2656 return 0;
2657
2658 if (goya->hw_cap_initialized & HW_CAP_MMU)
2659 return 0;
2660
2661 hdev->dram_supports_virtual_memory = true;
2662 hdev->dram_default_page_mapping = true;
2663
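	/* Set the hop0 page-table address for every possible ASID */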
2664 for (i = 0 ; i < prop->max_asid ; i++) {
2665 hop0_addr = prop->mmu_pgt_addr +
2666 (i * prop->mmu_hop_table_size);
2667
2668 rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
2669 if (rc) {
2670 dev_err(hdev->dev,
2671 "failed to set hop0 addr for asid %d\n", i);
2672 goto err;
2673 }
2674 }
2675
2676 goya->hw_cap_initialized |= HW_CAP_MMU;
2677
2678 /* init MMU cache manage page */
2679 WREG32(mmSTLB_CACHE_INV_BASE_39_8,
2680 lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
2681 WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
2682
2683 /* Remove follower feature due to performance bug */
2684 WREG32_AND(mmSTLB_STLB_FEATURE_EN,
2685 (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
2686
2687 hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
2688
2689 WREG32(mmMMU_MMU_ENABLE, 1);
2690 WREG32(mmMMU_SPI_MASK, 0xF);
2691
2692 return 0;
2693
2694err:
2695 return rc;
2696}
2697
2698/*
2699 * goya_hw_init - Goya hardware initialization code
2700 *
2701 * @hdev: pointer to hl_device structure
2702 *
2703 * Returns 0 on success
2704 *
2705 */
2706static int goya_hw_init(struct hl_device *hdev)
2707{
2708 struct asic_fixed_properties *prop = &hdev->asic_prop;
2709 u32 val;
2710 int rc;
2711
2712 dev_info(hdev->dev, "Starting initialization of H/W\n");
2713
2714 /* Perform read from the device to make sure device is up */
2715 val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2716
2717	/*
2718	 * Mark in the H/W that we have reached this point. We check this value
2719	 * in the reset_before_init function to determine whether we need to
2720	 * reset the chip before doing H/W init. This register is cleared by
2721	 * the H/W upon H/W reset
2722	 */
2723 WREG32(mmPSOC_GLOBAL_CONF_APP_STATUS, HL_DEVICE_HW_STATE_DIRTY);
2724
2725 rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
2726 if (rc) {
2727 dev_err(hdev->dev, "failed to initialize CPU\n");
2728 return rc;
2729 }
2730
2731 goya_tpc_mbist_workaround(hdev);
2732
2733 goya_init_golden_registers(hdev);
2734
2735 /*
2736 * After CPU initialization is finished, change DDR bar mapping inside
2737 * iATU to point to the start address of the MMU page tables
2738 */
2739 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
2740 (MMU_PAGE_TABLES_ADDR & ~(prop->dram_pci_bar_size - 0x1ull)));
2741 if (rc) {
2742 dev_err(hdev->dev,
2743 "failed to map DDR bar to MMU page tables\n");
2744 return rc;
2745 }
2746
2747 rc = goya_mmu_init(hdev);
2748 if (rc)
2749 return rc;
2750
2751 goya_init_security(hdev);
2752
2753 goya_init_dma_qmans(hdev);
2754
2755 goya_init_mme_qmans(hdev);
2756
2757 goya_init_tpc_qmans(hdev);
2758
2759 /* MSI-X must be enabled before CPU queues are initialized */
2760 rc = goya_enable_msix(hdev);
2761 if (rc)
2762 goto disable_queues;
2763
2764 rc = goya_init_cpu_queues(hdev);
2765 if (rc) {
2766 dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
2767 rc);
2768 goto disable_msix;
2769 }
2770
2771 /* CPU initialization is finished, we can now move to 48 bit DMA mask */
2772 rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
2773 if (rc) {
2774 dev_warn(hdev->dev, "Unable to set pci dma mask to 48 bits\n");
2775 rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
2776 if (rc) {
2777 dev_err(hdev->dev,
2778 "Unable to set pci dma mask to 32 bits\n");
2779 goto disable_pci_access;
2780 }
2781 }
2782
2783 rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
2784 if (rc) {
2785 dev_warn(hdev->dev,
2786 "Unable to set pci consistent dma mask to 48 bits\n");
2787 rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
2788 if (rc) {
2789 dev_err(hdev->dev,
2790 "Unable to set pci consistent dma mask to 32 bits\n");
2791 goto disable_pci_access;
2792 }
2793 }
2794
2795 /* Perform read from the device to flush all MSI-X configuration */
2796 val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2797
2798 return 0;
2799
2800disable_pci_access:
2801 goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2802disable_msix:
2803 goya_disable_msix(hdev);
2804disable_queues:
2805 goya_disable_internal_queues(hdev);
2806 goya_disable_external_queues(hdev);
2807
2808 return rc;
2809}
2810
2811/*
2812 * goya_hw_fini - Goya hardware tear-down code
2813 *
2814 * @hdev: pointer to hl_device structure
2815 * @hard_reset: should we do hard reset to all engines or just reset the
2816 * compute/dma engines
2817 */
2818static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
2819{
2820 struct goya_device *goya = hdev->asic_specific;
2821 u32 reset_timeout_ms, status;
2822
2823 if (hdev->pldm)
2824 reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
2825 else
2826 reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
2827
2828 if (hard_reset) {
2829 goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2830 goya_disable_clk_rlx(hdev);
2831 goya_set_pll_refclk(hdev);
2832
2833 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
2834 dev_info(hdev->dev,
2835 "Issued HARD reset command, going to wait %dms\n",
2836 reset_timeout_ms);
2837 } else {
2838 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
2839 dev_info(hdev->dev,
2840 "Issued SOFT reset command, going to wait %dms\n",
2841 reset_timeout_ms);
2842 }
2843
2844	/*
2845	 * After a hard reset, we can't poll the BTM_FSM register because the
2846	 * PSOC itself is in reset. In either case, we need to wait until the
2847	 * reset is deasserted
2848	 */
2849 msleep(reset_timeout_ms);
2850
2851 status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
2852 if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
2853 dev_err(hdev->dev,
2854 "Timeout while waiting for device to reset 0x%x\n",
2855 status);
2856
2857 if (!hard_reset) {
2858 goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
2859 HW_CAP_GOLDEN | HW_CAP_TPC);
2860 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2861 GOYA_ASYNC_EVENT_ID_SOFT_RESET);
2862 return;
2863 }
2864
2865 /* Chicken bit to re-initiate boot sequencer flow */
2866 WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
2867 1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
2868 /* Move boot manager FSM to pre boot sequencer init state */
2869 WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
2870 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
2871
2872 goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
2873 HW_CAP_DDR_0 | HW_CAP_DDR_1 |
2874 HW_CAP_DMA | HW_CAP_MME |
2875 HW_CAP_MMU | HW_CAP_TPC_MBIST |
2876 HW_CAP_GOLDEN | HW_CAP_TPC);
2877 memset(goya->events_stat, 0, sizeof(goya->events_stat));
2878
2879 if (!hdev->pldm) {
2880 int rc;
2881		/* In case we are running inside a VM and the VM is
2882		 * shutting down, we need to make sure the CPU boot-loader
2883		 * is running before we can continue the VM shutdown.
2884		 * That is because the VM will send an FLR signal that
2885		 * we must answer
2886		 */
2887 dev_info(hdev->dev,
2888 "Going to wait up to %ds for CPU boot loader\n",
2889 GOYA_CPU_TIMEOUT_USEC / 1000 / 1000);
2890
2891 rc = hl_poll_timeout(
2892 hdev,
2893 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2894 status,
2895 (status == CPU_BOOT_STATUS_DRAM_RDY),
2896 10000,
2897 GOYA_CPU_TIMEOUT_USEC);
2898 if (rc)
2899 dev_err(hdev->dev,
2900 "failed to wait for CPU boot loader\n");
2901 }
2902}
2903
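/*
 * goya_suspend - Goya suspend code
 *
 * @hdev: pointer to hl_device structure
 *
 * Stop the internal and external queues and disable PCI access from the
 * device CPU
 *
 * Returns 0 on success
 *
 */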
2904int goya_suspend(struct hl_device *hdev)
2905{
2906 int rc;
2907
2908 rc = goya_stop_internal_queues(hdev);
2909
2910 if (rc) {
2911 dev_err(hdev->dev, "failed to stop internal queues\n");
2912 return rc;
2913 }
2914
2915 rc = goya_stop_external_queues(hdev);
2916
2917 if (rc) {
2918 dev_err(hdev->dev, "failed to stop external queues\n");
2919 return rc;
2920 }
2921
2922 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2923 if (rc)
2924 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
2925
2926 return rc;
2927}
2928
2929int goya_resume(struct hl_device *hdev)
2930{
2931 int rc;
2932
2933 goya_resume_external_queues(hdev);
2934 goya_resume_internal_queues(hdev);
2935
2936 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
2937 if (rc)
2938 dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
2939 return rc;
2940}
2941
2942static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
2943 u64 kaddress, phys_addr_t paddress, u32 size)
2944{
2945 int rc;
2946
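	/*
	 * Mark the mapping as a raw PFN I/O mapping that is not expanded,
	 * dumped, copied on fork or backed by swap reservation
	 */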
2947 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
2948 VM_DONTCOPY | VM_NORESERVE;
2949
2950 rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
2951 size, vma->vm_page_prot);
2952 if (rc)
2953		dev_err(hdev->dev, "remap_pfn_range error %d\n", rc);
2954
2955 return rc;
2956}
2957
2958static void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
2959{
2960 u32 db_reg_offset, db_value;
2961 bool invalid_queue = false;
2962
2963 switch (hw_queue_id) {
2964 case GOYA_QUEUE_ID_DMA_0:
2965 db_reg_offset = mmDMA_QM_0_PQ_PI;
2966 break;
2967
2968 case GOYA_QUEUE_ID_DMA_1:
2969 db_reg_offset = mmDMA_QM_1_PQ_PI;
2970 break;
2971
2972 case GOYA_QUEUE_ID_DMA_2:
2973 db_reg_offset = mmDMA_QM_2_PQ_PI;
2974 break;
2975
2976 case GOYA_QUEUE_ID_DMA_3:
2977 db_reg_offset = mmDMA_QM_3_PQ_PI;
2978 break;
2979
2980 case GOYA_QUEUE_ID_DMA_4:
2981 db_reg_offset = mmDMA_QM_4_PQ_PI;
2982 break;
2983
2984 case GOYA_QUEUE_ID_CPU_PQ:
2985 if (hdev->cpu_queues_enable)
2986 db_reg_offset = mmCPU_IF_PF_PQ_PI;
2987 else
2988 invalid_queue = true;
2989 break;
2990
2991 case GOYA_QUEUE_ID_MME:
2992 db_reg_offset = mmMME_QM_PQ_PI;
2993 break;
2994
2995 case GOYA_QUEUE_ID_TPC0:
2996 db_reg_offset = mmTPC0_QM_PQ_PI;
2997 break;
2998
2999 case GOYA_QUEUE_ID_TPC1:
3000 db_reg_offset = mmTPC1_QM_PQ_PI;
3001 break;
3002
3003 case GOYA_QUEUE_ID_TPC2:
3004 db_reg_offset = mmTPC2_QM_PQ_PI;
3005 break;
3006
3007 case GOYA_QUEUE_ID_TPC3:
3008 db_reg_offset = mmTPC3_QM_PQ_PI;
3009 break;
3010
3011 case GOYA_QUEUE_ID_TPC4:
3012 db_reg_offset = mmTPC4_QM_PQ_PI;
3013 break;
3014
3015 case GOYA_QUEUE_ID_TPC5:
3016 db_reg_offset = mmTPC5_QM_PQ_PI;
3017 break;
3018
3019 case GOYA_QUEUE_ID_TPC6:
3020 db_reg_offset = mmTPC6_QM_PQ_PI;
3021 break;
3022
3023 case GOYA_QUEUE_ID_TPC7:
3024 db_reg_offset = mmTPC7_QM_PQ_PI;
3025 break;
3026
3027 default:
3028 invalid_queue = true;
3029 }
3030
3031 if (invalid_queue) {
3032 /* Should never get here */
3033 dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n",
3034 hw_queue_id);
3035 return;
3036 }
3037
3038 db_value = pi;
3039
3040 /* ring the doorbell */
3041 WREG32(db_reg_offset, db_value);
3042
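	/* The CPU queue is serviced by the embedded CPU, so also raise a
	 * PI_UPDATE event through the GIC to announce the new producer index
	 */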
3043 if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ)
3044 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
3045 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
3046}
3047
3048void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
3049{
3050 /* Not needed in Goya */
3051}
3052
3053static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
3054 dma_addr_t *dma_handle, gfp_t flags)
3055{
3056 return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags);
3057}
3058
3059static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
3060 void *cpu_addr, dma_addr_t dma_handle)
3061{
3062 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle);
3063}
3064
3065void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
3066 dma_addr_t *dma_handle, u16 *queue_len)
3067{
3068 void *base;
3069 u32 offset;
3070
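	/* Internal queues reside in SRAM; return both the device SRAM address
	 * and the matching host-side address inside the SRAM/CFG BAR
	 */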
3071 *dma_handle = hdev->asic_prop.sram_base_address;
3072
3073 base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
3074
3075 switch (queue_id) {
3076 case GOYA_QUEUE_ID_MME:
3077 offset = MME_QMAN_BASE_OFFSET;
3078 *queue_len = MME_QMAN_LENGTH;
3079 break;
3080 case GOYA_QUEUE_ID_TPC0:
3081 offset = TPC0_QMAN_BASE_OFFSET;
3082 *queue_len = TPC_QMAN_LENGTH;
3083 break;
3084 case GOYA_QUEUE_ID_TPC1:
3085 offset = TPC1_QMAN_BASE_OFFSET;
3086 *queue_len = TPC_QMAN_LENGTH;
3087 break;
3088 case GOYA_QUEUE_ID_TPC2:
3089 offset = TPC2_QMAN_BASE_OFFSET;
3090 *queue_len = TPC_QMAN_LENGTH;
3091 break;
3092 case GOYA_QUEUE_ID_TPC3:
3093 offset = TPC3_QMAN_BASE_OFFSET;
3094 *queue_len = TPC_QMAN_LENGTH;
3095 break;
3096 case GOYA_QUEUE_ID_TPC4:
3097 offset = TPC4_QMAN_BASE_OFFSET;
3098 *queue_len = TPC_QMAN_LENGTH;
3099 break;
3100 case GOYA_QUEUE_ID_TPC5:
3101 offset = TPC5_QMAN_BASE_OFFSET;
3102 *queue_len = TPC_QMAN_LENGTH;
3103 break;
3104 case GOYA_QUEUE_ID_TPC6:
3105 offset = TPC6_QMAN_BASE_OFFSET;
3106 *queue_len = TPC_QMAN_LENGTH;
3107 break;
3108 case GOYA_QUEUE_ID_TPC7:
3109 offset = TPC7_QMAN_BASE_OFFSET;
3110 *queue_len = TPC_QMAN_LENGTH;
3111 break;
3112 default:
3113 dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
3114 return NULL;
3115 }
3116
3117 base += offset;
3118 *dma_handle += offset;
3119
3120 return base;
3121}
3122
3123static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
3124{
3125 struct goya_device *goya = hdev->asic_specific;
3126 struct packet_msg_prot *fence_pkt;
3127 u32 *fence_ptr;
3128 dma_addr_t fence_dma_addr;
3129 struct hl_cb *cb;
3130 u32 tmp, timeout;
3131 int rc;
3132
3133 if (hdev->pldm)
3134 timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
3135 else
3136 timeout = HL_DEVICE_TIMEOUT_USEC;
3137
3138 if (!hdev->asic_funcs->is_device_idle(hdev)) {
3139 dev_err_ratelimited(hdev->dev,
3140 "Can't send KMD job on QMAN0 if device is not idle\n");
3141 return -EBUSY;
3142 }
3143
3144 fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
3145 &fence_dma_addr);
3146 if (!fence_ptr) {
3147 dev_err(hdev->dev,
3148 "Failed to allocate fence memory for QMAN0\n");
3149 return -ENOMEM;
3150 }
3151
3152 *fence_ptr = 0;
3153
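	/* With MMU enabled, make DMA QMAN0 fully trusted for the duration of
	 * this KMD job; it is set back to partly trusted after the job is done
	 */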
3154 if (goya->hw_cap_initialized & HW_CAP_MMU) {
3155 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
3156 RREG32(mmDMA_QM_0_GLBL_PROT);
3157 }
3158
3159	/*
3160	 * The Goya CS parser saves space for 2x packet_msg_prot at the end of the
3161	 * CB. For synchronized kernel jobs we only need space for 1 packet_msg_prot.
3162	 */
3163 job->job_cb_size -= sizeof(struct packet_msg_prot);
3164
3165 cb = job->patched_cb;
3166
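	/* Place a MSG_PROT packet at the end of the CB that writes
	 * GOYA_QMAN0_FENCE_VAL to the fence buffer; job completion is detected
	 * by polling that buffer below
	 */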
3167 fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
3168 job->job_cb_size - sizeof(struct packet_msg_prot));
3169
3170 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3171 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3172 (1 << GOYA_PKT_CTL_MB_SHIFT);
3173 fence_pkt->ctl = cpu_to_le32(tmp);
3174 fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
3175 fence_pkt->addr = cpu_to_le64(fence_dma_addr +
3176 hdev->asic_prop.host_phys_base_address);
3177
3178 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
3179 job->job_cb_size, cb->bus_address);
3180 if (rc) {
3181 dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
3182 goto free_fence_ptr;
3183 }
3184
3185 rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr, timeout,
3186 &tmp);
3187
3188 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
3189
3190 if ((rc) || (tmp != GOYA_QMAN0_FENCE_VAL)) {
3191 dev_err(hdev->dev, "QMAN0 Job hasn't finished in time\n");
3192 rc = -ETIMEDOUT;
3193 }
3194
3195free_fence_ptr:
3196 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
3197 fence_dma_addr);
3198
3199 if (goya->hw_cap_initialized & HW_CAP_MMU) {
3200 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
3201 RREG32(mmDMA_QM_0_GLBL_PROT);
3202 }
3203
3204 return rc;
3205}
3206
3207int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
3208 u32 timeout, long *result)
3209{
3210 struct goya_device *goya = hdev->asic_specific;
3211 struct armcp_packet *pkt;
3212 dma_addr_t pkt_dma_addr;
3213 u32 tmp;
3214 int rc = 0;
3215
3216 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
3217 if (result)
3218 *result = 0;
3219 return 0;
3220 }
3221
3222 if (len > CPU_CB_SIZE) {
3223 dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n",
3224 len);
3225 return -ENOMEM;
3226 }
3227
3228 pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
3229 &pkt_dma_addr);
3230 if (!pkt) {
3231 dev_err(hdev->dev,
3232 "Failed to allocate DMA memory for packet to CPU\n");
3233 return -ENOMEM;
3234 }
3235
3236 memcpy(pkt, msg, len);
3237
3238 mutex_lock(&hdev->send_cpu_message_lock);
3239
3240 if (hdev->disabled)
3241 goto out;
3242
3243 if (hdev->device_cpu_disabled) {
3244 rc = -EIO;
3245 goto out;
3246 }
3247
3248 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_CPU_PQ, len,
3249 pkt_dma_addr);
3250 if (rc) {
3251 dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
3252 goto out;
3253 }
3254
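	/* Wait for the device CPU (ArmCP) to write the fence value into the
	 * packet's fence field
	 */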
3255 rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence,
3256 timeout, &tmp);
3257
3258 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_CPU_PQ);
3259
3260 if (rc == -ETIMEDOUT) {
3261 dev_err(hdev->dev, "Timeout while waiting for device CPU\n");
3262 hdev->device_cpu_disabled = true;
3263 goto out;
3264 }
3265
3266 if (tmp == ARMCP_PACKET_FENCE_VAL) {
3267 u32 ctl = le32_to_cpu(pkt->ctl);
3268
3269 rc = (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
3270 if (rc) {
3271 dev_err(hdev->dev,
3272 "F/W ERROR %d for CPU packet %d\n",
3273 rc, (ctl & ARMCP_PKT_CTL_OPCODE_MASK)
3274 >> ARMCP_PKT_CTL_OPCODE_SHIFT);
3275 rc = -EINVAL;
3276 } else if (result) {
3277 *result = (long) le64_to_cpu(pkt->result);
3278 }
3279 } else {
3280		dev_err(hdev->dev, "CPU packet has wrong fence value\n");
3281 rc = -EINVAL;
3282 }
3283
3284out:
3285 mutex_unlock(&hdev->send_cpu_message_lock);
3286
3287 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
3288
3289 return rc;
3290}
3291
3292int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
3293{
3294 struct packet_msg_prot *fence_pkt;
3295 dma_addr_t pkt_dma_addr;
3296 u32 fence_val, tmp;
3297 dma_addr_t fence_dma_addr;
3298 u32 *fence_ptr;
3299 int rc;
3300
3301 fence_val = GOYA_QMAN0_FENCE_VAL;
3302
3303 fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
3304 &fence_dma_addr);
3305 if (!fence_ptr) {
3306 dev_err(hdev->dev,
3307 "Failed to allocate memory for queue testing\n");
3308 return -ENOMEM;
3309 }
3310
3311 *fence_ptr = 0;
3312
3313 fence_pkt = hdev->asic_funcs->dma_pool_zalloc(hdev,
3314 sizeof(struct packet_msg_prot),
3315 GFP_KERNEL, &pkt_dma_addr);
3316 if (!fence_pkt) {
3317 dev_err(hdev->dev,
3318 "Failed to allocate packet for queue testing\n");
3319 rc = -ENOMEM;
3320 goto free_fence_ptr;
3321 }
3322
3323 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3324 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3325 (1 << GOYA_PKT_CTL_MB_SHIFT);
3326 fence_pkt->ctl = cpu_to_le32(tmp);
3327 fence_pkt->value = cpu_to_le32(fence_val);
3328 fence_pkt->addr = cpu_to_le64(fence_dma_addr +
3329 hdev->asic_prop.host_phys_base_address);
3330
3331 rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
3332 sizeof(struct packet_msg_prot),
3333 pkt_dma_addr);
3334 if (rc) {
3335 dev_err(hdev->dev,
3336 "Failed to send fence packet\n");
3337 goto free_pkt;
3338 }
3339
3340 rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr,
3341 GOYA_TEST_QUEUE_WAIT_USEC, &tmp);
3342
3343 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
3344
3345 if ((!rc) && (tmp == fence_val)) {
3346 dev_info(hdev->dev,
3347 "queue test on H/W queue %d succeeded\n",
3348 hw_queue_id);
3349 } else {
3350 dev_err(hdev->dev,
3351 "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
3352 hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
3353 rc = -EINVAL;
3354 }
3355
3356free_pkt:
3357 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_pkt,
3358 pkt_dma_addr);
3359free_fence_ptr:
3360 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
3361 fence_dma_addr);
3362 return rc;
3363}
3364
3365int goya_test_cpu_queue(struct hl_device *hdev)
3366{
3367 struct armcp_packet test_pkt;
3368 long result;
3369 int rc;
3370
3371 /* cpu_queues_enable flag is always checked in send cpu message */
3372
3373 memset(&test_pkt, 0, sizeof(test_pkt));
3374
3375 test_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
3376 ARMCP_PKT_CTL_OPCODE_SHIFT);
3377 test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
3378
3379 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
3380 sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
3381
3382 if (!rc) {
3383 if (result == ARMCP_PACKET_FENCE_VAL)
3384 dev_info(hdev->dev,
3385 "queue test on CPU queue succeeded\n");
3386 else
3387 dev_err(hdev->dev,
3388 "CPU queue test failed (0x%08lX)\n", result);
3389 } else {
3390 dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
3391 }
3392
3393 return rc;
3394}
3395
3396static int goya_test_queues(struct hl_device *hdev)
3397{
3398 struct goya_device *goya = hdev->asic_specific;
3399 int i, rc, ret_val = 0;
3400
3401 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
3402 rc = goya_test_queue(hdev, i);
3403 if (rc)
3404 ret_val = -EINVAL;
3405 }
3406
3407 if (hdev->cpu_queues_enable) {
3408 rc = goya->test_cpu_queue(hdev);
3409 if (rc)
3410 ret_val = -EINVAL;
3411 }
3412
3413 return ret_val;
3414}
3415
3416static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
3417 gfp_t mem_flags, dma_addr_t *dma_handle)
3418{
3419 if (size > GOYA_DMA_POOL_BLK_SIZE)
3420 return NULL;
3421
3422 return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
3423}
3424
3425static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
3426 dma_addr_t dma_addr)
3427{
3428 dma_pool_free(hdev->dma_pool, vaddr, dma_addr);
3429}
3430
3431static void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
3432 size_t size, dma_addr_t *dma_handle)
3433{
3434 u64 kernel_addr;
3435
3436	/* round up to CPU_PKT_SIZE */
3437 size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
3438
3439 kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
3440
3441 *dma_handle = hdev->cpu_accessible_dma_address +
3442 (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
3443
3444 return (void *) (uintptr_t) kernel_addr;
3445}
3446
3447static void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev,
3448 size_t size, void *vaddr)
3449{
3450	/* round up to CPU_PKT_SIZE */
3451 size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
3452
3453 gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
3454 size);
3455}
3456
3457static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sg,
3458 int nents, enum dma_data_direction dir)
3459{
3460 if (!dma_map_sg(&hdev->pdev->dev, sg, nents, dir))
3461 return -ENOMEM;
3462
3463 return 0;
3464}
3465
3466static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sg,
3467 int nents, enum dma_data_direction dir)
3468{
3469 dma_unmap_sg(&hdev->pdev->dev, sg, nents, dir);
3470}
3471
3472u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
3473{
3474 struct scatterlist *sg, *sg_next_iter;
3475 u32 count, dma_desc_cnt;
3476 u64 len, len_next;
3477 dma_addr_t addr, addr_next;
3478
3479 dma_desc_cnt = 0;
3480
3481 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3482
3483 len = sg_dma_len(sg);
3484 addr = sg_dma_address(sg);
3485
3486 if (len == 0)
3487 break;
3488
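		/* Merge physically contiguous SG entries as long as the combined
		 * length fits in a single LIN_DMA transfer
		 */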
3489 while ((count + 1) < sgt->nents) {
3490 sg_next_iter = sg_next(sg);
3491 len_next = sg_dma_len(sg_next_iter);
3492 addr_next = sg_dma_address(sg_next_iter);
3493
3494 if (len_next == 0)
3495 break;
3496
3497 if ((addr + len == addr_next) &&
3498 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3499 len += len_next;
3500 count++;
3501 sg = sg_next_iter;
3502 } else {
3503 break;
3504 }
3505 }
3506
3507 dma_desc_cnt++;
3508 }
3509
3510 return dma_desc_cnt * sizeof(struct packet_lin_dma);
3511}
3512
3513static int goya_pin_memory_before_cs(struct hl_device *hdev,
3514 struct hl_cs_parser *parser,
3515 struct packet_lin_dma *user_dma_pkt,
3516 u64 addr, enum dma_data_direction dir)
3517{
3518 struct hl_userptr *userptr;
3519 int rc;
3520
3521 if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3522 parser->job_userptr_list, &userptr))
3523 goto already_pinned;
3524
3525 userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
3526 if (!userptr)
3527 return -ENOMEM;
3528
3529 rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3530 userptr);
3531 if (rc)
3532 goto free_userptr;
3533
3534 list_add_tail(&userptr->job_node, parser->job_userptr_list);
3535
3536 rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
3537 userptr->sgt->nents, dir);
3538 if (rc) {
3539 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
3540 goto unpin_memory;
3541 }
3542
3543 userptr->dma_mapped = true;
3544 userptr->dir = dir;
3545
3546already_pinned:
3547 parser->patched_cb_size +=
3548 goya_get_dma_desc_list_size(hdev, userptr->sgt);
3549
3550 return 0;
3551
3552unpin_memory:
3553 hl_unpin_host_memory(hdev, userptr);
3554free_userptr:
3555 kfree(userptr);
3556 return rc;
3557}
3558
3559static int goya_validate_dma_pkt_host(struct hl_device *hdev,
3560 struct hl_cs_parser *parser,
3561 struct packet_lin_dma *user_dma_pkt)
3562{
3563 u64 device_memory_addr, addr;
3564 enum dma_data_direction dir;
3565 enum goya_dma_direction user_dir;
3566 bool sram_addr = true;
3567 bool skip_host_mem_pin = false;
3568 bool user_memset;
3569 u32 ctl;
3570 int rc = 0;
3571
3572 ctl = le32_to_cpu(user_dma_pkt->ctl);
3573
3574 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3575 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3576
3577 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3578 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3579
3580 switch (user_dir) {
3581 case DMA_HOST_TO_DRAM:
3582 dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
3583 dir = DMA_TO_DEVICE;
3584 sram_addr = false;
3585 addr = le64_to_cpu(user_dma_pkt->src_addr);
3586 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3587 if (user_memset)
3588 skip_host_mem_pin = true;
3589 break;
3590
3591 case DMA_DRAM_TO_HOST:
3592 dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
3593 dir = DMA_FROM_DEVICE;
3594 sram_addr = false;
3595 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3596 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3597 break;
3598
3599 case DMA_HOST_TO_SRAM:
3600 dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
3601 dir = DMA_TO_DEVICE;
3602 addr = le64_to_cpu(user_dma_pkt->src_addr);
3603 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3604 if (user_memset)
3605 skip_host_mem_pin = true;
3606 break;
3607
3608 case DMA_SRAM_TO_HOST:
3609 dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
3610 dir = DMA_FROM_DEVICE;
3611 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3612 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3613 break;
3614 default:
3615 dev_err(hdev->dev, "DMA direction is undefined\n");
3616 return -EFAULT;
3617 }
3618
3619 if (parser->ctx_id != HL_KERNEL_ASID_ID) {
3620 if (sram_addr) {
3621 if (!hl_mem_area_inside_range(device_memory_addr,
3622 le32_to_cpu(user_dma_pkt->tsize),
3623 hdev->asic_prop.sram_user_base_address,
3624 hdev->asic_prop.sram_end_address)) {
3625
3626 dev_err(hdev->dev,
3627 "SRAM address 0x%llx + 0x%x is invalid\n",
3628 device_memory_addr,
3629 user_dma_pkt->tsize);
3630 return -EFAULT;
3631 }
3632 } else {
3633 if (!hl_mem_area_inside_range(device_memory_addr,
3634 le32_to_cpu(user_dma_pkt->tsize),
3635 hdev->asic_prop.dram_user_base_address,
3636 hdev->asic_prop.dram_end_address)) {
3637
3638 dev_err(hdev->dev,
3639 "DRAM address 0x%llx + 0x%x is invalid\n",
3640 device_memory_addr,
3641 user_dma_pkt->tsize);
3642 return -EFAULT;
3643 }
3644 }
3645 }
3646
3647 if (skip_host_mem_pin)
3648 parser->patched_cb_size += sizeof(*user_dma_pkt);
3649 else {
3650 if ((dir == DMA_TO_DEVICE) &&
3651 (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
3652 dev_err(hdev->dev,
3653				"Can't DMA from host on queue other than 1\n");
3654 return -EFAULT;
3655 }
3656
3657 rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
3658 addr, dir);
3659 }
3660
3661 return rc;
3662}
3663
3664static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
3665 struct hl_cs_parser *parser,
3666 struct packet_lin_dma *user_dma_pkt)
3667{
3668 u64 sram_memory_addr, dram_memory_addr;
3669 enum goya_dma_direction user_dir;
3670 u32 ctl;
3671
3672 ctl = le32_to_cpu(user_dma_pkt->ctl);
3673 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3674 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3675
3676 if (user_dir == DMA_DRAM_TO_SRAM) {
3677 dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
3678 dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3679 sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3680 } else {
3681 dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
3682 sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3683 dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3684 }
3685
3686 if (!hl_mem_area_inside_range(sram_memory_addr,
3687 le32_to_cpu(user_dma_pkt->tsize),
3688 hdev->asic_prop.sram_user_base_address,
3689 hdev->asic_prop.sram_end_address)) {
3690 dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
3691 sram_memory_addr, user_dma_pkt->tsize);
3692 return -EFAULT;
3693 }
3694
3695 if (!hl_mem_area_inside_range(dram_memory_addr,
3696 le32_to_cpu(user_dma_pkt->tsize),
3697 hdev->asic_prop.dram_user_base_address,
3698 hdev->asic_prop.dram_end_address)) {
3699 dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
3700 dram_memory_addr, user_dma_pkt->tsize);
3701 return -EFAULT;
3702 }
3703
3704 parser->patched_cb_size += sizeof(*user_dma_pkt);
3705
3706 return 0;
3707}
3708
3709static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
3710 struct hl_cs_parser *parser,
3711 struct packet_lin_dma *user_dma_pkt)
3712{
3713 enum goya_dma_direction user_dir;
3714 u32 ctl;
3715 int rc;
3716
3717 dev_dbg(hdev->dev, "DMA packet details:\n");
3718 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
3719 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
3720 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
3721
3722 ctl = le32_to_cpu(user_dma_pkt->ctl);
3723 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3724 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3725
3726 /*
3727 * Special handling for DMA with size 0. The H/W has a bug where
3728 * this can cause the QMAN DMA to get stuck, so block it here.
3729 */
3730 if (user_dma_pkt->tsize == 0) {
3731 dev_err(hdev->dev,
3732 "Got DMA with size 0, might reset the device\n");
3733 return -EINVAL;
3734 }
3735
3736 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM))
3737 rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
3738 else
3739 rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
3740
3741 return rc;
3742}
3743
3744static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
3745 struct hl_cs_parser *parser,
3746 struct packet_lin_dma *user_dma_pkt)
3747{
3748 dev_dbg(hdev->dev, "DMA packet details:\n");
3749 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
3750 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
3751 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
3752
3753 /*
3754 * WA for HW-23.
3755 * We can't allow user to read from Host using QMANs other than 1.
3756 */
3757 if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
3758 hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
3759 le32_to_cpu(user_dma_pkt->tsize),
3760 hdev->asic_prop.va_space_host_start_address,
3761 hdev->asic_prop.va_space_host_end_address)) {
3762 dev_err(hdev->dev,
3763			"Can't DMA from host on queue other than 1\n");
3764 return -EFAULT;
3765 }
3766
3767 if (user_dma_pkt->tsize == 0) {
3768 dev_err(hdev->dev,
3769 "Got DMA with size 0, might reset the device\n");
3770 return -EINVAL;
3771 }
3772
3773 parser->patched_cb_size += sizeof(*user_dma_pkt);
3774
3775 return 0;
3776}
3777
3778static int goya_validate_wreg32(struct hl_device *hdev,
3779 struct hl_cs_parser *parser,
3780 struct packet_wreg32 *wreg_pkt)
3781{
3782 struct goya_device *goya = hdev->asic_specific;
3783 u32 sob_start_addr, sob_end_addr;
3784 u16 reg_offset;
3785
3786 reg_offset = le32_to_cpu(wreg_pkt->ctl) &
3787 GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
3788
3789 dev_dbg(hdev->dev, "WREG32 packet details:\n");
3790 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
3791 dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
3792
3793 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
3794 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
3795 reg_offset);
3796 return -EPERM;
3797 }
3798
3799 /*
3800 * With MMU, DMA channels are not secured, so it doesn't matter where
3801 * the WR COMP will be written to because it will go out with
3802 * non-secured property
3803 */
3804 if (goya->hw_cap_initialized & HW_CAP_MMU)
3805 return 0;
3806
3807 sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
3808 sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
3809
3810 if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
3811 (le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {
3812
3813 dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
3814 wreg_pkt->value);
3815 return -EPERM;
3816 }
3817
3818 return 0;
3819}
3820
3821static int goya_validate_cb(struct hl_device *hdev,
3822 struct hl_cs_parser *parser, bool is_mmu)
3823{
3824 u32 cb_parsed_length = 0;
3825 int rc = 0;
3826
3827 parser->patched_cb_size = 0;
3828
3829	/* cb_user_size is more than 0 so the loop will always be executed */
3830 while (cb_parsed_length < parser->user_cb_size) {
3831 enum packet_id pkt_id;
3832 u16 pkt_size;
3833 void *user_pkt;
3834
3835 user_pkt = (void *) (uintptr_t)
3836 (parser->user_cb->kernel_address + cb_parsed_length);
3837
3838 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
3839 PACKET_HEADER_PACKET_ID_MASK) >>
3840 PACKET_HEADER_PACKET_ID_SHIFT);
3841
3842 pkt_size = goya_packet_sizes[pkt_id];
3843 cb_parsed_length += pkt_size;
3844 if (cb_parsed_length > parser->user_cb_size) {
3845 dev_err(hdev->dev,
3846 "packet 0x%x is out of CB boundary\n", pkt_id);
3847 rc = -EINVAL;
3848 break;
3849 }
3850
3851 switch (pkt_id) {
3852 case PACKET_WREG_32:
3853 /*
3854 * Although it is validated after copy in patch_cb(),
3855 * need to validate here as well because patch_cb() is
3856 * not called in MMU path while this function is called
3857 */
3858 rc = goya_validate_wreg32(hdev, parser, user_pkt);
3859 break;
3860
3861 case PACKET_WREG_BULK:
3862 dev_err(hdev->dev,
3863 "User not allowed to use WREG_BULK\n");
3864 rc = -EPERM;
3865 break;
3866
3867 case PACKET_MSG_PROT:
3868 dev_err(hdev->dev,
3869 "User not allowed to use MSG_PROT\n");
3870 rc = -EPERM;
3871 break;
3872
3873 case PACKET_CP_DMA:
3874 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3875 rc = -EPERM;
3876 break;
3877
3878 case PACKET_STOP:
3879 dev_err(hdev->dev, "User not allowed to use STOP\n");
3880 rc = -EPERM;
3881 break;
3882
3883 case PACKET_LIN_DMA:
3884 if (is_mmu)
3885 rc = goya_validate_dma_pkt_mmu(hdev, parser,
3886 user_pkt);
3887 else
3888 rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
3889 user_pkt);
3890 break;
3891
3892 case PACKET_MSG_LONG:
3893 case PACKET_MSG_SHORT:
3894 case PACKET_FENCE:
3895 case PACKET_NOP:
3896 parser->patched_cb_size += pkt_size;
3897 break;
3898
3899 default:
3900 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3901 pkt_id);
3902 rc = -EINVAL;
3903 break;
3904 }
3905
3906 if (rc)
3907 break;
3908 }
3909
3910 /*
3911 * The new CB should have space at the end for two MSG_PROT packets:
3912 * 1. A packet that will act as a completion packet
3913 * 2. A packet that will generate MSI-X interrupt
3914 */
3915 parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
3916
3917 return rc;
3918}
3919
3920static int goya_patch_dma_packet(struct hl_device *hdev,
3921 struct hl_cs_parser *parser,
3922 struct packet_lin_dma *user_dma_pkt,
3923 struct packet_lin_dma *new_dma_pkt,
3924 u32 *new_dma_pkt_size)
3925{
3926 struct hl_userptr *userptr;
3927 struct scatterlist *sg, *sg_next_iter;
3928 u32 count, dma_desc_cnt;
3929 u64 len, len_next;
3930 dma_addr_t dma_addr, dma_addr_next;
3931 enum goya_dma_direction user_dir;
3932 u64 device_memory_addr, addr;
3933 enum dma_data_direction dir;
3934 struct sg_table *sgt;
3935 bool skip_host_mem_pin = false;
3936 bool user_memset;
3937 u32 user_rdcomp_mask, user_wrcomp_mask, ctl;
3938
3939 ctl = le32_to_cpu(user_dma_pkt->ctl);
3940
3941 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3942 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3943
3944 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3945 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3946
3947 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) ||
3948 (user_dma_pkt->tsize == 0)) {
3949 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
3950 *new_dma_pkt_size = sizeof(*new_dma_pkt);
3951 return 0;
3952 }
3953
3954 if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) {
3955 addr = le64_to_cpu(user_dma_pkt->src_addr);
3956 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3957 dir = DMA_TO_DEVICE;
3958 if (user_memset)
3959 skip_host_mem_pin = true;
3960 } else {
3961 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3962 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3963 dir = DMA_FROM_DEVICE;
3964 }
3965
3966 if ((!skip_host_mem_pin) &&
3967 (hl_userptr_is_pinned(hdev, addr,
3968 le32_to_cpu(user_dma_pkt->tsize),
3969 parser->job_userptr_list, &userptr) == false)) {
3970 dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
3971 addr, user_dma_pkt->tsize);
3972 return -EFAULT;
3973 }
3974
3975 if ((user_memset) && (dir == DMA_TO_DEVICE)) {
3976 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
3977 *new_dma_pkt_size = sizeof(*user_dma_pkt);
3978 return 0;
3979 }
3980
3981 user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK;
3982
3983 user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;
3984
3985 sgt = userptr->sgt;
3986 dma_desc_cnt = 0;
3987
3988 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3989 len = sg_dma_len(sg);
3990 dma_addr = sg_dma_address(sg);
3991
3992 if (len == 0)
3993 break;
3994
3995 while ((count + 1) < sgt->nents) {
3996 sg_next_iter = sg_next(sg);
3997 len_next = sg_dma_len(sg_next_iter);
3998 dma_addr_next = sg_dma_address(sg_next_iter);
3999
4000 if (len_next == 0)
4001 break;
4002
4003 if ((dma_addr + len == dma_addr_next) &&
4004 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
4005 len += len_next;
4006 count++;
4007 sg = sg_next_iter;
4008 } else {
4009 break;
4010 }
4011 }
4012
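		/* Only the first generated descriptor keeps the engine barrier bit;
		 * rd/wr completion bits are cleared here and restored on the last
		 * descriptor after the loop
		 */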
4013 ctl = le32_to_cpu(user_dma_pkt->ctl);
4014 if (likely(dma_desc_cnt))
4015 ctl &= ~GOYA_PKT_CTL_EB_MASK;
4016 ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
4017 GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
4018 new_dma_pkt->ctl = cpu_to_le32(ctl);
4019 new_dma_pkt->tsize = cpu_to_le32((u32) len);
4020
4021 dma_addr += hdev->asic_prop.host_phys_base_address;
4022
4023 if (dir == DMA_TO_DEVICE) {
4024 new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
4025 new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
4026 } else {
4027 new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
4028 new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
4029 }
4030
4031 if (!user_memset)
4032 device_memory_addr += len;
4033 dma_desc_cnt++;
4034 new_dma_pkt++;
4035 }
4036
4037 if (!dma_desc_cnt) {
4038 dev_err(hdev->dev,
4039			"Got 0 SG entries when patching DMA packet\n");
4040 return -EFAULT;
4041 }
4042
4043 /* Fix the last dma packet - rdcomp/wrcomp must be as user set them */
4044 new_dma_pkt--;
4045 new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask);
4046
4047 *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
4048
4049 return 0;
4050}
4051
4052static int goya_patch_cb(struct hl_device *hdev,
4053 struct hl_cs_parser *parser)
4054{
4055 u32 cb_parsed_length = 0;
4056 u32 cb_patched_cur_length = 0;
4057 int rc = 0;
4058
4059	/* cb_user_size is more than 0 so the loop will always be executed */
4060 while (cb_parsed_length < parser->user_cb_size) {
4061 enum packet_id pkt_id;
4062 u16 pkt_size;
4063 u32 new_pkt_size = 0;
4064 void *user_pkt, *kernel_pkt;
4065
4066 user_pkt = (void *) (uintptr_t)
4067 (parser->user_cb->kernel_address + cb_parsed_length);
4068 kernel_pkt = (void *) (uintptr_t)
4069 (parser->patched_cb->kernel_address +
4070 cb_patched_cur_length);
4071
4072 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
4073 PACKET_HEADER_PACKET_ID_MASK) >>
4074 PACKET_HEADER_PACKET_ID_SHIFT);
4075
4076 pkt_size = goya_packet_sizes[pkt_id];
4077 cb_parsed_length += pkt_size;
4078 if (cb_parsed_length > parser->user_cb_size) {
4079 dev_err(hdev->dev,
4080 "packet 0x%x is out of CB boundary\n", pkt_id);
4081 rc = -EINVAL;
4082 break;
4083 }
4084
4085 switch (pkt_id) {
4086 case PACKET_LIN_DMA:
4087 rc = goya_patch_dma_packet(hdev, parser, user_pkt,
4088 kernel_pkt, &new_pkt_size);
4089 cb_patched_cur_length += new_pkt_size;
4090 break;
4091
4092 case PACKET_WREG_32:
4093 memcpy(kernel_pkt, user_pkt, pkt_size);
4094 cb_patched_cur_length += pkt_size;
4095 rc = goya_validate_wreg32(hdev, parser, kernel_pkt);
4096 break;
4097
4098 case PACKET_WREG_BULK:
4099 dev_err(hdev->dev,
4100 "User not allowed to use WREG_BULK\n");
4101 rc = -EPERM;
4102 break;
4103
4104 case PACKET_MSG_PROT:
4105 dev_err(hdev->dev,
4106 "User not allowed to use MSG_PROT\n");
4107 rc = -EPERM;
4108 break;
4109
4110 case PACKET_CP_DMA:
4111 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
4112 rc = -EPERM;
4113 break;
4114
4115 case PACKET_STOP:
4116 dev_err(hdev->dev, "User not allowed to use STOP\n");
4117 rc = -EPERM;
4118 break;
4119
4120 case PACKET_MSG_LONG:
4121 case PACKET_MSG_SHORT:
4122 case PACKET_FENCE:
4123 case PACKET_NOP:
4124 memcpy(kernel_pkt, user_pkt, pkt_size);
4125 cb_patched_cur_length += pkt_size;
4126 break;
4127
4128 default:
4129 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
4130 pkt_id);
4131 rc = -EINVAL;
4132 break;
4133 }
4134
4135 if (rc)
4136 break;
4137 }
4138
4139 return rc;
4140}
4141
4142static int goya_parse_cb_mmu(struct hl_device *hdev,
4143 struct hl_cs_parser *parser)
4144{
4145 u64 patched_cb_handle;
4146 u32 patched_cb_size;
4147 struct hl_cb *user_cb;
4148 int rc;
4149
4150 /*
4151	 * The new CB should have space at the end for two MSG_PROT packets:
4152 * 1. A packet that will act as a completion packet
4153 * 2. A packet that will generate MSI-X interrupt
4154 */
4155 parser->patched_cb_size = parser->user_cb_size +
4156 sizeof(struct packet_msg_prot) * 2;
4157
4158 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
4159 parser->patched_cb_size,
4160 &patched_cb_handle, HL_KERNEL_ASID_ID);
4161
4162 if (rc) {
4163 dev_err(hdev->dev,
4164 "Failed to allocate patched CB for DMA CS %d\n",
4165 rc);
4166 return rc;
4167 }
4168
4169 patched_cb_handle >>= PAGE_SHIFT;
4170 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
4171 (u32) patched_cb_handle);
4172 /* hl_cb_get should never fail here so use kernel WARN */
4173 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
4174 (u32) patched_cb_handle);
4175 if (!parser->patched_cb) {
4176 rc = -EFAULT;
4177 goto out;
4178 }
4179
4180 /*
4181 * The check that parser->user_cb_size <= parser->user_cb->size was done
4182 * in validate_queue_index().
4183 */
4184 memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
4185 (void *) (uintptr_t) parser->user_cb->kernel_address,
4186 parser->user_cb_size);
4187
4188 patched_cb_size = parser->patched_cb_size;
4189
4190 /* validate patched CB instead of user CB */
4191 user_cb = parser->user_cb;
4192 parser->user_cb = parser->patched_cb;
4193 rc = goya_validate_cb(hdev, parser, true);
4194 parser->user_cb = user_cb;
4195
4196 if (rc) {
4197 hl_cb_put(parser->patched_cb);
4198 goto out;
4199 }
4200
4201 if (patched_cb_size != parser->patched_cb_size) {
4202 dev_err(hdev->dev, "user CB size mismatch\n");
4203 hl_cb_put(parser->patched_cb);
4204 rc = -EINVAL;
4205 goto out;
4206 }
4207
4208out:
4209 /*
4210	 * Always call cb destroy here because we still hold 1 reference
4211	 * to it from the earlier cb_get. After the job is completed,
4212	 * cb_put will release it, but here we want to remove it from
4213	 * the idr.
4214 */
4215 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
4216 patched_cb_handle << PAGE_SHIFT);
4217
4218 return rc;
4219}
4220
4221static int goya_parse_cb_no_mmu(struct hl_device *hdev,
4222 struct hl_cs_parser *parser)
4223{
4224 u64 patched_cb_handle;
4225 int rc;
4226
4227 rc = goya_validate_cb(hdev, parser, false);
4228
4229 if (rc)
4230 goto free_userptr;
4231
4232 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
4233 parser->patched_cb_size,
4234 &patched_cb_handle, HL_KERNEL_ASID_ID);
4235 if (rc) {
4236 dev_err(hdev->dev,
4237 "Failed to allocate patched CB for DMA CS %d\n", rc);
4238 goto free_userptr;
4239 }
4240
4241 patched_cb_handle >>= PAGE_SHIFT;
4242 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
4243 (u32) patched_cb_handle);
4244 /* hl_cb_get should never fail here so use kernel WARN */
4245 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
4246 (u32) patched_cb_handle);
4247 if (!parser->patched_cb) {
4248 rc = -EFAULT;
4249 goto out;
4250 }
4251
4252 rc = goya_patch_cb(hdev, parser);
4253
4254 if (rc)
4255 hl_cb_put(parser->patched_cb);
4256
4257out:
4258 /*
4259	 * Always call cb destroy here because we still hold 1 reference
4260	 * to it from the earlier cb_get. After the job is completed,
4261	 * cb_put will release it, but here we want to remove it from
4262	 * the idr.
4263 */
4264 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
4265 patched_cb_handle << PAGE_SHIFT);
4266
4267free_userptr:
4268 if (rc)
4269 hl_userptr_delete_list(hdev, parser->job_userptr_list);
4270 return rc;
4271}
4272
4273 static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
4274 struct hl_cs_parser *parser)
4275{
4276 struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
4277 struct goya_device *goya = hdev->asic_specific;
4278
4279 if (!(goya->hw_cap_initialized & HW_CAP_MMU)) {
4280 /* For internal queue jobs, just check if cb address is valid */
4281 if (hl_mem_area_inside_range(
4282 (u64) (uintptr_t) parser->user_cb,
4283 parser->user_cb_size,
4284 asic_prop->sram_user_base_address,
4285 asic_prop->sram_end_address))
4286 return 0;
4287
4288 if (hl_mem_area_inside_range(
4289 (u64) (uintptr_t) parser->user_cb,
4290 parser->user_cb_size,
4291 asic_prop->dram_user_base_address,
4292 asic_prop->dram_end_address))
4293 return 0;
4294
4295 dev_err(hdev->dev,
4296			"Internal CB address %px + 0x%x is neither in SRAM nor in DRAM\n",
4297 parser->user_cb, parser->user_cb_size);
4298
4299 return -EFAULT;
4300 }
4301
4302 return 0;
4303}
4304
4305int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
4306{
4307 struct goya_device *goya = hdev->asic_specific;
4308
4309 if (!parser->ext_queue)
4310		return goya_parse_cb_no_ext_queue(hdev, parser);
4311
4312 if ((goya->hw_cap_initialized & HW_CAP_MMU) && parser->use_virt_addr)
4313 return goya_parse_cb_mmu(hdev, parser);
4314 else
4315 return goya_parse_cb_no_mmu(hdev, parser);
4316}
4317
4318void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
4319 u32 cq_val, u32 msix_vec)
4320{
4321 struct packet_msg_prot *cq_pkt;
4322 u32 tmp;
4323
4324 cq_pkt = (struct packet_msg_prot *) (uintptr_t)
4325 (kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
4326
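	/* First MSG_PROT packet writes the completion value to the CQ */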
4327 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
4328 (1 << GOYA_PKT_CTL_EB_SHIFT) |
4329 (1 << GOYA_PKT_CTL_MB_SHIFT);
4330 cq_pkt->ctl = cpu_to_le32(tmp);
4331 cq_pkt->value = cpu_to_le32(cq_val);
4332 cq_pkt->addr = cpu_to_le64(cq_addr);
4333
4334 cq_pkt++;
4335
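	/* Second MSG_PROT packet writes to the MSI-X doorbell to generate the
	 * interrupt
	 */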
4336 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
4337 (1 << GOYA_PKT_CTL_MB_SHIFT);
4338 cq_pkt->ctl = cpu_to_le32(tmp);
4339 cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF);
4340 cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
4341}
4342
4343static void goya_update_eq_ci(struct hl_device *hdev, u32 val)
4344{
4345 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
4346}
4347
4348static void goya_restore_phase_topology(struct hl_device *hdev)
4349{
4350 int i, num_of_sob_in_longs, num_of_mon_in_longs;
4351
4352 num_of_sob_in_longs =
4353 ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);
4354
4355 num_of_mon_in_longs =
4356 ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);
4357
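	/* Clear all sync objects and all monitor status registers */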
4358 for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
4359 WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);
4360
4361 for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
4362 WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);
4363
4364 /* Flush all WREG to prevent race */
4365 i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
4366}
4367
4368/*
4369 * goya_debugfs_read32 - read a 32bit value from a given device address
4370 *
4371 * @hdev: pointer to hl_device structure
4372 * @addr: address in device
4373 * @val: returned value
4374 *
4375  * In case of a DDR address that is not mapped into the default aperture
4376  * that the DDR bar exposes, the function will configure the iATU so that
4377  * the DDR bar is positioned at a base address that allows reading from
4378  * the required address. Configuring the iATU during normal operation can
4379  * lead to undefined behavior and therefore should be done with extreme care.
4380 *
4381 */
4382static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
4383{
4384 struct asic_fixed_properties *prop = &hdev->asic_prop;
4385 int rc = 0;
4386
4387 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4388 *val = RREG32(addr - CFG_BASE);
4389
4390 } else if ((addr >= SRAM_BASE_ADDR) &&
4391 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4392
4393 *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4394 (addr - SRAM_BASE_ADDR));
4395
4396 } else if ((addr >= DRAM_PHYS_BASE) &&
4397 (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
4398
4399 u64 bar_base_addr = DRAM_PHYS_BASE +
4400 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4401
4402 rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
4403 if (!rc) {
4404 *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
4405 (addr - bar_base_addr));
4406
4407 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
4408 (MMU_PAGE_TABLES_ADDR &
4409 ~(prop->dram_pci_bar_size - 0x1ull)));
4410 }
4411 } else {
4412 rc = -EFAULT;
4413 }
4414
4415 return rc;
4416}
4417
4418/*
4419 * goya_debugfs_write32 - write a 32bit value to a given device address
4420 *
4421 * @hdev: pointer to hl_device structure
4422 * @addr: address in device
4423  * @val: value to write
4424 *
4425  * In case of a DDR address that is not mapped into the default aperture
4426  * that the DDR bar exposes, the function will configure the iATU so that
4427  * the DDR bar is positioned at a base address that allows writing to
4428  * the required address. Configuring the iATU during normal operation can
4429  * lead to undefined behavior and therefore should be done with extreme care.
4430 *
4431 */
4432static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
4433{
4434 struct asic_fixed_properties *prop = &hdev->asic_prop;
4435 int rc = 0;
4436
4437 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4438 WREG32(addr - CFG_BASE, val);
4439
4440 } else if ((addr >= SRAM_BASE_ADDR) &&
4441 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4442
4443 writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4444 (addr - SRAM_BASE_ADDR));
4445
4446 } else if ((addr >= DRAM_PHYS_BASE) &&
4447 (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
4448
4449 u64 bar_base_addr = DRAM_PHYS_BASE +
4450 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4451
4452 rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
4453 if (!rc) {
4454 writel(val, hdev->pcie_bar[DDR_BAR_ID] +
4455 (addr - bar_base_addr));
4456
4457 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
4458 (MMU_PAGE_TABLES_ADDR &
4459 ~(prop->dram_pci_bar_size - 0x1ull)));
4460 }
4461 } else {
4462 rc = -EFAULT;
4463 }
4464
4465 return rc;
4466}
4467
4468static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
4469{
4470 struct goya_device *goya = hdev->asic_specific;
4471
4472 return readq(hdev->pcie_bar[DDR_BAR_ID] +
4473 (addr - goya->ddr_bar_cur_addr));
4474}
4475
4476static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
4477{
4478 struct goya_device *goya = hdev->asic_specific;
4479
4480 writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4481 (addr - goya->ddr_bar_cur_addr));
4482}
4483
4484static const char *_goya_get_event_desc(u16 event_type)
4485{
4486 switch (event_type) {
4487 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4488 return "PCIe_dec";
4489 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4490 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4491 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4492 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4493 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4494 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4495 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4496 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4497 return "TPC%d_dec";
4498 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4499 return "MME_wacs";
4500 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4501 return "MME_wacsd";
4502 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4503 return "CPU_axi_splitter";
4504 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4505 return "PSOC_axi_dec";
4506 case GOYA_ASYNC_EVENT_ID_PSOC:
4507 return "PSOC";
4508 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4509 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4510 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4511 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4512 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4513 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4514 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4515 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4516 return "TPC%d_krn_err";
4517 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4518 return "TPC%d_cq";
4519 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4520 return "TPC%d_qm";
4521 case GOYA_ASYNC_EVENT_ID_MME_QM:
4522 return "MME_qm";
4523 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4524 return "MME_cq";
4525 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4526 return "DMA%d_qm";
4527 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4528 return "DMA%d_ch";
4529 default:
4530 return "N/A";
4531 }
4532}
4533
4534static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
4535{
4536 u8 index;
4537
4538 switch (event_type) {
4539 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4540 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4541 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4542 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4543 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4544 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4545 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4546 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4547 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
4548 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4549 break;
4550 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4551 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4552 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4553 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4554 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4555 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4556 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4557 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4558 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
4559 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4560 break;
4561 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4562 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
4563 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4564 break;
4565 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4566 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
4567 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4568 break;
4569 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4570 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
4571 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4572 break;
4573 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4574 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
4575 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4576 break;
4577 default:
4578 snprintf(desc, size, _goya_get_event_desc(event_type));
4579 break;
4580 }
4581}
4582
4583static void goya_print_razwi_info(struct hl_device *hdev)
4584{
4585 if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
4586 dev_err(hdev->dev, "Illegal write to LBW\n");
4587 WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
4588 }
4589
4590 if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
4591 dev_err(hdev->dev, "Illegal read from LBW\n");
4592 WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
4593 }
4594
4595 if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
4596 dev_err(hdev->dev, "Illegal write to HBW\n");
4597 WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
4598 }
4599
4600 if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
4601 dev_err(hdev->dev, "Illegal read from HBW\n");
4602 WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
4603 }
4604}
4605
4606static void goya_print_mmu_error_info(struct hl_device *hdev)
4607{
4608 struct goya_device *goya = hdev->asic_specific;
4609 u64 addr;
4610 u32 val;
4611
4612 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4613 return;
4614
4615 val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
4616 if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
4617 addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
4618 addr <<= 32;
4619 addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
4620
4621 dev_err(hdev->dev, "MMU page fault on va 0x%llx\n", addr);
4622
4623 WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
4624 }
4625}
4626
4627static void goya_print_irq_info(struct hl_device *hdev, u16 event_type)
4628{
4629 char desc[20] = "";
4630
4631 goya_get_event_desc(event_type, desc, sizeof(desc));
4632 dev_err(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
4633 event_type, desc);
4634
4635 goya_print_razwi_info(hdev);
4636 goya_print_mmu_error_info(hdev);
4637}
4638
4639static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4640 size_t irq_arr_size)
4641{
4642 struct armcp_unmask_irq_arr_packet *pkt;
4643 size_t total_pkt_size;
4644 long result;
4645 int rc;
4646
4647 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
4648 irq_arr_size;
4649
4650	/* data should be aligned to 8 bytes in order for ArmCP to copy it */
4651 total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
4652
4653	/* total_pkt_size is cast to u16 later on */
4654 if (total_pkt_size > USHRT_MAX) {
4655 dev_err(hdev->dev, "too many elements in IRQ array\n");
4656 return -EINVAL;
4657 }
4658
4659 pkt = kzalloc(total_pkt_size, GFP_KERNEL);
4660 if (!pkt)
4661 return -ENOMEM;
4662
4663 pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
4664 memcpy(&pkt->irqs, irq_arr, irq_arr_size);
4665
4666 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
4667 ARMCP_PKT_CTL_OPCODE_SHIFT);
4668
4669 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
4670 total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
4671
4672 if (rc)
4673 dev_err(hdev->dev, "failed to unmask IRQ array\n");
4674
4675 kfree(pkt);
4676
4677 return rc;
4678}
4679
4680static int goya_soft_reset_late_init(struct hl_device *hdev)
4681{
4682 /*
4683 * Unmask all IRQs since some could have been received
4684 * during the soft reset
4685 */
4686 return goya_unmask_irq_arr(hdev, goya_non_fatal_events,
4687 sizeof(goya_non_fatal_events));
4688}
4689
4690static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
4691{
4692 struct armcp_packet pkt;
4693 long result;
4694 int rc;
4695
4696 memset(&pkt, 0, sizeof(pkt));
4697
4698 pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ <<
4699 ARMCP_PKT_CTL_OPCODE_SHIFT);
4700 pkt.value = cpu_to_le64(event_type);
4701
4702 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
4703 HL_DEVICE_TIMEOUT_USEC, &result);
4704
4705 if (rc)
4706		dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d\n", event_type);
4707
4708 return rc;
4709}
4710
4711void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
4712{
4713 u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
4714 u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
4715 >> EQ_CTL_EVENT_TYPE_SHIFT);
4716 struct goya_device *goya = hdev->asic_specific;
4717
4718 goya->events_stat[event_type]++;
4719
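	/* Fatal events (ECC, PLL, unexpected resets) trigger a hard reset of
	 * the device; recoverable events are printed and re-armed via an IRQ
	 * unmask packet to ArmCP; BMON/SPMU events are informational only
	 */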
4720 switch (event_type) {
4721 case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4722 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4723 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4724 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4725 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4726 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4727 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4728 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4729 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4730 case GOYA_ASYNC_EVENT_ID_MME_ECC:
4731 case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4732 case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4733 case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4734 case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4735 case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4736 case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4737 case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4738 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4739 case GOYA_ASYNC_EVENT_ID_GIC500:
4740 case GOYA_ASYNC_EVENT_ID_PLL0:
4741 case GOYA_ASYNC_EVENT_ID_PLL1:
4742 case GOYA_ASYNC_EVENT_ID_PLL3:
4743 case GOYA_ASYNC_EVENT_ID_PLL4:
4744 case GOYA_ASYNC_EVENT_ID_PLL5:
4745 case GOYA_ASYNC_EVENT_ID_PLL6:
4746 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4747 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4748 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4749 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4750 dev_err(hdev->dev,
4751			"Received H/W interrupt %d, resetting the chip\n",
4752 event_type);
4753 hl_device_reset(hdev, true, false);
4754 break;
4755
4756 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4757 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4758 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4759 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4760 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4761 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4762 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4763 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4764 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4765 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4766 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4767 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4768 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4769 case GOYA_ASYNC_EVENT_ID_PSOC:
4770 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4771 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4772 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4773 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4774 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4775 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4776 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4777 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4778 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4779 case GOYA_ASYNC_EVENT_ID_MME_QM:
4780 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4781 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4782 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4783 goya_print_irq_info(hdev, event_type);
4784 goya_unmask_irq(hdev, event_type);
4785 break;
4786
4787 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4788 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4789 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4790 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4791 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4792 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4793 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4794 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4795 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0:
4796 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH1:
4797 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH2:
4798 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH3:
4799 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4800 dev_info(hdev->dev, "Received H/W interrupt %d\n", event_type);
4801 break;
4802
4803 default:
4804 dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
4805 event_type);
4806 break;
4807 }
4808}
4809
4810void *goya_get_events_stat(struct hl_device *hdev, u32 *size)
4811{
4812 struct goya_device *goya = hdev->asic_specific;
4813
4814 *size = (u32) sizeof(goya->events_stat);
4815
4816 return goya->events_stat;
4817}
4818
4819static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size,
4820 u64 val, bool is_dram)
4821{
4822 struct packet_lin_dma *lin_dma_pkt;
4823 struct hl_cs_parser parser;
4824 struct hl_cs_job *job;
4825 u32 cb_size, ctl;
4826 struct hl_cb *cb;
4827 int rc;
4828
4829 cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
4830 if (!cb)
4831 return -EFAULT;
4832
4833 lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;
4834
4835 memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
4836 cb_size = sizeof(*lin_dma_pkt);
4837
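	/* With the MEMSET bit set, the LIN_DMA packet uses src_addr as the
	 * 64-bit fill value rather than as a source address
	 */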
4838 ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
4839 (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
4840 (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
4841 (1 << GOYA_PKT_CTL_RB_SHIFT) |
4842 (1 << GOYA_PKT_CTL_MB_SHIFT));
4843 ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
4844 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
4845 lin_dma_pkt->ctl = cpu_to_le32(ctl);
4846
4847 lin_dma_pkt->src_addr = cpu_to_le64(val);
4848 lin_dma_pkt->dst_addr = cpu_to_le64(addr);
4849 lin_dma_pkt->tsize = cpu_to_le32(size);
4850
4851 job = hl_cs_allocate_job(hdev, true);
4852 if (!job) {
4853 dev_err(hdev->dev, "Failed to allocate a new job\n");
4854 rc = -ENOMEM;
4855 goto release_cb;
4856 }
4857
4858 job->id = 0;
4859 job->user_cb = cb;
4860 job->user_cb->cs_cnt++;
4861 job->user_cb_size = cb_size;
4862 job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
4863
4864 hl_debugfs_add_job(hdev, job);
4865
4866 parser.ctx_id = HL_KERNEL_ASID_ID;
4867 parser.cs_sequence = 0;
4868 parser.job_id = job->id;
4869 parser.hw_queue_id = job->hw_queue_id;
4870 parser.job_userptr_list = &job->userptr_list;
4871 parser.user_cb = job->user_cb;
4872 parser.user_cb_size = job->user_cb_size;
4873 parser.ext_queue = job->ext_queue;
4874 parser.use_virt_addr = hdev->mmu_enable;
4875
4876 rc = hdev->asic_funcs->cs_parser(hdev, &parser);
4877 if (rc) {
4878 dev_err(hdev->dev, "Failed to parse kernel CB\n");
4879 goto free_job;
4880 }
4881
4882 job->patched_cb = parser.patched_cb;
4883 job->job_cb_size = parser.patched_cb_size;
4884 job->patched_cb->cs_cnt++;
4885
4886 rc = goya_send_job_on_qman0(hdev, job);
4887
4888 job->patched_cb->cs_cnt--;
4889 hl_cb_put(job->patched_cb);
4890
4891free_job:
4892 hl_userptr_delete_list(hdev, &job->userptr_list);
4893 hl_debugfs_remove_job(hdev, job);
4894 kfree(job);
4895 cb->cs_cnt--;
4896
4897release_cb:
4898 hl_cb_put(cb);
4899 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
4900
4901 return rc;
4902}
4903
4904static int goya_context_switch(struct hl_device *hdev, u32 asid)
4905{
4906 struct asic_fixed_properties *prop = &hdev->asic_prop;
4907 u64 addr = prop->sram_base_address;
4908 u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
4909 u64 val = 0x7777777777777777ull;
4910 int rc;
4911
4912 rc = goya_memset_device_memory(hdev, addr, size, val, false);
4913 if (rc) {
4914 dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
4915 return rc;
4916 }
4917
4918 goya_mmu_prepare(hdev, asid);
4919
4920 return 0;
4921}
4922
4923static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
4924{
4925 struct asic_fixed_properties *prop = &hdev->asic_prop;
4926 struct goya_device *goya = hdev->asic_specific;
4927 u64 addr = prop->mmu_pgt_addr;
4928 u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
4929 MMU_CACHE_MNG_SIZE;
4930
4931 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4932 return 0;
4933
4934 return goya_memset_device_memory(hdev, addr, size, 0, true);
4935}
4936
4937static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
4938{
4939 struct goya_device *goya = hdev->asic_specific;
4940 u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
4941 u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
4942 u64 val = 0x9999999999999999ull;
4943
4944 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4945 return 0;
4946
4947 return goya_memset_device_memory(hdev, addr, size, val, true);
4948}
4949
4950static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
4951{
4952 struct goya_device *goya = hdev->asic_specific;
4953 int i;
4954
4955 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4956 return;
4957
4958 if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
4959 WARN(1, "asid %u is too big\n", asid);
4960 return;
4961 }
4962
4963 /* zero the MMBP and ASID bits and then set the ASID */
4964 for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++) {
4965 WREG32_AND(goya_mmu_regs[i], ~0x7FF);
4966 WREG32_OR(goya_mmu_regs[i], asid);
4967 }
4968}
4969
4970static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
4971{
4972 struct goya_device *goya = hdev->asic_specific;
4973 u32 status, timeout_usec;
4974 int rc;
4975
4976 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4977 return;
4978
4979	/* no need for an L1-only invalidation in Goya */
4980 if (!is_hard)
4981 return;
4982
4983 if (hdev->pldm)
4984 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
4985 else
4986 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
4987
4988 mutex_lock(&hdev->mmu_cache_lock);
4989
4990 /* L0 & L1 invalidation */
4991 WREG32(mmSTLB_INV_ALL_START, 1);
4992
4993 rc = hl_poll_timeout(
4994 hdev,
4995 mmSTLB_INV_ALL_START,
4996 status,
4997 !status,
4998 1000,
4999 timeout_usec);
5000
5001 mutex_unlock(&hdev->mmu_cache_lock);
5002
5003 if (rc)
5004 dev_notice_ratelimited(hdev->dev,
5005 "Timeout when waiting for MMU cache invalidation\n");
5006}
5007
5008static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
5009 bool is_hard, u32 asid, u64 va, u64 size)
5010{
5011 struct goya_device *goya = hdev->asic_specific;
5012 u32 status, timeout_usec, inv_data, pi;
5013 int rc;
5014
5015 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
5016 return;
5017
5018	/* no need for an L1-only invalidation in Goya */
5019 if (!is_hard)
5020 return;
5021
5022 if (hdev->pldm)
5023 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
5024 else
5025 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
5026
5027 mutex_lock(&hdev->mmu_cache_lock);
5028
5029	/*
5030	 * TODO: currently we invalidate the entire L0 & L1, as in a regular
5031	 * hard invalidation. We still need to apply invalidation of specific
5032	 * cache lines with a mask of ASID & VA & size.
5033	 * Note that the L1 will be flushed entirely in any case.
5034	 */
5035
5036 /* L0 & L1 invalidation */
5037 inv_data = RREG32(mmSTLB_CACHE_INV);
5038 /* PI is 8 bit */
5039 pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
5040 WREG32(mmSTLB_CACHE_INV,
5041 (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);
5042
5043 rc = hl_poll_timeout(
5044 hdev,
5045 mmSTLB_INV_CONSUMER_INDEX,
5046 status,
5047 status == pi,
5048 1000,
5049 timeout_usec);
5050
5051 mutex_unlock(&hdev->mmu_cache_lock);
5052
5053 if (rc)
5054 dev_notice_ratelimited(hdev->dev,
5055 "Timeout when waiting for MMU cache invalidation\n");
5056}
5057
5058static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
5059 u64 phys_addr)
5060{
5061 u32 status, timeout_usec;
5062 int rc;
5063
5064 if (hdev->pldm)
5065 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
5066 else
5067 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
5068
5069 WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
5070 WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
5071 WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
5072
5073 rc = hl_poll_timeout(
5074 hdev,
5075 MMU_ASID_BUSY,
5076 status,
5077 !(status & 0x80000000),
5078 1000,
5079 timeout_usec);
5080
5081 if (rc) {
5082 dev_err(hdev->dev,
5083 "Timeout during MMU hop0 config of asid %d\n", asid);
5084 return rc;
5085 }
5086
5087 return 0;
5088}
5089
5090int goya_send_heartbeat(struct hl_device *hdev)
5091{
5092 struct goya_device *goya = hdev->asic_specific;
5093 struct armcp_packet hb_pkt;
5094 long result;
5095 int rc;
5096
5097 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5098 return 0;
5099
5100 memset(&hb_pkt, 0, sizeof(hb_pkt));
5101
5102 hb_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
5103 ARMCP_PKT_CTL_OPCODE_SHIFT);
5104 hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
5105
5106 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
5107 sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
5108
5109 if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
5110 rc = -EIO;
5111
5112 return rc;
5113}
5114
5115static int goya_armcp_info_get(struct hl_device *hdev)
5116{
5117 struct goya_device *goya = hdev->asic_specific;
5118 struct asic_fixed_properties *prop = &hdev->asic_prop;
5119 struct armcp_packet pkt;
5120 void *armcp_info_cpu_addr;
5121 dma_addr_t armcp_info_dma_addr;
5122 u64 dram_size;
5123 long result;
5124 int rc;
5125
5126 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5127 return 0;
5128
5129 armcp_info_cpu_addr =
5130 hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
5131 sizeof(struct armcp_info), &armcp_info_dma_addr);
5132 if (!armcp_info_cpu_addr) {
5133 dev_err(hdev->dev,
5134 "Failed to allocate DMA memory for ArmCP info packet\n");
5135 return -ENOMEM;
5136 }
5137
5138 memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info));
5139
5140 memset(&pkt, 0, sizeof(pkt));
5141
5142 pkt.ctl = cpu_to_le32(ARMCP_PACKET_INFO_GET <<
5143 ARMCP_PKT_CTL_OPCODE_SHIFT);
5144 pkt.addr = cpu_to_le64(armcp_info_dma_addr +
5145 prop->host_phys_base_address);
5146 pkt.data_max_size = cpu_to_le32(sizeof(struct armcp_info));
5147
5148 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
5149 GOYA_ARMCP_INFO_TIMEOUT, &result);
5150
5151 if (rc) {
5152 dev_err(hdev->dev,
5153 "Failed to send armcp info pkt, error %d\n", rc);
5154 goto out;
5155 }
5156
5157 memcpy(&prop->armcp_info, armcp_info_cpu_addr,
5158 sizeof(prop->armcp_info));
5159
5160 dram_size = le64_to_cpu(prop->armcp_info.dram_size);
5161 if (dram_size) {
5162 if ((!is_power_of_2(dram_size)) ||
5163 (dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
5164 dev_err(hdev->dev,
5165 "F/W reported invalid DRAM size %llu. Trying to use default size\n",
5166 dram_size);
5167 dram_size = DRAM_PHYS_DEFAULT_SIZE;
5168 }
5169
5170 prop->dram_size = dram_size;
5171 prop->dram_end_address = prop->dram_base_address + dram_size;
5172 }
5173
5174 rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors);
5175 if (rc) {
5176 dev_err(hdev->dev,
5177 "Failed to build hwmon channel info, error %d\n", rc);
5178 rc = -EFAULT;
5179 goto out;
5180 }
5181
5182out:
5183 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
5184 sizeof(struct armcp_info), armcp_info_cpu_addr);
5185
5186 return rc;
5187}
5188
5189static void goya_init_clock_gating(struct hl_device *hdev)
5190{
5191
5192}
5193
5194static void goya_disable_clock_gating(struct hl_device *hdev)
5195{
5196
5197}
5198
5199static bool goya_is_device_idle(struct hl_device *hdev)
5200{
5201 u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg;
5202 int i;
5203
5204 offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
5205
5206 for (i = 0 ; i < DMA_MAX_NUM ; i++) {
5207 dma_qm_reg = mmDMA_QM_0_GLBL_STS0 + i * offset;
5208
5209 if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) !=
5210 DMA_QM_IDLE_MASK)
5211 return false;
5212 }
5213
5214 offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
5215
5216 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
5217 tpc_qm_reg = mmTPC0_QM_GLBL_STS0 + i * offset;
5218 tpc_cmdq_reg = mmTPC0_CMDQ_GLBL_STS0 + i * offset;
5219 tpc_cfg_reg = mmTPC0_CFG_STATUS + i * offset;
5220
5221 if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) !=
5222 TPC_QM_IDLE_MASK)
5223 return false;
5224
5225 if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) !=
5226 TPC_CMDQ_IDLE_MASK)
5227 return false;
5228
5229 if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) !=
5230 TPC_CFG_IDLE_MASK)
5231 return false;
5232 }
5233
5234 if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) !=
5235 MME_QM_IDLE_MASK)
5236 return false;
5237
5238 if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) !=
5239 MME_CMDQ_IDLE_MASK)
5240 return false;
5241
5242 if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) !=
5243 MME_ARCH_IDLE_MASK)
5244 return false;
5245
5246 if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK)
5247 return false;
5248
5249 return true;
5250}
5251
5252static void goya_hw_queues_lock(struct hl_device *hdev)
5253{
5254 struct goya_device *goya = hdev->asic_specific;
5255
5256 spin_lock(&goya->hw_queues_lock);
5257}
5258
5259static void goya_hw_queues_unlock(struct hl_device *hdev)
5260{
5261 struct goya_device *goya = hdev->asic_specific;
5262
5263 spin_unlock(&goya->hw_queues_lock);
5264}
5265
5266static u32 goya_get_pci_id(struct hl_device *hdev)
5267{
5268 return hdev->pdev->device;
5269}
5270
5271static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
5272 size_t max_size)
5273{
5274 struct goya_device *goya = hdev->asic_specific;
5275 struct asic_fixed_properties *prop = &hdev->asic_prop;
5276 struct armcp_packet pkt;
5277 void *eeprom_info_cpu_addr;
5278 dma_addr_t eeprom_info_dma_addr;
5279 long result;
5280 int rc;
5281
5282 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5283 return 0;
5284
5285 eeprom_info_cpu_addr =
5286 hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
5287 max_size, &eeprom_info_dma_addr);
5288 if (!eeprom_info_cpu_addr) {
5289 dev_err(hdev->dev,
5290 "Failed to allocate DMA memory for EEPROM info packet\n");
5291 return -ENOMEM;
5292 }
5293
5294 memset(eeprom_info_cpu_addr, 0, max_size);
5295
5296 memset(&pkt, 0, sizeof(pkt));
5297
5298 pkt.ctl = cpu_to_le32(ARMCP_PACKET_EEPROM_DATA_GET <<
5299 ARMCP_PKT_CTL_OPCODE_SHIFT);
5300 pkt.addr = cpu_to_le64(eeprom_info_dma_addr +
5301 prop->host_phys_base_address);
5302 pkt.data_max_size = cpu_to_le32(max_size);
5303
5304 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
5305 GOYA_ARMCP_EEPROM_TIMEOUT, &result);
5306
5307 if (rc) {
5308 dev_err(hdev->dev,
5309 "Failed to send armcp EEPROM pkt, error %d\n", rc);
5310 goto out;
5311 }
5312
5313 /* result contains the actual size */
5314 memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
5315
5316out:
5317 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
5318 eeprom_info_cpu_addr);
5319
5320 return rc;
5321}
5322
5323static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
5324{
5325 return RREG32(mmPSOC_GLOBAL_CONF_APP_STATUS);
5326}
5327
5328static const struct hl_asic_funcs goya_funcs = {
5329 .early_init = goya_early_init,
5330 .early_fini = goya_early_fini,
5331 .late_init = goya_late_init,
5332 .late_fini = goya_late_fini,
5333 .sw_init = goya_sw_init,
5334 .sw_fini = goya_sw_fini,
5335 .hw_init = goya_hw_init,
5336 .hw_fini = goya_hw_fini,
5337 .halt_engines = goya_halt_engines,
5338 .suspend = goya_suspend,
5339 .resume = goya_resume,
5340 .cb_mmap = goya_cb_mmap,
5341 .ring_doorbell = goya_ring_doorbell,
5342 .flush_pq_write = goya_flush_pq_write,
5343 .dma_alloc_coherent = goya_dma_alloc_coherent,
5344 .dma_free_coherent = goya_dma_free_coherent,
5345 .get_int_queue_base = goya_get_int_queue_base,
5346 .test_queues = goya_test_queues,
5347 .dma_pool_zalloc = goya_dma_pool_zalloc,
5348 .dma_pool_free = goya_dma_pool_free,
5349 .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
5350 .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
5351 .hl_dma_unmap_sg = goya_dma_unmap_sg,
5352 .cs_parser = goya_cs_parser,
5353 .asic_dma_map_sg = goya_dma_map_sg,
5354 .get_dma_desc_list_size = goya_get_dma_desc_list_size,
5355 .add_end_of_cb_packets = goya_add_end_of_cb_packets,
5356 .update_eq_ci = goya_update_eq_ci,
5357 .context_switch = goya_context_switch,
5358 .restore_phase_topology = goya_restore_phase_topology,
5359 .debugfs_read32 = goya_debugfs_read32,
5360 .debugfs_write32 = goya_debugfs_write32,
5361 .add_device_attr = goya_add_device_attr,
5362 .handle_eqe = goya_handle_eqe,
5363 .set_pll_profile = goya_set_pll_profile,
5364 .get_events_stat = goya_get_events_stat,
5365 .read_pte = goya_read_pte,
5366 .write_pte = goya_write_pte,
5367 .mmu_invalidate_cache = goya_mmu_invalidate_cache,
5368 .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
5369 .send_heartbeat = goya_send_heartbeat,
5370 .enable_clock_gating = goya_init_clock_gating,
5371 .disable_clock_gating = goya_disable_clock_gating,
5372 .is_device_idle = goya_is_device_idle,
5373 .soft_reset_late_init = goya_soft_reset_late_init,
5374 .hw_queues_lock = goya_hw_queues_lock,
5375 .hw_queues_unlock = goya_hw_queues_unlock,
5376 .get_pci_id = goya_get_pci_id,
5377 .get_eeprom_data = goya_get_eeprom_data,
5378 .send_cpu_message = goya_send_cpu_message,
5379 .get_hw_state = goya_get_hw_state
5380};
5381
5382/*
5383 * goya_set_asic_funcs - set Goya function pointers
5384 *
5385 * @*hdev: pointer to hl_device structure
5386 *
5387 */
5388void goya_set_asic_funcs(struct hl_device *hdev)
5389{
5390 hdev->asic_funcs = &goya_funcs;
5391}
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
new file mode 100644
index 000000000000..830551b6b062
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -0,0 +1,211 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2019 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef GOYAP_H_
9#define GOYAP_H_
10
11#include <uapi/misc/habanalabs.h>
12#include "habanalabs.h"
13#include "include/hl_boot_if.h"
14#include "include/goya/goya_packets.h"
15#include "include/goya/goya.h"
16#include "include/goya/goya_async_events.h"
17#include "include/goya/goya_fw_if.h"
18
19#define NUMBER_OF_CMPLT_QUEUES 5
20#define NUMBER_OF_EXT_HW_QUEUES 5
21#define NUMBER_OF_CPU_HW_QUEUES 1
22#define NUMBER_OF_INT_HW_QUEUES 9
23#define NUMBER_OF_HW_QUEUES (NUMBER_OF_EXT_HW_QUEUES + \
24 NUMBER_OF_CPU_HW_QUEUES + \
25 NUMBER_OF_INT_HW_QUEUES)
26
27/*
28 * Number of MSI-X interrupt IDs:
29 * Each completion queue has 1 ID
30 * The event queue has 1 ID
31 */
32#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + 1)
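/*
 * For example, with NUMBER_OF_CMPLT_QUEUES == 5 (defined above) this works
 * out to 5 + 1 = 6 MSI-X vectors: one per completion queue plus one for the
 * event queue.
 */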
33
34#if (NUMBER_OF_HW_QUEUES >= HL_MAX_QUEUES)
35#error "Number of H/W queues must be smaller than HL_MAX_QUEUES"
36#endif
37
38#if (NUMBER_OF_INTERRUPTS > GOYA_MSIX_ENTRIES)
39#error "Number of MSIX interrupts must be smaller than or equal to GOYA_MSIX_ENTRIES"
40#endif
41
42#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */
43
44#define QMAN_STOP_TIMEOUT_USEC 100000 /* 100 ms */
45
46#define TPC_ENABLED_MASK 0xFF
47
48#define PLL_HIGH_DEFAULT 1575000000 /* 1.575 GHz */
49
50#define MAX_POWER_DEFAULT 200000 /* 200W */
51
52#define GOYA_ARMCP_INFO_TIMEOUT 10000000 /* 10s */
53#define GOYA_ARMCP_EEPROM_TIMEOUT 10000000 /* 10s */
54
55#define DRAM_PHYS_DEFAULT_SIZE 0x100000000ull /* 4GB */
56
57/* DRAM Memory Map */
58
59#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
60#define MMU_PAGE_TABLES_SIZE 0x0DE00000 /* 222MB */
61#define MMU_DRAM_DEFAULT_PAGE_SIZE 0x00200000 /* 2MB */
62#define MMU_CACHE_MNG_SIZE 0x00001000 /* 4KB */
63#define CPU_PQ_PKT_SIZE 0x00001000 /* 4KB */
64#define CPU_PQ_DATA_SIZE 0x01FFE000 /* 32MB - 8KB */
65
66#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE
67#define MMU_PAGE_TABLES_ADDR (CPU_FW_IMAGE_ADDR + CPU_FW_IMAGE_SIZE)
68#define MMU_DRAM_DEFAULT_PAGE_ADDR (MMU_PAGE_TABLES_ADDR + \
69 MMU_PAGE_TABLES_SIZE)
70#define MMU_CACHE_MNG_ADDR (MMU_DRAM_DEFAULT_PAGE_ADDR + \
71 MMU_DRAM_DEFAULT_PAGE_SIZE)
72#define CPU_PQ_PKT_ADDR (MMU_CACHE_MNG_ADDR + \
73 MMU_CACHE_MNG_SIZE)
74#define CPU_PQ_DATA_ADDR (CPU_PQ_PKT_ADDR + CPU_PQ_PKT_SIZE)
75#define DRAM_BASE_ADDR_USER (CPU_PQ_DATA_ADDR + CPU_PQ_DATA_SIZE)
76
77#if (DRAM_BASE_ADDR_USER != 0x20000000)
78#error "KMD must reserve 512MB"
79#endif
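/*
 * A quick sanity check of the carve-out above, using only the sizes defined
 * in this file:
 *
 *   CPU_FW_IMAGE_SIZE              256MB
 * + MMU_PAGE_TABLES_SIZE           222MB
 * + MMU_DRAM_DEFAULT_PAGE_SIZE       2MB
 * + MMU_CACHE_MNG_SIZE               4KB
 * + CPU_PQ_PKT_SIZE                  4KB
 * + CPU_PQ_DATA_SIZE          32MB - 8KB
 * -------------------------------------
 *                                  512MB (0x20000000)
 *
 * which matches the DRAM_BASE_ADDR_USER value enforced by the #error check
 * above.
 */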
80
81/*
82 * SRAM Memory Map for KMD
83 *
84 * KMD occupies the first GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START bytes of
85 * SRAM. This space is used for the MME/TPC QMANs.
86 *
87 */
88
89#define MME_QMAN_BASE_OFFSET 0x000000 /* Must be 0 */
90#define MME_QMAN_LENGTH 64
91#define TPC_QMAN_LENGTH 64
92
93#define TPC0_QMAN_BASE_OFFSET (MME_QMAN_BASE_OFFSET + \
94 (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
95#define TPC1_QMAN_BASE_OFFSET (TPC0_QMAN_BASE_OFFSET + \
96 (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
97#define TPC2_QMAN_BASE_OFFSET (TPC1_QMAN_BASE_OFFSET + \
98 (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
99#define TPC3_QMAN_BASE_OFFSET (TPC2_QMAN_BASE_OFFSET + \
100 (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
101#define TPC4_QMAN_BASE_OFFSET (TPC3_QMAN_BASE_OFFSET + \
102 (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
103#define TPC5_QMAN_BASE_OFFSET (TPC4_QMAN_BASE_OFFSET + \
104 (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
105#define TPC6_QMAN_BASE_OFFSET (TPC5_QMAN_BASE_OFFSET + \
106 (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
107#define TPC7_QMAN_BASE_OFFSET (TPC6_QMAN_BASE_OFFSET + \
108 (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
109
110#define SRAM_KMD_RES_OFFSET (TPC7_QMAN_BASE_OFFSET + \
111 (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
112
113#if (SRAM_KMD_RES_OFFSET >= GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START)
114#error "MME/TPC QMANs SRAM space exceeds limit"
115#endif
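/*
 * The reserved area therefore holds 9 queues (1 MME + 8 TPC), each with 64
 * entries, so SRAM_KMD_RES_OFFSET ends up at 9 * 64 * QMAN_PQ_ENTRY_SIZE
 * bytes from the start of SRAM (the exact byte count depends on
 * QMAN_PQ_ENTRY_SIZE, which is defined elsewhere in the driver).
 */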
116
117#define SRAM_USER_BASE_OFFSET GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START
118
119/* Virtual address space */
120#define VA_HOST_SPACE_START 0x1000000000000ull /* 256TB */
121#define VA_HOST_SPACE_END	0x3FF8000000000ull	/* 1PB - 512GB */
122#define VA_HOST_SPACE_SIZE	(VA_HOST_SPACE_END - \
123					VA_HOST_SPACE_START) /* 767.5TB */
124
125#define VA_DDR_SPACE_START 0x800000000ull /* 32GB */
126#define VA_DDR_SPACE_END 0x2000000000ull /* 128GB */
127#define VA_DDR_SPACE_SIZE (VA_DDR_SPACE_END - \
128					VA_DDR_SPACE_START)	/* 96GB */
129
130#define DMA_MAX_TRANSFER_SIZE U32_MAX
131
132#define HW_CAP_PLL 0x00000001
133#define HW_CAP_DDR_0 0x00000002
134#define HW_CAP_DDR_1 0x00000004
135#define HW_CAP_MME 0x00000008
136#define HW_CAP_CPU 0x00000010
137#define HW_CAP_DMA 0x00000020
138#define HW_CAP_MSIX 0x00000040
139#define HW_CAP_CPU_Q 0x00000080
140#define HW_CAP_MMU 0x00000100
141#define HW_CAP_TPC_MBIST 0x00000200
142#define HW_CAP_GOLDEN 0x00000400
143#define HW_CAP_TPC 0x00000800
144
145#define CPU_PKT_SHIFT 5
146#define CPU_PKT_SIZE (1 << CPU_PKT_SHIFT)
147#define CPU_PKT_MASK (~((1 << CPU_PKT_SHIFT) - 1))
148#define CPU_MAX_PKTS_IN_CB 32
149#define CPU_CB_SIZE (CPU_PKT_SIZE * CPU_MAX_PKTS_IN_CB)
150#define CPU_ACCESSIBLE_MEM_SIZE (HL_QUEUE_LENGTH * CPU_CB_SIZE)
151
152enum goya_fw_component {
153 FW_COMP_UBOOT,
154 FW_COMP_PREBOOT
155};
156
157struct goya_device {
158 int (*test_cpu_queue)(struct hl_device *hdev);
159 int (*armcp_info_get)(struct hl_device *hdev);
160
161 /* TODO: remove hw_queues_lock after moving to scheduler code */
162 spinlock_t hw_queues_lock;
163
164 u64 mme_clk;
165 u64 tpc_clk;
166 u64 ic_clk;
167
168 u64 ddr_bar_cur_addr;
169 u32 events_stat[GOYA_ASYNC_EVENT_ID_SIZE];
170 u32 hw_cap_initialized;
171};
172
173int goya_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus,
174 u8 i2c_addr, u8 i2c_reg, u32 *val);
175int goya_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus,
176 u8 i2c_addr, u8 i2c_reg, u32 val);
177int goya_test_cpu_queue(struct hl_device *hdev);
178int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
179 u32 timeout, long *result);
180long goya_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
181long goya_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr);
182long goya_get_current(struct hl_device *hdev, int sensor_index, u32 attr);
183long goya_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr);
184long goya_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr);
185void goya_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
186 long value);
187void goya_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state);
188void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
189void goya_add_device_attr(struct hl_device *hdev,
190 struct attribute_group *dev_attr_grp);
191void goya_init_security(struct hl_device *hdev);
192u64 goya_get_max_power(struct hl_device *hdev);
193void goya_set_max_power(struct hl_device *hdev, u64 value);
194
195int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
196void goya_late_fini(struct hl_device *hdev);
197int goya_suspend(struct hl_device *hdev);
198int goya_resume(struct hl_device *hdev);
199void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val);
200void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry);
201void *goya_get_events_stat(struct hl_device *hdev, u32 *size);
202void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
203 u32 cq_val, u32 msix_vec);
204int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser);
205void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
206 dma_addr_t *dma_handle, u16 *queue_len);
207u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt);
208int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id);
209int goya_send_heartbeat(struct hl_device *hdev);
210
211#endif /* GOYAP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
new file mode 100644
index 000000000000..088692c852b6
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -0,0 +1,254 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "goyaP.h"
9
10void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
11{
12 struct goya_device *goya = hdev->asic_specific;
13
14 switch (freq) {
15 case PLL_HIGH:
16 hl_set_frequency(hdev, MME_PLL, hdev->high_pll);
17 hl_set_frequency(hdev, TPC_PLL, hdev->high_pll);
18 hl_set_frequency(hdev, IC_PLL, hdev->high_pll);
19 break;
20 case PLL_LOW:
21 hl_set_frequency(hdev, MME_PLL, GOYA_PLL_FREQ_LOW);
22 hl_set_frequency(hdev, TPC_PLL, GOYA_PLL_FREQ_LOW);
23 hl_set_frequency(hdev, IC_PLL, GOYA_PLL_FREQ_LOW);
24 break;
25 case PLL_LAST:
26 hl_set_frequency(hdev, MME_PLL, goya->mme_clk);
27 hl_set_frequency(hdev, TPC_PLL, goya->tpc_clk);
28 hl_set_frequency(hdev, IC_PLL, goya->ic_clk);
29 break;
30 default:
31 dev_err(hdev->dev, "unknown frequency setting\n");
32 }
33}
34
35static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
36 char *buf)
37{
38 struct hl_device *hdev = dev_get_drvdata(dev);
39 long value;
40
41 if (hl_device_disabled_or_in_reset(hdev))
42 return -ENODEV;
43
44 value = hl_get_frequency(hdev, MME_PLL, false);
45
46 if (value < 0)
47 return value;
48
49 return sprintf(buf, "%lu\n", value);
50}
51
52static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr,
53 const char *buf, size_t count)
54{
55 struct hl_device *hdev = dev_get_drvdata(dev);
56 struct goya_device *goya = hdev->asic_specific;
57 int rc;
58 long value;
59
60 if (hl_device_disabled_or_in_reset(hdev)) {
61 count = -ENODEV;
62 goto fail;
63 }
64
65 if (hdev->pm_mng_profile == PM_AUTO) {
66 count = -EPERM;
67 goto fail;
68 }
69
70 rc = kstrtoul(buf, 0, &value);
71
72 if (rc) {
73 count = -EINVAL;
74 goto fail;
75 }
76
77 hl_set_frequency(hdev, MME_PLL, value);
78 goya->mme_clk = value;
79
80fail:
81 return count;
82}
83
84static ssize_t tpc_clk_show(struct device *dev, struct device_attribute *attr,
85 char *buf)
86{
87 struct hl_device *hdev = dev_get_drvdata(dev);
88 long value;
89
90 if (hl_device_disabled_or_in_reset(hdev))
91 return -ENODEV;
92
93 value = hl_get_frequency(hdev, TPC_PLL, false);
94
95 if (value < 0)
96 return value;
97
98 return sprintf(buf, "%lu\n", value);
99}
100
101static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr,
102 const char *buf, size_t count)
103{
104 struct hl_device *hdev = dev_get_drvdata(dev);
105 struct goya_device *goya = hdev->asic_specific;
106 int rc;
107 long value;
108
109 if (hl_device_disabled_or_in_reset(hdev)) {
110 count = -ENODEV;
111 goto fail;
112 }
113
114 if (hdev->pm_mng_profile == PM_AUTO) {
115 count = -EPERM;
116 goto fail;
117 }
118
119 rc = kstrtoul(buf, 0, &value);
120
121 if (rc) {
122 count = -EINVAL;
123 goto fail;
124 }
125
126 hl_set_frequency(hdev, TPC_PLL, value);
127 goya->tpc_clk = value;
128
129fail:
130 return count;
131}
132
133static ssize_t ic_clk_show(struct device *dev, struct device_attribute *attr,
134 char *buf)
135{
136 struct hl_device *hdev = dev_get_drvdata(dev);
137 long value;
138
139 if (hl_device_disabled_or_in_reset(hdev))
140 return -ENODEV;
141
142 value = hl_get_frequency(hdev, IC_PLL, false);
143
144 if (value < 0)
145 return value;
146
147 return sprintf(buf, "%lu\n", value);
148}
149
150static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr,
151 const char *buf, size_t count)
152{
153 struct hl_device *hdev = dev_get_drvdata(dev);
154 struct goya_device *goya = hdev->asic_specific;
155 int rc;
156 long value;
157
158 if (hl_device_disabled_or_in_reset(hdev)) {
159 count = -ENODEV;
160 goto fail;
161 }
162
163 if (hdev->pm_mng_profile == PM_AUTO) {
164 count = -EPERM;
165 goto fail;
166 }
167
168 rc = kstrtoul(buf, 0, &value);
169
170 if (rc) {
171 count = -EINVAL;
172 goto fail;
173 }
174
175 hl_set_frequency(hdev, IC_PLL, value);
176 goya->ic_clk = value;
177
178fail:
179 return count;
180}
181
182static ssize_t mme_clk_curr_show(struct device *dev,
183 struct device_attribute *attr, char *buf)
184{
185 struct hl_device *hdev = dev_get_drvdata(dev);
186 long value;
187
188 if (hl_device_disabled_or_in_reset(hdev))
189 return -ENODEV;
190
191 value = hl_get_frequency(hdev, MME_PLL, true);
192
193 if (value < 0)
194 return value;
195
196 return sprintf(buf, "%lu\n", value);
197}
198
199static ssize_t tpc_clk_curr_show(struct device *dev,
200 struct device_attribute *attr, char *buf)
201{
202 struct hl_device *hdev = dev_get_drvdata(dev);
203 long value;
204
205 if (hl_device_disabled_or_in_reset(hdev))
206 return -ENODEV;
207
208 value = hl_get_frequency(hdev, TPC_PLL, true);
209
210 if (value < 0)
211 return value;
212
213 return sprintf(buf, "%lu\n", value);
214}
215
216static ssize_t ic_clk_curr_show(struct device *dev,
217 struct device_attribute *attr, char *buf)
218{
219 struct hl_device *hdev = dev_get_drvdata(dev);
220 long value;
221
222 if (hl_device_disabled_or_in_reset(hdev))
223 return -ENODEV;
224
225 value = hl_get_frequency(hdev, IC_PLL, true);
226
227 if (value < 0)
228 return value;
229
230 return sprintf(buf, "%lu\n", value);
231}
232
233static DEVICE_ATTR_RW(ic_clk);
234static DEVICE_ATTR_RO(ic_clk_curr);
235static DEVICE_ATTR_RW(mme_clk);
236static DEVICE_ATTR_RO(mme_clk_curr);
237static DEVICE_ATTR_RW(tpc_clk);
238static DEVICE_ATTR_RO(tpc_clk_curr);
239
240static struct attribute *goya_dev_attrs[] = {
241 &dev_attr_ic_clk.attr,
242 &dev_attr_ic_clk_curr.attr,
243 &dev_attr_mme_clk.attr,
244 &dev_attr_mme_clk_curr.attr,
245 &dev_attr_tpc_clk.attr,
246 &dev_attr_tpc_clk_curr.attr,
247 NULL,
248};
249
250void goya_add_device_attr(struct hl_device *hdev,
251 struct attribute_group *dev_attr_grp)
252{
253 dev_attr_grp->attrs = goya_dev_attrs;
254}
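/*
 * Usage sketch (an assumption, not taken from this patch): once this
 * attribute group is registered by the core driver, the controls above are
 * expected to appear as plain sysfs files under the device node, e.g.
 * something like
 *
 *   cat /sys/class/habanalabs/hl0/mme_clk_curr      # frequency read from H/W
 *   echo <freq> > /sys/class/habanalabs/hl0/mme_clk # requested frequency
 *
 * where the exact path depends on how the core driver registers the device.
 * As implemented in the _store handlers above, writes are rejected while the
 * device is disabled or in reset, or while pm_mng_profile is PM_AUTO.
 */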
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c
new file mode 100644
index 000000000000..575003238401
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goya_security.c
@@ -0,0 +1,2999 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "goyaP.h"
9
10/*
11 * goya_pb_set_block - set the given block as protected
12 *
13 * @hdev: pointer to hl_device structure
14 * @base: block base address
15 *
16 */
17static void goya_pb_set_block(struct hl_device *hdev, u64 base)
18{
19 u32 pb_addr = base - CFG_BASE + PROT_BITS_OFFS;
20
21 while (pb_addr & 0xFFF) {
22 WREG32(pb_addr, 0);
23 pb_addr += 4;
24 }
25}
26
27static void goya_init_mme_protection_bits(struct hl_device *hdev)
28{
29 u32 pb_addr, mask;
30 u8 word_offset;
31
32	/* TODO: change to the real register names once SoC Online is updated */
33 u64 mmMME_SBB_POWER_ECO1 = 0xDFF60,
34 mmMME_SBB_POWER_ECO2 = 0xDFF64;
35
36 goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_0_BASE);
37 goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_1_BASE);
38 goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_2_BASE);
39 goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_3_BASE);
40
41 goya_pb_set_block(hdev, mmSBA_ECC_MEM_BASE);
42 goya_pb_set_block(hdev, mmSBB_ECC_MEM_BASE);
43
44 goya_pb_set_block(hdev, mmMME1_RTR_BASE);
45 goya_pb_set_block(hdev, mmMME1_RD_REGULATOR_BASE);
46 goya_pb_set_block(hdev, mmMME1_WR_REGULATOR_BASE);
47 goya_pb_set_block(hdev, mmMME2_RTR_BASE);
48 goya_pb_set_block(hdev, mmMME2_RD_REGULATOR_BASE);
49 goya_pb_set_block(hdev, mmMME2_WR_REGULATOR_BASE);
50 goya_pb_set_block(hdev, mmMME3_RTR_BASE);
51 goya_pb_set_block(hdev, mmMME3_RD_REGULATOR_BASE);
52 goya_pb_set_block(hdev, mmMME3_WR_REGULATOR_BASE);
53
54 goya_pb_set_block(hdev, mmMME4_RTR_BASE);
55 goya_pb_set_block(hdev, mmMME4_RD_REGULATOR_BASE);
56 goya_pb_set_block(hdev, mmMME4_WR_REGULATOR_BASE);
57
58 goya_pb_set_block(hdev, mmMME5_RTR_BASE);
59 goya_pb_set_block(hdev, mmMME5_RD_REGULATOR_BASE);
60 goya_pb_set_block(hdev, mmMME5_WR_REGULATOR_BASE);
61
62 goya_pb_set_block(hdev, mmMME6_RTR_BASE);
63 goya_pb_set_block(hdev, mmMME6_RD_REGULATOR_BASE);
64 goya_pb_set_block(hdev, mmMME6_WR_REGULATOR_BASE);
65
66 pb_addr = (mmMME_DUMMY & ~0xFFF) + PROT_BITS_OFFS;
67 word_offset = ((mmMME_DUMMY & PROT_BITS_OFFS) >> 7) << 2;
68 mask = 1 << ((mmMME_DUMMY & 0x7F) >> 2);
69 mask |= 1 << ((mmMME_RESET & 0x7F) >> 2);
70 mask |= 1 << ((mmMME_STALL & 0x7F) >> 2);
71 mask |= 1 << ((mmMME_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
72 mask |= 1 << ((mmMME_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
73 mask |= 1 << ((mmMME_DBGMEM_ADD & 0x7F) >> 2);
74 mask |= 1 << ((mmMME_DBGMEM_DATA_WR & 0x7F) >> 2);
75 mask |= 1 << ((mmMME_DBGMEM_DATA_RD & 0x7F) >> 2);
76 mask |= 1 << ((mmMME_DBGMEM_CTRL & 0x7F) >> 2);
77 mask |= 1 << ((mmMME_DBGMEM_RC & 0x7F) >> 2);
78 mask |= 1 << ((mmMME_LOG_SHADOW & 0x7F) >> 2);
79
80 WREG32(pb_addr + word_offset, ~mask);
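	/*
	 * The pb_addr/word_offset/mask pattern above repeats throughout this
	 * file, so a short reading aid (describing only the arithmetic; which
	 * bit value means "protected" is defined by the H/W): pb_addr is the
	 * protection-bits shadow of the register's 4KB-aligned page (page
	 * base + PROT_BITS_OFFS), word_offset picks a 32-bit protection word
	 * within it, and each register maps to one bit, (reg & 0x7F) >> 2,
	 * i.e. one bit per 4-byte register in a group of 32. Writing ~mask
	 * clears exactly the bits of the registers listed above (the value
	 * goya_pb_set_block() writes block-wide) and sets every other bit in
	 * that word.
	 */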
81
82 pb_addr = (mmMME_STORE_MAX_CREDIT & ~0xFFF) + PROT_BITS_OFFS;
83 word_offset = ((mmMME_STORE_MAX_CREDIT & PROT_BITS_OFFS) >> 7) << 2;
84 mask = 1 << ((mmMME_STORE_MAX_CREDIT & 0x7F) >> 2);
85 mask |= 1 << ((mmMME_AGU & 0x7F) >> 2);
86 mask |= 1 << ((mmMME_SBA & 0x7F) >> 2);
87 mask |= 1 << ((mmMME_SBB & 0x7F) >> 2);
88 mask |= 1 << ((mmMME_SBC & 0x7F) >> 2);
89 mask |= 1 << ((mmMME_WBC & 0x7F) >> 2);
90 mask |= 1 << ((mmMME_SBA_CONTROL_DATA & 0x7F) >> 2);
91 mask |= 1 << ((mmMME_SBB_CONTROL_DATA & 0x7F) >> 2);
92 mask |= 1 << ((mmMME_SBC_CONTROL_DATA & 0x7F) >> 2);
93 mask |= 1 << ((mmMME_WBC_CONTROL_DATA & 0x7F) >> 2);
94 mask |= 1 << ((mmMME_TE & 0x7F) >> 2);
95 mask |= 1 << ((mmMME_TE2DEC & 0x7F) >> 2);
96 mask |= 1 << ((mmMME_REI_STATUS & 0x7F) >> 2);
97 mask |= 1 << ((mmMME_REI_MASK & 0x7F) >> 2);
98 mask |= 1 << ((mmMME_SEI_STATUS & 0x7F) >> 2);
99 mask |= 1 << ((mmMME_SEI_MASK & 0x7F) >> 2);
100 mask |= 1 << ((mmMME_SPI_STATUS & 0x7F) >> 2);
101 mask |= 1 << ((mmMME_SPI_MASK & 0x7F) >> 2);
102
103 WREG32(pb_addr + word_offset, ~mask);
104
105 pb_addr = (mmMME_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
106 word_offset = ((mmMME_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
107 mask = 1 << ((mmMME_QM_GLBL_CFG0 & 0x7F) >> 2);
108 mask |= 1 << ((mmMME_QM_GLBL_CFG1 & 0x7F) >> 2);
109 mask |= 1 << ((mmMME_QM_GLBL_PROT & 0x7F) >> 2);
110 mask |= 1 << ((mmMME_QM_GLBL_ERR_CFG & 0x7F) >> 2);
111 mask |= 1 << ((mmMME_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
112 mask |= 1 << ((mmMME_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
113 mask |= 1 << ((mmMME_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
114 mask |= 1 << ((mmMME_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
115 mask |= 1 << ((mmMME_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
116 mask |= 1 << ((mmMME_QM_GLBL_STS0 & 0x7F) >> 2);
117 mask |= 1 << ((mmMME_QM_GLBL_STS1 & 0x7F) >> 2);
118 mask |= 1 << ((mmMME_QM_PQ_BASE_LO & 0x7F) >> 2);
119 mask |= 1 << ((mmMME_QM_PQ_BASE_HI & 0x7F) >> 2);
120 mask |= 1 << ((mmMME_QM_PQ_SIZE & 0x7F) >> 2);
121 mask |= 1 << ((mmMME_QM_PQ_PI & 0x7F) >> 2);
122 mask |= 1 << ((mmMME_QM_PQ_CI & 0x7F) >> 2);
123 mask |= 1 << ((mmMME_QM_PQ_CFG0 & 0x7F) >> 2);
124 mask |= 1 << ((mmMME_QM_PQ_CFG1 & 0x7F) >> 2);
125 mask |= 1 << ((mmMME_QM_PQ_ARUSER & 0x7F) >> 2);
126
127 WREG32(pb_addr + word_offset, ~mask);
128
129 pb_addr = (mmMME_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
130 word_offset = ((mmMME_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
131 mask = 1 << ((mmMME_QM_PQ_PUSH0 & 0x7F) >> 2);
132 mask |= 1 << ((mmMME_QM_PQ_PUSH1 & 0x7F) >> 2);
133 mask |= 1 << ((mmMME_QM_PQ_PUSH2 & 0x7F) >> 2);
134 mask |= 1 << ((mmMME_QM_PQ_PUSH3 & 0x7F) >> 2);
135 mask |= 1 << ((mmMME_QM_PQ_STS0 & 0x7F) >> 2);
136 mask |= 1 << ((mmMME_QM_PQ_STS1 & 0x7F) >> 2);
137 mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
138 mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
139 mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
140 mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
141 mask |= 1 << ((mmMME_QM_CQ_CFG0 & 0x7F) >> 2);
142 mask |= 1 << ((mmMME_QM_CQ_CFG1 & 0x7F) >> 2);
143 mask |= 1 << ((mmMME_QM_CQ_ARUSER & 0x7F) >> 2);
144 mask |= 1 << ((mmMME_QM_CQ_PTR_LO & 0x7F) >> 2);
145 mask |= 1 << ((mmMME_QM_CQ_PTR_HI & 0x7F) >> 2);
146 mask |= 1 << ((mmMME_QM_CQ_TSIZE & 0x7F) >> 2);
147 mask |= 1 << ((mmMME_QM_CQ_CTL & 0x7F) >> 2);
148 mask |= 1 << ((mmMME_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
149 mask |= 1 << ((mmMME_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
150 mask |= 1 << ((mmMME_QM_CQ_TSIZE_STS & 0x7F) >> 2);
151 mask |= 1 << ((mmMME_QM_CQ_CTL_STS & 0x7F) >> 2);
152 mask |= 1 << ((mmMME_QM_CQ_STS0 & 0x7F) >> 2);
153 mask |= 1 << ((mmMME_QM_CQ_STS1 & 0x7F) >> 2);
154 mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
155 mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
156 mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
157 mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
158
159 WREG32(pb_addr + word_offset, ~mask);
160
161 pb_addr = (mmMME_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
162 word_offset = ((mmMME_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
163 mask = 1 << ((mmMME_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
164 mask |= 1 << ((mmMME_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
165 mask |= 1 << ((mmMME_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
166 mask |= 1 << ((mmMME_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
167 mask |= 1 << ((mmMME_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
168 mask |= 1 << ((mmMME_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
169 mask |= 1 << ((mmMME_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
170 mask |= 1 << ((mmMME_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
171 mask |= 1 << ((mmMME_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
172 mask |= 1 << ((mmMME_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
173 mask |= 1 << ((mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
174 mask |= 1 << ((mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
175 mask |= 1 << ((mmMME_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
176 mask |= 1 << ((mmMME_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
177 mask |= 1 << ((mmMME_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
178
179 WREG32(pb_addr + word_offset, ~mask);
180
181 pb_addr = (mmMME_QM_CP_STS & ~0xFFF) + PROT_BITS_OFFS;
182 word_offset = ((mmMME_QM_CP_STS & PROT_BITS_OFFS) >> 7) << 2;
183 mask = 1 << ((mmMME_QM_CP_STS & 0x7F) >> 2);
184 mask |= 1 << ((mmMME_QM_CP_CURRENT_INST_LO & 0x7F) >> 2);
185 mask |= 1 << ((mmMME_QM_CP_CURRENT_INST_HI & 0x7F) >> 2);
186 mask |= 1 << ((mmMME_QM_CP_BARRIER_CFG & 0x7F) >> 2);
187 mask |= 1 << ((mmMME_QM_CP_DBG_0 & 0x7F) >> 2);
188 mask |= 1 << ((mmMME_QM_PQ_BUF_ADDR & 0x7F) >> 2);
189 mask |= 1 << ((mmMME_QM_PQ_BUF_RDATA & 0x7F) >> 2);
190 mask |= 1 << ((mmMME_QM_CQ_BUF_ADDR & 0x7F) >> 2);
191 mask |= 1 << ((mmMME_QM_CQ_BUF_RDATA & 0x7F) >> 2);
192
193 WREG32(pb_addr + word_offset, ~mask);
194
195 pb_addr = (mmMME_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
196 word_offset = ((mmMME_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
197 mask = 1 << ((mmMME_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
198 mask |= 1 << ((mmMME_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
199 mask |= 1 << ((mmMME_CMDQ_GLBL_PROT & 0x7F) >> 2);
200 mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
201 mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
202 mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
203 mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
204 mask |= 1 << ((mmMME_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
205 mask |= 1 << ((mmMME_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
206 mask |= 1 << ((mmMME_CMDQ_GLBL_STS0 & 0x7F) >> 2);
207 mask |= 1 << ((mmMME_CMDQ_GLBL_STS1 & 0x7F) >> 2);
208
209 WREG32(pb_addr + word_offset, ~mask);
210
211 pb_addr = (mmMME_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
212 word_offset = ((mmMME_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
213 mask = 1 << ((mmMME_CMDQ_CQ_CFG0 & 0x7F) >> 2);
214 mask |= 1 << ((mmMME_CMDQ_CQ_CFG1 & 0x7F) >> 2);
215 mask |= 1 << ((mmMME_CMDQ_CQ_ARUSER & 0x7F) >> 2);
216 mask |= 1 << ((mmMME_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
217 mask |= 1 << ((mmMME_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
218 mask |= 1 << ((mmMME_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
219 mask |= 1 << ((mmMME_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
220 mask |= 1 << ((mmMME_CMDQ_CQ_STS0 & 0x7F) >> 2);
221 mask |= 1 << ((mmMME_CMDQ_CQ_STS1 & 0x7F) >> 2);
222 mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
223 mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
224 mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
225 mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
226
227 WREG32(pb_addr + word_offset, ~mask);
228
229 pb_addr = (mmMME_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
230 word_offset = ((mmMME_CMDQ_CQ_IFIFO_CNT &
231 PROT_BITS_OFFS) >> 7) << 2;
232 mask = 1 << ((mmMME_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
233 mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
234 mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
235 mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
236 mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
237 mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
238 mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
239 mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
240 mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
241 mask |= 1 << ((mmMME_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
242 mask |= 1 << ((mmMME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
243 mask |= 1 << ((mmMME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
244 mask |= 1 << ((mmMME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
245 mask |= 1 << ((mmMME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
246 mask |= 1 << ((mmMME_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
247 mask |= 1 << ((mmMME_CMDQ_CP_STS & 0x7F) >> 2);
248 mask |= 1 << ((mmMME_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
249
250 WREG32(pb_addr + word_offset, ~mask);
251
252 pb_addr = (mmMME_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
253 word_offset = ((mmMME_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
254 << 2;
255 mask = 1 << ((mmMME_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
256 mask |= 1 << ((mmMME_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
257 mask |= 1 << ((mmMME_CMDQ_CP_DBG_0 & 0x7F) >> 2);
258 mask |= 1 << ((mmMME_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
259 mask |= 1 << ((mmMME_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
260
261 WREG32(pb_addr + word_offset, ~mask);
262
263 pb_addr = (mmMME_SBB_POWER_ECO1 & ~0xFFF) + PROT_BITS_OFFS;
264 word_offset = ((mmMME_SBB_POWER_ECO1 & PROT_BITS_OFFS) >> 7) << 2;
265 mask = 1 << ((mmMME_SBB_POWER_ECO1 & 0x7F) >> 2);
266 mask |= 1 << ((mmMME_SBB_POWER_ECO2 & 0x7F) >> 2);
267
268 WREG32(pb_addr + word_offset, ~mask);
269}
270
271static void goya_init_dma_protection_bits(struct hl_device *hdev)
272{
273 u32 pb_addr, mask;
274 u8 word_offset;
275
276 goya_pb_set_block(hdev, mmDMA_NRTR_BASE);
277 goya_pb_set_block(hdev, mmDMA_RD_REGULATOR_BASE);
278 goya_pb_set_block(hdev, mmDMA_WR_REGULATOR_BASE);
279
280 pb_addr = (mmDMA_QM_0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
281 word_offset = ((mmDMA_QM_0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
282 mask = 1 << ((mmDMA_QM_0_GLBL_CFG0 & 0x7F) >> 2);
283 mask |= 1 << ((mmDMA_QM_0_GLBL_CFG1 & 0x7F) >> 2);
284 mask |= 1 << ((mmDMA_QM_0_GLBL_PROT & 0x7F) >> 2);
285 mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_CFG & 0x7F) >> 2);
286 mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
287 mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
288 mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_WDATA & 0x7F) >> 2);
289 mask |= 1 << ((mmDMA_QM_0_GLBL_SECURE_PROPS & 0x7F) >> 2);
290 mask |= 1 << ((mmDMA_QM_0_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
291 mask |= 1 << ((mmDMA_QM_0_GLBL_STS0 & 0x7F) >> 2);
292 mask |= 1 << ((mmDMA_QM_0_GLBL_STS1 & 0x7F) >> 2);
293 mask |= 1 << ((mmDMA_QM_0_PQ_BASE_LO & 0x7F) >> 2);
294 mask |= 1 << ((mmDMA_QM_0_PQ_BASE_HI & 0x7F) >> 2);
295 mask |= 1 << ((mmDMA_QM_0_PQ_SIZE & 0x7F) >> 2);
296 mask |= 1 << ((mmDMA_QM_0_PQ_PI & 0x7F) >> 2);
297 mask |= 1 << ((mmDMA_QM_0_PQ_CI & 0x7F) >> 2);
298 mask |= 1 << ((mmDMA_QM_0_PQ_CFG0 & 0x7F) >> 2);
299 mask |= 1 << ((mmDMA_QM_0_PQ_CFG1 & 0x7F) >> 2);
300 mask |= 1 << ((mmDMA_QM_0_PQ_ARUSER & 0x7F) >> 2);
301
302 WREG32(pb_addr + word_offset, ~mask);
303
304 pb_addr = (mmDMA_QM_0_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
305 word_offset = ((mmDMA_QM_0_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
306 mask = 1 << ((mmDMA_QM_0_PQ_PUSH0 & 0x7F) >> 2);
307 mask |= 1 << ((mmDMA_QM_0_PQ_PUSH1 & 0x7F) >> 2);
308 mask |= 1 << ((mmDMA_QM_0_PQ_PUSH2 & 0x7F) >> 2);
309 mask |= 1 << ((mmDMA_QM_0_PQ_PUSH3 & 0x7F) >> 2);
310 mask |= 1 << ((mmDMA_QM_0_PQ_STS0 & 0x7F) >> 2);
311 mask |= 1 << ((mmDMA_QM_0_PQ_STS1 & 0x7F) >> 2);
312 mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
313 mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
314 mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
315 mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
316 mask |= 1 << ((mmDMA_QM_0_CQ_CFG0 & 0x7F) >> 2);
317 mask |= 1 << ((mmDMA_QM_0_CQ_CFG1 & 0x7F) >> 2);
318 mask |= 1 << ((mmDMA_QM_0_CQ_ARUSER & 0x7F) >> 2);
319 mask |= 1 << ((mmDMA_QM_0_CQ_PTR_LO & 0x7F) >> 2);
320 mask |= 1 << ((mmDMA_QM_0_CQ_PTR_HI & 0x7F) >> 2);
321 mask |= 1 << ((mmDMA_QM_0_CQ_TSIZE & 0x7F) >> 2);
322 mask |= 1 << ((mmDMA_QM_0_CQ_CTL & 0x7F) >> 2);
323 mask |= 1 << ((mmDMA_QM_0_CQ_PTR_LO_STS & 0x7F) >> 2);
324 mask |= 1 << ((mmDMA_QM_0_CQ_PTR_HI_STS & 0x7F) >> 2);
325 mask |= 1 << ((mmDMA_QM_0_CQ_TSIZE_STS & 0x7F) >> 2);
326 mask |= 1 << ((mmDMA_QM_0_CQ_CTL_STS & 0x7F) >> 2);
327 mask |= 1 << ((mmDMA_QM_0_CQ_STS0 & 0x7F) >> 2);
328 mask |= 1 << ((mmDMA_QM_0_CQ_STS1 & 0x7F) >> 2);
329 mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
330 mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
331 mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
332 mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
333
334 WREG32(pb_addr + word_offset, ~mask);
335
336 pb_addr = (mmDMA_QM_0_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
337 word_offset = ((mmDMA_QM_0_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
338 mask = 1 << ((mmDMA_QM_0_CQ_IFIFO_CNT & 0x7F) >> 2);
339 mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
340 mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
341 mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
342 mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
343 mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
344 mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
345 mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
346 mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
347 mask |= 1 << ((mmDMA_QM_0_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
348 mask |= 1 << ((mmDMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
349 mask |= 1 << ((mmDMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
350 mask |= 1 << ((mmDMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
351 mask |= 1 << ((mmDMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
352 mask |= 1 << ((mmDMA_QM_0_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
353
354 WREG32(pb_addr + word_offset, ~mask);
355
356 goya_pb_set_block(hdev, mmDMA_CH_0_BASE);
357
358 pb_addr = (mmDMA_QM_1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
359 word_offset = ((mmDMA_QM_1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
360 mask = 1 << ((mmDMA_QM_1_GLBL_CFG0 & 0x7F) >> 2);
361 mask |= 1 << ((mmDMA_QM_1_GLBL_CFG1 & 0x7F) >> 2);
362 mask |= 1 << ((mmDMA_QM_1_GLBL_PROT & 0x7F) >> 2);
363 mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_CFG & 0x7F) >> 2);
364 mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
365 mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
366 mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_WDATA & 0x7F) >> 2);
367 mask |= 1 << ((mmDMA_QM_1_GLBL_SECURE_PROPS & 0x7F) >> 2);
368 mask |= 1 << ((mmDMA_QM_1_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
369 mask |= 1 << ((mmDMA_QM_1_GLBL_STS0 & 0x7F) >> 2);
370 mask |= 1 << ((mmDMA_QM_1_GLBL_STS1 & 0x7F) >> 2);
371 mask |= 1 << ((mmDMA_QM_1_PQ_BASE_LO & 0x7F) >> 2);
372 mask |= 1 << ((mmDMA_QM_1_PQ_BASE_HI & 0x7F) >> 2);
373 mask |= 1 << ((mmDMA_QM_1_PQ_SIZE & 0x7F) >> 2);
374 mask |= 1 << ((mmDMA_QM_1_PQ_PI & 0x7F) >> 2);
375 mask |= 1 << ((mmDMA_QM_1_PQ_CI & 0x7F) >> 2);
376 mask |= 1 << ((mmDMA_QM_1_PQ_CFG0 & 0x7F) >> 2);
377 mask |= 1 << ((mmDMA_QM_1_PQ_CFG1 & 0x7F) >> 2);
378 mask |= 1 << ((mmDMA_QM_1_PQ_ARUSER & 0x7F) >> 2);
379
380 WREG32(pb_addr + word_offset, ~mask);
381
382 pb_addr = (mmDMA_QM_1_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
383 word_offset = ((mmDMA_QM_1_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
384 mask = 1 << ((mmDMA_QM_1_PQ_PUSH0 & 0x7F) >> 2);
385 mask |= 1 << ((mmDMA_QM_1_PQ_PUSH1 & 0x7F) >> 2);
386 mask |= 1 << ((mmDMA_QM_1_PQ_PUSH2 & 0x7F) >> 2);
387 mask |= 1 << ((mmDMA_QM_1_PQ_PUSH3 & 0x7F) >> 2);
388 mask |= 1 << ((mmDMA_QM_1_PQ_STS0 & 0x7F) >> 2);
389 mask |= 1 << ((mmDMA_QM_1_PQ_STS1 & 0x7F) >> 2);
390 mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
391 mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
392 mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
393 mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
394 mask |= 1 << ((mmDMA_QM_1_CQ_CFG0 & 0x7F) >> 2);
395 mask |= 1 << ((mmDMA_QM_1_CQ_CFG1 & 0x7F) >> 2);
396 mask |= 1 << ((mmDMA_QM_1_CQ_ARUSER & 0x7F) >> 2);
397 mask |= 1 << ((mmDMA_QM_1_CQ_PTR_LO & 0x7F) >> 2);
398 mask |= 1 << ((mmDMA_QM_1_CQ_PTR_HI & 0x7F) >> 2);
399 mask |= 1 << ((mmDMA_QM_1_CQ_TSIZE & 0x7F) >> 2);
400 mask |= 1 << ((mmDMA_QM_1_CQ_CTL & 0x7F) >> 2);
401 mask |= 1 << ((mmDMA_QM_1_CQ_PTR_LO_STS & 0x7F) >> 2);
402 mask |= 1 << ((mmDMA_QM_1_CQ_PTR_HI_STS & 0x7F) >> 2);
403 mask |= 1 << ((mmDMA_QM_1_CQ_TSIZE_STS & 0x7F) >> 2);
404 mask |= 1 << ((mmDMA_QM_1_CQ_CTL_STS & 0x7F) >> 2);
405 mask |= 1 << ((mmDMA_QM_1_CQ_STS0 & 0x7F) >> 2);
406 mask |= 1 << ((mmDMA_QM_1_CQ_STS1 & 0x7F) >> 2);
407 mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
408 mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
409 mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
410 mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
411
412 WREG32(pb_addr + word_offset, ~mask);
413
414 pb_addr = (mmDMA_QM_1_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
415 word_offset = ((mmDMA_QM_1_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
416 mask = 1 << ((mmDMA_QM_1_CQ_IFIFO_CNT & 0x7F) >> 2);
417 mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
418 mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
419 mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
420 mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
421 mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
422 mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
423 mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
424 mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
425 mask |= 1 << ((mmDMA_QM_1_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
426 mask |= 1 << ((mmDMA_QM_1_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
427 mask |= 1 << ((mmDMA_QM_1_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
428 mask |= 1 << ((mmDMA_QM_1_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
429 mask |= 1 << ((mmDMA_QM_1_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
430 mask |= 1 << ((mmDMA_QM_1_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
431
432 WREG32(pb_addr + word_offset, ~mask);
433
434 goya_pb_set_block(hdev, mmDMA_CH_1_BASE);
435
436 pb_addr = (mmDMA_QM_2_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
437 word_offset = ((mmDMA_QM_2_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
438 mask = 1 << ((mmDMA_QM_2_GLBL_CFG0 & 0x7F) >> 2);
439 mask |= 1 << ((mmDMA_QM_2_GLBL_CFG1 & 0x7F) >> 2);
440 mask |= 1 << ((mmDMA_QM_2_GLBL_PROT & 0x7F) >> 2);
441 mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_CFG & 0x7F) >> 2);
442 mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
443 mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
444 mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_WDATA & 0x7F) >> 2);
445 mask |= 1 << ((mmDMA_QM_2_GLBL_SECURE_PROPS & 0x7F) >> 2);
446 mask |= 1 << ((mmDMA_QM_2_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
447 mask |= 1 << ((mmDMA_QM_2_GLBL_STS0 & 0x7F) >> 2);
448 mask |= 1 << ((mmDMA_QM_2_GLBL_STS1 & 0x7F) >> 2);
449 mask |= 1 << ((mmDMA_QM_2_PQ_BASE_LO & 0x7F) >> 2);
450 mask |= 1 << ((mmDMA_QM_2_PQ_BASE_HI & 0x7F) >> 2);
451 mask |= 1 << ((mmDMA_QM_2_PQ_SIZE & 0x7F) >> 2);
452 mask |= 1 << ((mmDMA_QM_2_PQ_PI & 0x7F) >> 2);
453 mask |= 1 << ((mmDMA_QM_2_PQ_CI & 0x7F) >> 2);
454 mask |= 1 << ((mmDMA_QM_2_PQ_CFG0 & 0x7F) >> 2);
455 mask |= 1 << ((mmDMA_QM_2_PQ_CFG1 & 0x7F) >> 2);
456 mask |= 1 << ((mmDMA_QM_2_PQ_ARUSER & 0x7F) >> 2);
457
458 WREG32(pb_addr + word_offset, ~mask);
459
460 pb_addr = (mmDMA_QM_2_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
461 word_offset = ((mmDMA_QM_2_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
462 mask = 1 << ((mmDMA_QM_2_PQ_PUSH0 & 0x7F) >> 2);
463 mask |= 1 << ((mmDMA_QM_2_PQ_PUSH1 & 0x7F) >> 2);
464 mask |= 1 << ((mmDMA_QM_2_PQ_PUSH2 & 0x7F) >> 2);
465 mask |= 1 << ((mmDMA_QM_2_PQ_PUSH3 & 0x7F) >> 2);
466 mask |= 1 << ((mmDMA_QM_2_PQ_STS0 & 0x7F) >> 2);
467 mask |= 1 << ((mmDMA_QM_2_PQ_STS1 & 0x7F) >> 2);
468 mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
469 mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
470 mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
471 mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
472 mask |= 1 << ((mmDMA_QM_2_CQ_CFG0 & 0x7F) >> 2);
473 mask |= 1 << ((mmDMA_QM_2_CQ_CFG1 & 0x7F) >> 2);
474 mask |= 1 << ((mmDMA_QM_2_CQ_ARUSER & 0x7F) >> 2);
475 mask |= 1 << ((mmDMA_QM_2_CQ_PTR_LO & 0x7F) >> 2);
476 mask |= 1 << ((mmDMA_QM_2_CQ_PTR_HI & 0x7F) >> 2);
477 mask |= 1 << ((mmDMA_QM_2_CQ_TSIZE & 0x7F) >> 2);
478 mask |= 1 << ((mmDMA_QM_2_CQ_CTL & 0x7F) >> 2);
479 mask |= 1 << ((mmDMA_QM_2_CQ_PTR_LO_STS & 0x7F) >> 2);
480 mask |= 1 << ((mmDMA_QM_2_CQ_PTR_HI_STS & 0x7F) >> 2);
481 mask |= 1 << ((mmDMA_QM_2_CQ_TSIZE_STS & 0x7F) >> 2);
482 mask |= 1 << ((mmDMA_QM_2_CQ_CTL_STS & 0x7F) >> 2);
483 mask |= 1 << ((mmDMA_QM_2_CQ_STS0 & 0x7F) >> 2);
484 mask |= 1 << ((mmDMA_QM_2_CQ_STS1 & 0x7F) >> 2);
485 mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
486 mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
487 mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
488 mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
489
490 WREG32(pb_addr + word_offset, ~mask);
491
492 pb_addr = (mmDMA_QM_2_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
493 word_offset = ((mmDMA_QM_2_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
494 mask = 1 << ((mmDMA_QM_2_CQ_IFIFO_CNT & 0x7F) >> 2);
495 mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
496 mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
497 mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
498 mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
499 mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
500 mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
501 mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
502 mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
503 mask |= 1 << ((mmDMA_QM_2_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
504 mask |= 1 << ((mmDMA_QM_2_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
505 mask |= 1 << ((mmDMA_QM_2_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
506 mask |= 1 << ((mmDMA_QM_2_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
507 mask |= 1 << ((mmDMA_QM_2_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
508 mask |= 1 << ((mmDMA_QM_2_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
509
510 WREG32(pb_addr + word_offset, ~mask);
511
512 goya_pb_set_block(hdev, mmDMA_CH_2_BASE);
513
514 pb_addr = (mmDMA_QM_3_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
515 word_offset = ((mmDMA_QM_3_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
516 mask = 1 << ((mmDMA_QM_3_GLBL_CFG0 & 0x7F) >> 2);
517 mask |= 1 << ((mmDMA_QM_3_GLBL_CFG1 & 0x7F) >> 2);
518 mask |= 1 << ((mmDMA_QM_3_GLBL_PROT & 0x7F) >> 2);
519 mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_CFG & 0x7F) >> 2);
520 mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
521 mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
522 mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_WDATA & 0x7F) >> 2);
523 mask |= 1 << ((mmDMA_QM_3_GLBL_SECURE_PROPS & 0x7F) >> 2);
524 mask |= 1 << ((mmDMA_QM_3_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
525 mask |= 1 << ((mmDMA_QM_3_GLBL_STS0 & 0x7F) >> 2);
526 mask |= 1 << ((mmDMA_QM_3_GLBL_STS1 & 0x7F) >> 2);
527 mask |= 1 << ((mmDMA_QM_3_PQ_BASE_LO & 0x7F) >> 2);
528 mask |= 1 << ((mmDMA_QM_3_PQ_BASE_HI & 0x7F) >> 2);
529 mask |= 1 << ((mmDMA_QM_3_PQ_SIZE & 0x7F) >> 2);
530 mask |= 1 << ((mmDMA_QM_3_PQ_PI & 0x7F) >> 2);
531 mask |= 1 << ((mmDMA_QM_3_PQ_CI & 0x7F) >> 2);
532 mask |= 1 << ((mmDMA_QM_3_PQ_CFG0 & 0x7F) >> 2);
533 mask |= 1 << ((mmDMA_QM_3_PQ_CFG1 & 0x7F) >> 2);
534 mask |= 1 << ((mmDMA_QM_3_PQ_ARUSER & 0x7F) >> 2);
535
536 WREG32(pb_addr + word_offset, ~mask);
537
538 pb_addr = (mmDMA_QM_3_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
539 word_offset = ((mmDMA_QM_3_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
540 mask = 1 << ((mmDMA_QM_3_PQ_PUSH0 & 0x7F) >> 2);
541 mask |= 1 << ((mmDMA_QM_3_PQ_PUSH1 & 0x7F) >> 2);
542 mask |= 1 << ((mmDMA_QM_3_PQ_PUSH2 & 0x7F) >> 2);
543 mask |= 1 << ((mmDMA_QM_3_PQ_PUSH3 & 0x7F) >> 2);
544 mask |= 1 << ((mmDMA_QM_3_PQ_STS0 & 0x7F) >> 2);
545 mask |= 1 << ((mmDMA_QM_3_PQ_STS1 & 0x7F) >> 2);
546 mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
547 mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
548 mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
549 mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
550 mask |= 1 << ((mmDMA_QM_3_CQ_CFG0 & 0x7F) >> 2);
551 mask |= 1 << ((mmDMA_QM_3_CQ_CFG1 & 0x7F) >> 2);
552 mask |= 1 << ((mmDMA_QM_3_CQ_ARUSER & 0x7F) >> 2);
553 mask |= 1 << ((mmDMA_QM_3_CQ_PTR_LO & 0x7F) >> 2);
554 mask |= 1 << ((mmDMA_QM_3_CQ_PTR_HI & 0x7F) >> 2);
555 mask |= 1 << ((mmDMA_QM_3_CQ_TSIZE & 0x7F) >> 2);
556 mask |= 1 << ((mmDMA_QM_3_CQ_CTL & 0x7F) >> 2);
557 mask |= 1 << ((mmDMA_QM_3_CQ_PTR_LO_STS & 0x7F) >> 2);
558 mask |= 1 << ((mmDMA_QM_3_CQ_PTR_HI_STS & 0x7F) >> 2);
559 mask |= 1 << ((mmDMA_QM_3_CQ_TSIZE_STS & 0x7F) >> 2);
560 mask |= 1 << ((mmDMA_QM_3_CQ_CTL_STS & 0x7F) >> 2);
561 mask |= 1 << ((mmDMA_QM_3_CQ_STS0 & 0x7F) >> 2);
562 mask |= 1 << ((mmDMA_QM_3_CQ_STS1 & 0x7F) >> 2);
563 mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
564 mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
565 mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
566 mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
567
568 WREG32(pb_addr + word_offset, ~mask);
569
570 pb_addr = (mmDMA_QM_3_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
571 word_offset = ((mmDMA_QM_3_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
572 mask = 1 << ((mmDMA_QM_3_CQ_IFIFO_CNT & 0x7F) >> 2);
573 mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
574 mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
575 mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
576 mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
577 mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
578 mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
579 mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
580 mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
581 mask |= 1 << ((mmDMA_QM_3_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
582 mask |= 1 << ((mmDMA_QM_3_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
583 mask |= 1 << ((mmDMA_QM_3_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
584 mask |= 1 << ((mmDMA_QM_3_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
585 mask |= 1 << ((mmDMA_QM_3_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
586 mask |= 1 << ((mmDMA_QM_3_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
587
588 WREG32(pb_addr + word_offset, ~mask);
589
590 goya_pb_set_block(hdev, mmDMA_CH_3_BASE);
591
592 pb_addr = (mmDMA_QM_4_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
593 word_offset = ((mmDMA_QM_4_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
594 mask = 1 << ((mmDMA_QM_4_GLBL_CFG0 & 0x7F) >> 2);
595 mask |= 1 << ((mmDMA_QM_4_GLBL_CFG1 & 0x7F) >> 2);
596 mask |= 1 << ((mmDMA_QM_4_GLBL_PROT & 0x7F) >> 2);
597 mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_CFG & 0x7F) >> 2);
598 mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
599 mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
600 mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_WDATA & 0x7F) >> 2);
601 mask |= 1 << ((mmDMA_QM_4_GLBL_SECURE_PROPS & 0x7F) >> 2);
602 mask |= 1 << ((mmDMA_QM_4_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
603 mask |= 1 << ((mmDMA_QM_4_GLBL_STS0 & 0x7F) >> 2);
604 mask |= 1 << ((mmDMA_QM_4_GLBL_STS1 & 0x7F) >> 2);
605 mask |= 1 << ((mmDMA_QM_4_PQ_BASE_LO & 0x7F) >> 2);
606 mask |= 1 << ((mmDMA_QM_4_PQ_BASE_HI & 0x7F) >> 2);
607 mask |= 1 << ((mmDMA_QM_4_PQ_SIZE & 0x7F) >> 2);
608 mask |= 1 << ((mmDMA_QM_4_PQ_PI & 0x7F) >> 2);
609 mask |= 1 << ((mmDMA_QM_4_PQ_CI & 0x7F) >> 2);
610 mask |= 1 << ((mmDMA_QM_4_PQ_CFG0 & 0x7F) >> 2);
611 mask |= 1 << ((mmDMA_QM_4_PQ_CFG1 & 0x7F) >> 2);
612 mask |= 1 << ((mmDMA_QM_4_PQ_ARUSER & 0x7F) >> 2);
613
614 WREG32(pb_addr + word_offset, ~mask);
615
616 pb_addr = (mmDMA_QM_4_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
617 word_offset = ((mmDMA_QM_4_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
618 mask = 1 << ((mmDMA_QM_4_PQ_PUSH0 & 0x7F) >> 2);
619 mask |= 1 << ((mmDMA_QM_4_PQ_PUSH1 & 0x7F) >> 2);
620 mask |= 1 << ((mmDMA_QM_4_PQ_PUSH2 & 0x7F) >> 2);
621 mask |= 1 << ((mmDMA_QM_4_PQ_PUSH3 & 0x7F) >> 2);
622 mask |= 1 << ((mmDMA_QM_4_PQ_STS0 & 0x7F) >> 2);
623 mask |= 1 << ((mmDMA_QM_4_PQ_STS1 & 0x7F) >> 2);
624 mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
625 mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
626 mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
627 mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
628 mask |= 1 << ((mmDMA_QM_4_CQ_CFG0 & 0x7F) >> 2);
629 mask |= 1 << ((mmDMA_QM_4_CQ_CFG1 & 0x7F) >> 2);
630 mask |= 1 << ((mmDMA_QM_4_CQ_ARUSER & 0x7F) >> 2);
631 mask |= 1 << ((mmDMA_QM_4_CQ_PTR_LO & 0x7F) >> 2);
632 mask |= 1 << ((mmDMA_QM_4_CQ_PTR_HI & 0x7F) >> 2);
633 mask |= 1 << ((mmDMA_QM_4_CQ_TSIZE & 0x7F) >> 2);
634 mask |= 1 << ((mmDMA_QM_4_CQ_CTL & 0x7F) >> 2);
635 mask |= 1 << ((mmDMA_QM_4_CQ_PTR_LO_STS & 0x7F) >> 2);
636 mask |= 1 << ((mmDMA_QM_4_CQ_PTR_HI_STS & 0x7F) >> 2);
637 mask |= 1 << ((mmDMA_QM_4_CQ_TSIZE_STS & 0x7F) >> 2);
638 mask |= 1 << ((mmDMA_QM_4_CQ_CTL_STS & 0x7F) >> 2);
639 mask |= 1 << ((mmDMA_QM_4_CQ_STS0 & 0x7F) >> 2);
640 mask |= 1 << ((mmDMA_QM_4_CQ_STS1 & 0x7F) >> 2);
641 mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
642 mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
643 mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
644 mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
645
646 WREG32(pb_addr + word_offset, ~mask);
647
648 pb_addr = (mmDMA_QM_4_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
649 word_offset = ((mmDMA_QM_4_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
650 mask = 1 << ((mmDMA_QM_4_CQ_IFIFO_CNT & 0x7F) >> 2);
651 mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
652 mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
653 mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
654 mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
655 mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
656 mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
657 mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
658 mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
659 mask |= 1 << ((mmDMA_QM_4_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
660 mask |= 1 << ((mmDMA_QM_4_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
661 mask |= 1 << ((mmDMA_QM_4_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
662 mask |= 1 << ((mmDMA_QM_4_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
663 mask |= 1 << ((mmDMA_QM_4_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
664 mask |= 1 << ((mmDMA_QM_4_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
665
666 WREG32(pb_addr + word_offset, ~mask);
667
668 goya_pb_set_block(hdev, mmDMA_CH_4_BASE);
669}
670
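/*
 * goya_init_tpc_protection_bits - set per-register protection bits for the
 * TPC CFG, QMAN and CMDQ register blocks.
 *
 * Scheme, as inferred from the arithmetic used throughout this file (not an
 * authoritative description): each 4KB register block reserves its last
 * 128 bytes (PROT_BITS_OFFS) for 1024 protection bits, one per 32-bit
 * register. For a register address reg:
 *
 *   pb_addr     = (reg & ~0xFFF) + PROT_BITS_OFFS    - block's protection area
 *   word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2 - word holding reg's bit
 *   bit         = (reg & 0x7F) >> 2                  - bit index in that word
 *
 * goya_pb_set_block() appears to mark an entire block as protected; the
 * WREG32(pb_addr + word_offset, ~mask) writes below then re-open each block
 * except for the privileged registers collected in mask, which remain
 * protected from user access.
 */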
671static void goya_init_tpc_protection_bits(struct hl_device *hdev)
672{
673 u32 pb_addr, mask;
674 u8 word_offset;
675
676 goya_pb_set_block(hdev, mmTPC0_RD_REGULATOR_BASE);
677 goya_pb_set_block(hdev, mmTPC0_WR_REGULATOR_BASE);
678
679 pb_addr = (mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
680 word_offset = ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH &
681 PROT_BITS_OFFS) >> 7) << 2;
682 mask = 1 << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
683 mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
684 mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
685 mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
686
687 WREG32(pb_addr + word_offset, ~mask);
688
689 pb_addr = (mmTPC0_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
690 word_offset = ((mmTPC0_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
691 mask = 1 << ((mmTPC0_CFG_ARUSER & 0x7F) >> 2);
692 mask |= 1 << ((mmTPC0_CFG_AWUSER & 0x7F) >> 2);
693
694 WREG32(pb_addr + word_offset, ~mask);
695
696 pb_addr = (mmTPC0_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
697 word_offset = ((mmTPC0_CFG_FUNC_MBIST_CNTRL &
698 PROT_BITS_OFFS) >> 7) << 2;
699 mask = 1 << ((mmTPC0_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
700 mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
701 mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
702 mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
703 mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
704 mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
705 mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
706 mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
707 mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
708 mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
709 mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
710 mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
711
712 WREG32(pb_addr + word_offset, ~mask);
713
714 pb_addr = (mmTPC0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
715 word_offset = ((mmTPC0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
716 mask = 1 << ((mmTPC0_QM_GLBL_CFG0 & 0x7F) >> 2);
717 mask |= 1 << ((mmTPC0_QM_GLBL_CFG1 & 0x7F) >> 2);
718 mask |= 1 << ((mmTPC0_QM_GLBL_PROT & 0x7F) >> 2);
719 mask |= 1 << ((mmTPC0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
720 mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
721 mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
722 mask |= 1 << ((mmTPC0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
723 mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
724 mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
725 mask |= 1 << ((mmTPC0_QM_GLBL_STS0 & 0x7F) >> 2);
726 mask |= 1 << ((mmTPC0_QM_GLBL_STS1 & 0x7F) >> 2);
727 mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO & 0x7F) >> 2);
728 mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI & 0x7F) >> 2);
729 mask |= 1 << ((mmTPC0_QM_PQ_SIZE & 0x7F) >> 2);
730 mask |= 1 << ((mmTPC0_QM_PQ_PI & 0x7F) >> 2);
731 mask |= 1 << ((mmTPC0_QM_PQ_CI & 0x7F) >> 2);
732 mask |= 1 << ((mmTPC0_QM_PQ_CFG0 & 0x7F) >> 2);
733 mask |= 1 << ((mmTPC0_QM_PQ_CFG1 & 0x7F) >> 2);
734 mask |= 1 << ((mmTPC0_QM_PQ_ARUSER & 0x7F) >> 2);
735
736 WREG32(pb_addr + word_offset, ~mask);
737
738 pb_addr = (mmTPC0_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
739 word_offset = ((mmTPC0_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
740 mask = 1 << ((mmTPC0_QM_PQ_PUSH0 & 0x7F) >> 2);
741 mask |= 1 << ((mmTPC0_QM_PQ_PUSH1 & 0x7F) >> 2);
742 mask |= 1 << ((mmTPC0_QM_PQ_PUSH2 & 0x7F) >> 2);
743 mask |= 1 << ((mmTPC0_QM_PQ_PUSH3 & 0x7F) >> 2);
744 mask |= 1 << ((mmTPC0_QM_PQ_STS0 & 0x7F) >> 2);
745 mask |= 1 << ((mmTPC0_QM_PQ_STS1 & 0x7F) >> 2);
746 mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
747 mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
748 mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
749 mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
750 mask |= 1 << ((mmTPC0_QM_CQ_CFG0 & 0x7F) >> 2);
751 mask |= 1 << ((mmTPC0_QM_CQ_CFG1 & 0x7F) >> 2);
752 mask |= 1 << ((mmTPC0_QM_CQ_ARUSER & 0x7F) >> 2);
753 mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO & 0x7F) >> 2);
754 mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI & 0x7F) >> 2);
755 mask |= 1 << ((mmTPC0_QM_CQ_TSIZE & 0x7F) >> 2);
756 mask |= 1 << ((mmTPC0_QM_CQ_CTL & 0x7F) >> 2);
757 mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
758 mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
759 mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS & 0x7F) >> 2);
760 mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS & 0x7F) >> 2);
761 mask |= 1 << ((mmTPC0_QM_CQ_STS0 & 0x7F) >> 2);
762 mask |= 1 << ((mmTPC0_QM_CQ_STS1 & 0x7F) >> 2);
763 mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
764 mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
765 mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
766 mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
767
768 WREG32(pb_addr + word_offset, ~mask);
769
770 pb_addr = (mmTPC0_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
771 word_offset = ((mmTPC0_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
772 mask = 1 << ((mmTPC0_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
773 mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
774 mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
775 mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
776 mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
777 mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
778 mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
779 mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
780 mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
781 mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
782 mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
783 mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
784 mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
785 mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
786 mask |= 1 << ((mmTPC0_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
787
788 WREG32(pb_addr + word_offset, ~mask);
789
790 pb_addr = (mmTPC0_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
791 word_offset = ((mmTPC0_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
792 mask = 1 << ((mmTPC0_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
793 mask |= 1 << ((mmTPC0_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
794 mask |= 1 << ((mmTPC0_CMDQ_GLBL_PROT & 0x7F) >> 2);
795 mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
796 mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
797 mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
798 mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
799 mask |= 1 << ((mmTPC0_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
800 mask |= 1 << ((mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
801 mask |= 1 << ((mmTPC0_CMDQ_GLBL_STS0 & 0x7F) >> 2);
802 mask |= 1 << ((mmTPC0_CMDQ_GLBL_STS1 & 0x7F) >> 2);
803
804 WREG32(pb_addr + word_offset, ~mask);
805
806 pb_addr = (mmTPC0_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
807 word_offset = ((mmTPC0_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
808 mask = 1 << ((mmTPC0_CMDQ_CQ_CFG0 & 0x7F) >> 2);
809 mask |= 1 << ((mmTPC0_CMDQ_CQ_CFG1 & 0x7F) >> 2);
810 mask |= 1 << ((mmTPC0_CMDQ_CQ_ARUSER & 0x7F) >> 2);
811 mask |= 1 << ((mmTPC0_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
812 mask |= 1 << ((mmTPC0_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
813 mask |= 1 << ((mmTPC0_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
814 mask |= 1 << ((mmTPC0_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
815 mask |= 1 << ((mmTPC0_CMDQ_CQ_STS0 & 0x7F) >> 2);
816 mask |= 1 << ((mmTPC0_CMDQ_CQ_STS1 & 0x7F) >> 2);
817 mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
818 mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
819 mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
820 mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
821
822 WREG32(pb_addr + word_offset, ~mask);
823
824 pb_addr = (mmTPC0_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
825 word_offset = ((mmTPC0_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
826 mask = 1 << ((mmTPC0_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
827 mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
828 mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
829 mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
830 mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
831 mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
832 mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
833 mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
834 mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
835 mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
836 mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
837 mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
838 mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
839 mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
840 mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
841 mask |= 1 << ((mmTPC0_CMDQ_CP_STS & 0x7F) >> 2);
842 mask |= 1 << ((mmTPC0_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
843
844 WREG32(pb_addr + word_offset, ~mask);
845
846 pb_addr = (mmTPC0_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
847 word_offset = ((mmTPC0_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
848 << 2;
849 mask = 1 << ((mmTPC0_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
850 mask |= 1 << ((mmTPC0_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
851 mask |= 1 << ((mmTPC0_CMDQ_CP_DBG_0 & 0x7F) >> 2);
852 mask |= 1 << ((mmTPC0_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
853 mask |= 1 << ((mmTPC0_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
854
855 WREG32(pb_addr + word_offset, ~mask);
856
857 goya_pb_set_block(hdev, mmTPC1_RTR_BASE);
858 goya_pb_set_block(hdev, mmTPC1_RD_REGULATOR_BASE);
859 goya_pb_set_block(hdev, mmTPC1_WR_REGULATOR_BASE);
860
861 pb_addr = (mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
862 word_offset = ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH &
863 PROT_BITS_OFFS) >> 7) << 2;
864 mask = 1 << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
865 mask |= 1 << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
866 mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
867 mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
868
869 WREG32(pb_addr + word_offset, ~mask);
870
871 pb_addr = (mmTPC1_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
872 word_offset = ((mmTPC1_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
873 mask = 1 << ((mmTPC1_CFG_ARUSER & 0x7F) >> 2);
874 mask |= 1 << ((mmTPC1_CFG_AWUSER & 0x7F) >> 2);
875
876 WREG32(pb_addr + word_offset, ~mask);
877
878 pb_addr = (mmTPC1_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
879 word_offset = ((mmTPC1_CFG_FUNC_MBIST_CNTRL & PROT_BITS_OFFS) >> 7)
880 << 2;
881 mask = 1 << ((mmTPC1_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
882 mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
883 mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
884 mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
885 mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
886 mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
887 mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
888 mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
889 mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
890 mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
891 mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
892 mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
893
894 WREG32(pb_addr + word_offset, ~mask);
895
896 pb_addr = (mmTPC1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
897 word_offset = ((mmTPC1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
898 mask = 1 << ((mmTPC1_QM_GLBL_CFG0 & 0x7F) >> 2);
899 mask |= 1 << ((mmTPC1_QM_GLBL_CFG1 & 0x7F) >> 2);
900 mask |= 1 << ((mmTPC1_QM_GLBL_PROT & 0x7F) >> 2);
901 mask |= 1 << ((mmTPC1_QM_GLBL_ERR_CFG & 0x7F) >> 2);
902 mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
903 mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
904 mask |= 1 << ((mmTPC1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
905 mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
906 mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
907 mask |= 1 << ((mmTPC1_QM_GLBL_STS0 & 0x7F) >> 2);
908 mask |= 1 << ((mmTPC1_QM_GLBL_STS1 & 0x7F) >> 2);
909 mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO & 0x7F) >> 2);
910 mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI & 0x7F) >> 2);
911 mask |= 1 << ((mmTPC1_QM_PQ_SIZE & 0x7F) >> 2);
912 mask |= 1 << ((mmTPC1_QM_PQ_PI & 0x7F) >> 2);
913 mask |= 1 << ((mmTPC1_QM_PQ_CI & 0x7F) >> 2);
914 mask |= 1 << ((mmTPC1_QM_PQ_CFG0 & 0x7F) >> 2);
915 mask |= 1 << ((mmTPC1_QM_PQ_CFG1 & 0x7F) >> 2);
916 mask |= 1 << ((mmTPC1_QM_PQ_ARUSER & 0x7F) >> 2);
917
918 WREG32(pb_addr + word_offset, ~mask);
919
920 pb_addr = (mmTPC1_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
921 word_offset = ((mmTPC1_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
922 mask = 1 << ((mmTPC1_QM_PQ_PUSH0 & 0x7F) >> 2);
923 mask |= 1 << ((mmTPC1_QM_PQ_PUSH1 & 0x7F) >> 2);
924 mask |= 1 << ((mmTPC1_QM_PQ_PUSH2 & 0x7F) >> 2);
925 mask |= 1 << ((mmTPC1_QM_PQ_PUSH3 & 0x7F) >> 2);
926 mask |= 1 << ((mmTPC1_QM_PQ_STS0 & 0x7F) >> 2);
927 mask |= 1 << ((mmTPC1_QM_PQ_STS1 & 0x7F) >> 2);
928 mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
929 mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
930 mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
931 mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
932 mask |= 1 << ((mmTPC1_QM_CQ_CFG0 & 0x7F) >> 2);
933 mask |= 1 << ((mmTPC1_QM_CQ_CFG1 & 0x7F) >> 2);
934 mask |= 1 << ((mmTPC1_QM_CQ_ARUSER & 0x7F) >> 2);
935 mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO & 0x7F) >> 2);
936 mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI & 0x7F) >> 2);
937 mask |= 1 << ((mmTPC1_QM_CQ_TSIZE & 0x7F) >> 2);
938 mask |= 1 << ((mmTPC1_QM_CQ_CTL & 0x7F) >> 2);
939 mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
940 mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
941 mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS & 0x7F) >> 2);
942 mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS & 0x7F) >> 2);
943 mask |= 1 << ((mmTPC1_QM_CQ_STS0 & 0x7F) >> 2);
944 mask |= 1 << ((mmTPC1_QM_CQ_STS1 & 0x7F) >> 2);
945 mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
946 mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
947 mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
948 mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
949
950 WREG32(pb_addr + word_offset, ~mask);
951
952 pb_addr = (mmTPC1_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
953 word_offset = ((mmTPC1_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
954 mask = 1 << ((mmTPC1_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
955 mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
956 mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
957 mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
958 mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
959 mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
960 mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
961 mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
962 mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
963 mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
964 mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
965 mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
966 mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
967 mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
968 mask |= 1 << ((mmTPC1_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
969
970 WREG32(pb_addr + word_offset, ~mask);
971
972 pb_addr = (mmTPC1_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
973 word_offset = ((mmTPC1_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
974 mask = 1 << ((mmTPC1_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
975 mask |= 1 << ((mmTPC1_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
976 mask |= 1 << ((mmTPC1_CMDQ_GLBL_PROT & 0x7F) >> 2);
977 mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
978 mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
979 mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
980 mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
981 mask |= 1 << ((mmTPC1_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
982 mask |= 1 << ((mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
983 mask |= 1 << ((mmTPC1_CMDQ_GLBL_STS0 & 0x7F) >> 2);
984 mask |= 1 << ((mmTPC1_CMDQ_GLBL_STS1 & 0x7F) >> 2);
985
986 WREG32(pb_addr + word_offset, ~mask);
987
988 pb_addr = (mmTPC1_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
989 word_offset = ((mmTPC1_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
990 mask = 1 << ((mmTPC1_CMDQ_CQ_CFG0 & 0x7F) >> 2);
991 mask |= 1 << ((mmTPC1_CMDQ_CQ_CFG1 & 0x7F) >> 2);
992 mask |= 1 << ((mmTPC1_CMDQ_CQ_ARUSER & 0x7F) >> 2);
993 mask |= 1 << ((mmTPC1_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
994 mask |= 1 << ((mmTPC1_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
995 mask |= 1 << ((mmTPC1_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
996 mask |= 1 << ((mmTPC1_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
997 mask |= 1 << ((mmTPC1_CMDQ_CQ_STS0 & 0x7F) >> 2);
998 mask |= 1 << ((mmTPC1_CMDQ_CQ_STS1 & 0x7F) >> 2);
999 mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1000 mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1001 mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1002 mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1003
1004 WREG32(pb_addr + word_offset, ~mask);
1005
1006 pb_addr = (mmTPC1_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
1007 word_offset = ((mmTPC1_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
1008 mask = 1 << ((mmTPC1_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
1009 mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
1010 mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
1011 mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
1012 mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
1013 mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
1014 mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
1015 mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
1016 mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
1017 mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
1018 mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
1019 mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
1020 mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
1021 mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
1022 mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
1023 mask |= 1 << ((mmTPC1_CMDQ_CP_STS & 0x7F) >> 2);
1024 mask |= 1 << ((mmTPC1_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
1025
1026 WREG32(pb_addr + word_offset, ~mask);
1027
1028 pb_addr = (mmTPC1_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
1029 word_offset = ((mmTPC1_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
1030 << 2;
1031 mask = 1 << ((mmTPC1_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
1032 mask |= 1 << ((mmTPC1_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
1033 mask |= 1 << ((mmTPC1_CMDQ_CP_DBG_0 & 0x7F) >> 2);
1034 mask |= 1 << ((mmTPC1_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
1035 mask |= 1 << ((mmTPC1_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
1036
1037 WREG32(pb_addr + word_offset, ~mask);
1038
1039 goya_pb_set_block(hdev, mmTPC2_RTR_BASE);
1040 goya_pb_set_block(hdev, mmTPC2_RD_REGULATOR_BASE);
1041 goya_pb_set_block(hdev, mmTPC2_WR_REGULATOR_BASE);
1042
1043 pb_addr = (mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
1044 word_offset = ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH &
1045 PROT_BITS_OFFS) >> 7) << 2;
1046 mask = 1 << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
1047 mask |= 1 << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
1048 mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
1049 mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
1050
1051 WREG32(pb_addr + word_offset, ~mask);
1052
1053 pb_addr = (mmTPC2_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
1054 word_offset = ((mmTPC2_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
1055 mask = 1 << ((mmTPC2_CFG_ARUSER & 0x7F) >> 2);
1056 mask |= 1 << ((mmTPC2_CFG_AWUSER & 0x7F) >> 2);
1057
1058 WREG32(pb_addr + word_offset, ~mask);
1059
1060 pb_addr = (mmTPC2_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
1061 word_offset = ((mmTPC2_CFG_FUNC_MBIST_CNTRL & PROT_BITS_OFFS) >> 7)
1062 << 2;
1063 mask = 1 << ((mmTPC2_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
1064 mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
1065 mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
1066 mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
1067 mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
1068 mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
1069 mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
1070 mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
1071 mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
1072 mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
1073 mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
1074 mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
1075
1076 WREG32(pb_addr + word_offset, ~mask);
1077
1078 pb_addr = (mmTPC2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1079 word_offset = ((mmTPC2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1080 mask = 1 << ((mmTPC2_QM_GLBL_CFG0 & 0x7F) >> 2);
1081 mask |= 1 << ((mmTPC2_QM_GLBL_CFG1 & 0x7F) >> 2);
1082 mask |= 1 << ((mmTPC2_QM_GLBL_PROT & 0x7F) >> 2);
1083 mask |= 1 << ((mmTPC2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
1084 mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
1085 mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
1086 mask |= 1 << ((mmTPC2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
1087 mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
1088 mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
1089 mask |= 1 << ((mmTPC2_QM_GLBL_STS0 & 0x7F) >> 2);
1090 mask |= 1 << ((mmTPC2_QM_GLBL_STS1 & 0x7F) >> 2);
1091 mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO & 0x7F) >> 2);
1092 mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI & 0x7F) >> 2);
1093 mask |= 1 << ((mmTPC2_QM_PQ_SIZE & 0x7F) >> 2);
1094 mask |= 1 << ((mmTPC2_QM_PQ_PI & 0x7F) >> 2);
1095 mask |= 1 << ((mmTPC2_QM_PQ_CI & 0x7F) >> 2);
1096 mask |= 1 << ((mmTPC2_QM_PQ_CFG0 & 0x7F) >> 2);
1097 mask |= 1 << ((mmTPC2_QM_PQ_CFG1 & 0x7F) >> 2);
1098 mask |= 1 << ((mmTPC2_QM_PQ_ARUSER & 0x7F) >> 2);
1099
1100 WREG32(pb_addr + word_offset, ~mask);
1101
1102 pb_addr = (mmTPC2_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
1103 word_offset = ((mmTPC2_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
1104 mask = 1 << ((mmTPC2_QM_PQ_PUSH0 & 0x7F) >> 2);
1105 mask |= 1 << ((mmTPC2_QM_PQ_PUSH1 & 0x7F) >> 2);
1106 mask |= 1 << ((mmTPC2_QM_PQ_PUSH2 & 0x7F) >> 2);
1107 mask |= 1 << ((mmTPC2_QM_PQ_PUSH3 & 0x7F) >> 2);
1108 mask |= 1 << ((mmTPC2_QM_PQ_STS0 & 0x7F) >> 2);
1109 mask |= 1 << ((mmTPC2_QM_PQ_STS1 & 0x7F) >> 2);
1110 mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1111 mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1112 mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1113 mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1114 mask |= 1 << ((mmTPC2_QM_CQ_CFG0 & 0x7F) >> 2);
1115 mask |= 1 << ((mmTPC2_QM_CQ_CFG1 & 0x7F) >> 2);
1116 mask |= 1 << ((mmTPC2_QM_CQ_ARUSER & 0x7F) >> 2);
1117 mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO & 0x7F) >> 2);
1118 mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI & 0x7F) >> 2);
1119 mask |= 1 << ((mmTPC2_QM_CQ_TSIZE & 0x7F) >> 2);
1120 mask |= 1 << ((mmTPC2_QM_CQ_CTL & 0x7F) >> 2);
1121 mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
1122 mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
1123 mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS & 0x7F) >> 2);
1124 mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS & 0x7F) >> 2);
1125 mask |= 1 << ((mmTPC2_QM_CQ_STS0 & 0x7F) >> 2);
1126 mask |= 1 << ((mmTPC2_QM_CQ_STS1 & 0x7F) >> 2);
1127 mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1128 mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1129 mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1130 mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1131
1132 WREG32(pb_addr + word_offset, ~mask);
1133
1134 pb_addr = (mmTPC2_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
1135 word_offset = ((mmTPC2_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
1136 mask = 1 << ((mmTPC2_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
1137 mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
1138 mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
1139 mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
1140 mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
1141 mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
1142 mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
1143 mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
1144 mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
1145 mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
1146 mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
1147 mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
1148 mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
1149 mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
1150 mask |= 1 << ((mmTPC2_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
1151
1152 WREG32(pb_addr + word_offset, ~mask);
1153
1154 pb_addr = (mmTPC2_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1155 word_offset = ((mmTPC2_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1156 mask = 1 << ((mmTPC2_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
1157 mask |= 1 << ((mmTPC2_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
1158 mask |= 1 << ((mmTPC2_CMDQ_GLBL_PROT & 0x7F) >> 2);
1159 mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
1160 mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
1161 mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
1162 mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
1163 mask |= 1 << ((mmTPC2_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
1164 mask |= 1 << ((mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
1165 mask |= 1 << ((mmTPC2_CMDQ_GLBL_STS0 & 0x7F) >> 2);
1166 mask |= 1 << ((mmTPC2_CMDQ_GLBL_STS1 & 0x7F) >> 2);
1167
1168 WREG32(pb_addr + word_offset, ~mask);
1169
1170 pb_addr = (mmTPC2_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1171 word_offset = ((mmTPC2_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1172 mask = 1 << ((mmTPC2_CMDQ_CQ_CFG0 & 0x7F) >> 2);
1173 mask |= 1 << ((mmTPC2_CMDQ_CQ_CFG1 & 0x7F) >> 2);
1174 mask |= 1 << ((mmTPC2_CMDQ_CQ_ARUSER & 0x7F) >> 2);
1175 mask |= 1 << ((mmTPC2_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
1176 mask |= 1 << ((mmTPC2_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
1177 mask |= 1 << ((mmTPC2_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
1178 mask |= 1 << ((mmTPC2_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
1179 mask |= 1 << ((mmTPC2_CMDQ_CQ_STS0 & 0x7F) >> 2);
1180 mask |= 1 << ((mmTPC2_CMDQ_CQ_STS1 & 0x7F) >> 2);
1181 mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1182 mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1183 mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1184 mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1185
1186 WREG32(pb_addr + word_offset, ~mask);
1187
1188 pb_addr = (mmTPC2_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
1189 word_offset = ((mmTPC2_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
1190 mask = 1 << ((mmTPC2_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
1191 mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
1192 mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
1193 mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
1194 mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
1195 mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
1196 mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
1197 mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
1198 mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
1199 mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
1200 mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
1201 mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
1202 mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
1203 mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
1204 mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
1205 mask |= 1 << ((mmTPC2_CMDQ_CP_STS & 0x7F) >> 2);
1206 mask |= 1 << ((mmTPC2_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
1207
1208 WREG32(pb_addr + word_offset, ~mask);
1209
1210 pb_addr = (mmTPC2_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
1211 word_offset = ((mmTPC2_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
1212 << 2;
1213 mask = 1 << ((mmTPC2_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
1214 mask |= 1 << ((mmTPC2_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
1215 mask |= 1 << ((mmTPC2_CMDQ_CP_DBG_0 & 0x7F) >> 2);
1216 mask |= 1 << ((mmTPC2_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
1217 mask |= 1 << ((mmTPC2_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
1218
1219 WREG32(pb_addr + word_offset, ~mask);
1220
1221 goya_pb_set_block(hdev, mmTPC3_RTR_BASE);
1222 goya_pb_set_block(hdev, mmTPC3_RD_REGULATOR_BASE);
1223 goya_pb_set_block(hdev, mmTPC3_WR_REGULATOR_BASE);
1224
1225 pb_addr = (mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
1226 word_offset = ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH &
1227 PROT_BITS_OFFS) >> 7) << 2;
1228 mask = 1 << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
1229 mask |= 1 << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
1230 mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
1231 mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
1232
1233 WREG32(pb_addr + word_offset, ~mask);
1234
1235 pb_addr = (mmTPC3_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
1236 word_offset = ((mmTPC3_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
1237 mask = 1 << ((mmTPC3_CFG_ARUSER & 0x7F) >> 2);
1238 mask |= 1 << ((mmTPC3_CFG_AWUSER & 0x7F) >> 2);
1239
1240 WREG32(pb_addr + word_offset, ~mask);
1241
1242 pb_addr = (mmTPC3_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
1243 word_offset = ((mmTPC3_CFG_FUNC_MBIST_CNTRL &
1244 PROT_BITS_OFFS) >> 7) << 2;
1245 mask = 1 << ((mmTPC3_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
1246 mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
1247 mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
1248 mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
1249 mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
1250 mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
1251 mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
1252 mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
1253 mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
1254 mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
1255 mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
1256 mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
1257
1258 WREG32(pb_addr + word_offset, ~mask);
1259
1260 pb_addr = (mmTPC3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1261 word_offset = ((mmTPC3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1262 mask = 1 << ((mmTPC3_QM_GLBL_CFG0 & 0x7F) >> 2);
1263 mask |= 1 << ((mmTPC3_QM_GLBL_CFG1 & 0x7F) >> 2);
1264 mask |= 1 << ((mmTPC3_QM_GLBL_PROT & 0x7F) >> 2);
1265 mask |= 1 << ((mmTPC3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
1266 mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
1267 mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
1268 mask |= 1 << ((mmTPC3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
1269 mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
1270 mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
1271 mask |= 1 << ((mmTPC3_QM_GLBL_STS0 & 0x7F) >> 2);
1272 mask |= 1 << ((mmTPC3_QM_GLBL_STS1 & 0x7F) >> 2);
1273 mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO & 0x7F) >> 2);
1274 mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI & 0x7F) >> 2);
1275 mask |= 1 << ((mmTPC3_QM_PQ_SIZE & 0x7F) >> 2);
1276 mask |= 1 << ((mmTPC3_QM_PQ_PI & 0x7F) >> 2);
1277 mask |= 1 << ((mmTPC3_QM_PQ_CI & 0x7F) >> 2);
1278 mask |= 1 << ((mmTPC3_QM_PQ_CFG0 & 0x7F) >> 2);
1279 mask |= 1 << ((mmTPC3_QM_PQ_CFG1 & 0x7F) >> 2);
1280 mask |= 1 << ((mmTPC3_QM_PQ_ARUSER & 0x7F) >> 2);
1281
1282 WREG32(pb_addr + word_offset, ~mask);
1283
1284 pb_addr = (mmTPC3_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
1285 word_offset = ((mmTPC3_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
1286 mask = 1 << ((mmTPC3_QM_PQ_PUSH0 & 0x7F) >> 2);
1287 mask |= 1 << ((mmTPC3_QM_PQ_PUSH1 & 0x7F) >> 2);
1288 mask |= 1 << ((mmTPC3_QM_PQ_PUSH2 & 0x7F) >> 2);
1289 mask |= 1 << ((mmTPC3_QM_PQ_PUSH3 & 0x7F) >> 2);
1290 mask |= 1 << ((mmTPC3_QM_PQ_STS0 & 0x7F) >> 2);
1291 mask |= 1 << ((mmTPC3_QM_PQ_STS1 & 0x7F) >> 2);
1292 mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1293 mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1294 mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1295 mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1296 mask |= 1 << ((mmTPC3_QM_CQ_CFG0 & 0x7F) >> 2);
1297 mask |= 1 << ((mmTPC3_QM_CQ_CFG1 & 0x7F) >> 2);
1298 mask |= 1 << ((mmTPC3_QM_CQ_ARUSER & 0x7F) >> 2);
1299 mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO & 0x7F) >> 2);
1300 mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI & 0x7F) >> 2);
1301 mask |= 1 << ((mmTPC3_QM_CQ_TSIZE & 0x7F) >> 2);
1302 mask |= 1 << ((mmTPC3_QM_CQ_CTL & 0x7F) >> 2);
1303 mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
1304 mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
1305 mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS & 0x7F) >> 2);
1306 mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS & 0x7F) >> 2);
1307 mask |= 1 << ((mmTPC3_QM_CQ_STS0 & 0x7F) >> 2);
1308 mask |= 1 << ((mmTPC3_QM_CQ_STS1 & 0x7F) >> 2);
1309 mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1310 mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1311 mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1312 mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1313
1314 WREG32(pb_addr + word_offset, ~mask);
1315
1316 pb_addr = (mmTPC3_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
1317 word_offset = ((mmTPC3_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
1318 mask = 1 << ((mmTPC3_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
1319 mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
1320 mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
1321 mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
1322 mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
1323 mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
1324 mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
1325 mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
1326 mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
1327 mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
1328 mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
1329 mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
1330 mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
1331 mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
1332 mask |= 1 << ((mmTPC3_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
1333
1334 WREG32(pb_addr + word_offset, ~mask);
1335
1336 pb_addr = (mmTPC3_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1337 word_offset = ((mmTPC3_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1338 mask = 1 << ((mmTPC3_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
1339 mask |= 1 << ((mmTPC3_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
1340 mask |= 1 << ((mmTPC3_CMDQ_GLBL_PROT & 0x7F) >> 2);
1341 mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
1342 mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
1343 mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
1344 mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
1345 mask |= 1 << ((mmTPC3_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
1346 mask |= 1 << ((mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
1347 mask |= 1 << ((mmTPC3_CMDQ_GLBL_STS0 & 0x7F) >> 2);
1348 mask |= 1 << ((mmTPC3_CMDQ_GLBL_STS1 & 0x7F) >> 2);
1349
1350 WREG32(pb_addr + word_offset, ~mask);
1351
1352 pb_addr = (mmTPC3_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1353 word_offset = ((mmTPC3_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1354 mask = 1 << ((mmTPC3_CMDQ_CQ_CFG0 & 0x7F) >> 2);
1355 mask |= 1 << ((mmTPC3_CMDQ_CQ_CFG1 & 0x7F) >> 2);
1356 mask |= 1 << ((mmTPC3_CMDQ_CQ_ARUSER & 0x7F) >> 2);
1357 mask |= 1 << ((mmTPC3_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
1358 mask |= 1 << ((mmTPC3_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
1359 mask |= 1 << ((mmTPC3_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
1360 mask |= 1 << ((mmTPC3_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
1361 mask |= 1 << ((mmTPC3_CMDQ_CQ_STS0 & 0x7F) >> 2);
1362 mask |= 1 << ((mmTPC3_CMDQ_CQ_STS1 & 0x7F) >> 2);
1363 mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1364 mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1365 mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1366 mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1367
1368 WREG32(pb_addr + word_offset, ~mask);
1369
1370 pb_addr = (mmTPC3_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
1371 word_offset = ((mmTPC3_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
1372 mask = 1 << ((mmTPC3_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
1373 mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
1374 mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
1375 mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
1376 mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
1377 mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
1378 mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
1379 mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
1380 mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
1381 mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
1382 mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
1383 mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
1384 mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
1385 mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
1386 mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
1387 mask |= 1 << ((mmTPC3_CMDQ_CP_STS & 0x7F) >> 2);
1388 mask |= 1 << ((mmTPC3_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
1389
1390 WREG32(pb_addr + word_offset, ~mask);
1391
1392 pb_addr = (mmTPC3_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
1393 word_offset = ((mmTPC3_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
1394 << 2;
1395 mask = 1 << ((mmTPC3_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
1396 mask |= 1 << ((mmTPC3_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
1397 mask |= 1 << ((mmTPC3_CMDQ_CP_DBG_0 & 0x7F) >> 2);
1398 mask |= 1 << ((mmTPC3_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
1399 mask |= 1 << ((mmTPC3_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
1400
1401 WREG32(pb_addr + word_offset, ~mask);
1402
1403 goya_pb_set_block(hdev, mmTPC4_RTR_BASE);
1404 goya_pb_set_block(hdev, mmTPC4_RD_REGULATOR_BASE);
1405 goya_pb_set_block(hdev, mmTPC4_WR_REGULATOR_BASE);
1406
1407 pb_addr = (mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
1408 word_offset = ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH &
1409 PROT_BITS_OFFS) >> 7) << 2;
1410 mask = 1 << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
1411 mask |= 1 << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
1412 mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
1413 mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
1414
1415 WREG32(pb_addr + word_offset, ~mask);
1416
1417 pb_addr = (mmTPC4_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
1418 word_offset = ((mmTPC4_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
1419 mask = 1 << ((mmTPC4_CFG_ARUSER & 0x7F) >> 2);
1420 mask |= 1 << ((mmTPC4_CFG_AWUSER & 0x7F) >> 2);
1421
1422 WREG32(pb_addr + word_offset, ~mask);
1423
1424 pb_addr = (mmTPC4_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
1425 word_offset = ((mmTPC4_CFG_FUNC_MBIST_CNTRL &
1426 PROT_BITS_OFFS) >> 7) << 2;
1427 mask = 1 << ((mmTPC4_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
1428 mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
1429 mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
1430 mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
1431 mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
1432 mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
1433 mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
1434 mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
1435 mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
1436 mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
1437 mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
1438 mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
1439
1440 WREG32(pb_addr + word_offset, ~mask);
1441
1442 pb_addr = (mmTPC4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1443 word_offset = ((mmTPC4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1444 mask = 1 << ((mmTPC4_QM_GLBL_CFG0 & 0x7F) >> 2);
1445 mask |= 1 << ((mmTPC4_QM_GLBL_CFG1 & 0x7F) >> 2);
1446 mask |= 1 << ((mmTPC4_QM_GLBL_PROT & 0x7F) >> 2);
1447 mask |= 1 << ((mmTPC4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
1448 mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
1449 mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
1450 mask |= 1 << ((mmTPC4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
1451 mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
1452 mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
1453 mask |= 1 << ((mmTPC4_QM_GLBL_STS0 & 0x7F) >> 2);
1454 mask |= 1 << ((mmTPC4_QM_GLBL_STS1 & 0x7F) >> 2);
1455 mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO & 0x7F) >> 2);
1456 mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI & 0x7F) >> 2);
1457 mask |= 1 << ((mmTPC4_QM_PQ_SIZE & 0x7F) >> 2);
1458 mask |= 1 << ((mmTPC4_QM_PQ_PI & 0x7F) >> 2);
1459 mask |= 1 << ((mmTPC4_QM_PQ_CI & 0x7F) >> 2);
1460 mask |= 1 << ((mmTPC4_QM_PQ_CFG0 & 0x7F) >> 2);
1461 mask |= 1 << ((mmTPC4_QM_PQ_CFG1 & 0x7F) >> 2);
1462 mask |= 1 << ((mmTPC4_QM_PQ_ARUSER & 0x7F) >> 2);
1463
1464 WREG32(pb_addr + word_offset, ~mask);
1465
1466 pb_addr = (mmTPC4_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
1467 word_offset = ((mmTPC4_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
1468 mask = 1 << ((mmTPC4_QM_PQ_PUSH0 & 0x7F) >> 2);
1469 mask |= 1 << ((mmTPC4_QM_PQ_PUSH1 & 0x7F) >> 2);
1470 mask |= 1 << ((mmTPC4_QM_PQ_PUSH2 & 0x7F) >> 2);
1471 mask |= 1 << ((mmTPC4_QM_PQ_PUSH3 & 0x7F) >> 2);
1472 mask |= 1 << ((mmTPC4_QM_PQ_STS0 & 0x7F) >> 2);
1473 mask |= 1 << ((mmTPC4_QM_PQ_STS1 & 0x7F) >> 2);
1474 mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1475 mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1476 mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1477 mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1478 mask |= 1 << ((mmTPC4_QM_CQ_CFG0 & 0x7F) >> 2);
1479 mask |= 1 << ((mmTPC4_QM_CQ_CFG1 & 0x7F) >> 2);
1480 mask |= 1 << ((mmTPC4_QM_CQ_ARUSER & 0x7F) >> 2);
1481 mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO & 0x7F) >> 2);
1482 mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI & 0x7F) >> 2);
1483 mask |= 1 << ((mmTPC4_QM_CQ_TSIZE & 0x7F) >> 2);
1484 mask |= 1 << ((mmTPC4_QM_CQ_CTL & 0x7F) >> 2);
1485 mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
1486 mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
1487 mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS & 0x7F) >> 2);
1488 mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS & 0x7F) >> 2);
1489 mask |= 1 << ((mmTPC4_QM_CQ_STS0 & 0x7F) >> 2);
1490 mask |= 1 << ((mmTPC4_QM_CQ_STS1 & 0x7F) >> 2);
1491 mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1492 mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1493 mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1494 mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1495
1496 WREG32(pb_addr + word_offset, ~mask);
1497
1498 pb_addr = (mmTPC4_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
1499 word_offset = ((mmTPC4_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
1500 mask = 1 << ((mmTPC4_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
1501 mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
1502 mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
1503 mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
1504 mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
1505 mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
1506 mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
1507 mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
1508 mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
1509 mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
1510 mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
1511 mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
1512 mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
1513 mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
1514 mask |= 1 << ((mmTPC4_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
1515
1516 WREG32(pb_addr + word_offset, ~mask);
1517
1518 pb_addr = (mmTPC4_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1519 word_offset = ((mmTPC4_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1520 mask = 1 << ((mmTPC4_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
1521 mask |= 1 << ((mmTPC4_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
1522 mask |= 1 << ((mmTPC4_CMDQ_GLBL_PROT & 0x7F) >> 2);
1523 mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
1524 mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
1525 mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
1526 mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
1527 mask |= 1 << ((mmTPC4_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
1528 mask |= 1 << ((mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
1529 mask |= 1 << ((mmTPC4_CMDQ_GLBL_STS0 & 0x7F) >> 2);
1530 mask |= 1 << ((mmTPC4_CMDQ_GLBL_STS1 & 0x7F) >> 2);
1531
1532 WREG32(pb_addr + word_offset, ~mask);
1533
1534 pb_addr = (mmTPC4_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1535 word_offset = ((mmTPC4_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1536 mask = 1 << ((mmTPC4_CMDQ_CQ_CFG0 & 0x7F) >> 2);
1537 mask |= 1 << ((mmTPC4_CMDQ_CQ_CFG1 & 0x7F) >> 2);
1538 mask |= 1 << ((mmTPC4_CMDQ_CQ_ARUSER & 0x7F) >> 2);
1539 mask |= 1 << ((mmTPC4_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
1540 mask |= 1 << ((mmTPC4_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
1541 mask |= 1 << ((mmTPC4_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
1542 mask |= 1 << ((mmTPC4_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
1543 mask |= 1 << ((mmTPC4_CMDQ_CQ_STS0 & 0x7F) >> 2);
1544 mask |= 1 << ((mmTPC4_CMDQ_CQ_STS1 & 0x7F) >> 2);
1545 mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1546 mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1547 mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1548 mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1549
1550 WREG32(pb_addr + word_offset, ~mask);
1551
1552 pb_addr = (mmTPC4_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
1553 word_offset = ((mmTPC4_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
1554 mask = 1 << ((mmTPC4_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
1555 mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
1556 mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
1557 mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
1558 mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
1559 mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
1560 mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
1561 mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
1562 mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
1563 mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
1564 mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
1565 mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
1566 mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
1567 mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
1568 mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
1569 mask |= 1 << ((mmTPC4_CMDQ_CP_STS & 0x7F) >> 2);
1570 mask |= 1 << ((mmTPC4_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
1571
1572 WREG32(pb_addr + word_offset, ~mask);
1573
1574 pb_addr = (mmTPC4_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
1575 word_offset = ((mmTPC4_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
1576 << 2;
1577 mask = 1 << ((mmTPC4_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
1578 mask |= 1 << ((mmTPC4_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
1579 mask |= 1 << ((mmTPC4_CMDQ_CP_DBG_0 & 0x7F) >> 2);
1580 mask |= 1 << ((mmTPC4_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
1581 mask |= 1 << ((mmTPC4_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
1582
1583 WREG32(pb_addr + word_offset, ~mask);
1584
1585 goya_pb_set_block(hdev, mmTPC5_RTR_BASE);
1586 goya_pb_set_block(hdev, mmTPC5_RD_REGULATOR_BASE);
1587 goya_pb_set_block(hdev, mmTPC5_WR_REGULATOR_BASE);
1588
1589 pb_addr = (mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
1590 word_offset = ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH &
1591 PROT_BITS_OFFS) >> 7) << 2;
1592 mask = 1 << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
1593 mask |= 1 << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
1594 mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
1595 mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
1596
1597 WREG32(pb_addr + word_offset, ~mask);
1598
1599 pb_addr = (mmTPC5_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
1600 word_offset = ((mmTPC5_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
1601 mask = 1 << ((mmTPC5_CFG_ARUSER & 0x7F) >> 2);
1602 mask |= 1 << ((mmTPC5_CFG_AWUSER & 0x7F) >> 2);
1603
1604 WREG32(pb_addr + word_offset, ~mask);
1605
1606 pb_addr = (mmTPC5_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
1607 word_offset = ((mmTPC5_CFG_FUNC_MBIST_CNTRL &
1608 PROT_BITS_OFFS) >> 7) << 2;
1609 mask = 1 << ((mmTPC5_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
1610 mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
1611 mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
1612 mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
1613 mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
1614 mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
1615 mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
1616 mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
1617 mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
1618 mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
1619 mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
1620 mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
1621
1622 WREG32(pb_addr + word_offset, ~mask);
1623
1624 pb_addr = (mmTPC5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1625 word_offset = ((mmTPC5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1626 mask = 1 << ((mmTPC5_QM_GLBL_CFG0 & 0x7F) >> 2);
1627 mask |= 1 << ((mmTPC5_QM_GLBL_CFG1 & 0x7F) >> 2);
1628 mask |= 1 << ((mmTPC5_QM_GLBL_PROT & 0x7F) >> 2);
1629 mask |= 1 << ((mmTPC5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
1630 mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
1631 mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
1632 mask |= 1 << ((mmTPC5_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
1633 mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
1634 mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
1635 mask |= 1 << ((mmTPC5_QM_GLBL_STS0 & 0x7F) >> 2);
1636 mask |= 1 << ((mmTPC5_QM_GLBL_STS1 & 0x7F) >> 2);
1637 mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO & 0x7F) >> 2);
1638 mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI & 0x7F) >> 2);
1639 mask |= 1 << ((mmTPC5_QM_PQ_SIZE & 0x7F) >> 2);
1640 mask |= 1 << ((mmTPC5_QM_PQ_PI & 0x7F) >> 2);
1641 mask |= 1 << ((mmTPC5_QM_PQ_CI & 0x7F) >> 2);
1642 mask |= 1 << ((mmTPC5_QM_PQ_CFG0 & 0x7F) >> 2);
1643 mask |= 1 << ((mmTPC5_QM_PQ_CFG1 & 0x7F) >> 2);
1644 mask |= 1 << ((mmTPC5_QM_PQ_ARUSER & 0x7F) >> 2);
1645
1646 WREG32(pb_addr + word_offset, ~mask);
1647
1648 pb_addr = (mmTPC5_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
1649 word_offset = ((mmTPC5_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
1650 mask = 1 << ((mmTPC5_QM_PQ_PUSH0 & 0x7F) >> 2);
1651 mask |= 1 << ((mmTPC5_QM_PQ_PUSH1 & 0x7F) >> 2);
1652 mask |= 1 << ((mmTPC5_QM_PQ_PUSH2 & 0x7F) >> 2);
1653 mask |= 1 << ((mmTPC5_QM_PQ_PUSH3 & 0x7F) >> 2);
1654 mask |= 1 << ((mmTPC5_QM_PQ_STS0 & 0x7F) >> 2);
1655 mask |= 1 << ((mmTPC5_QM_PQ_STS1 & 0x7F) >> 2);
1656 mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1657 mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1658 mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1659 mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1660 mask |= 1 << ((mmTPC5_QM_CQ_CFG0 & 0x7F) >> 2);
1661 mask |= 1 << ((mmTPC5_QM_CQ_CFG1 & 0x7F) >> 2);
1662 mask |= 1 << ((mmTPC5_QM_CQ_ARUSER & 0x7F) >> 2);
1663 mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO & 0x7F) >> 2);
1664 mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI & 0x7F) >> 2);
1665 mask |= 1 << ((mmTPC5_QM_CQ_TSIZE & 0x7F) >> 2);
1666 mask |= 1 << ((mmTPC5_QM_CQ_CTL & 0x7F) >> 2);
1667 mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
1668 mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
1669 mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS & 0x7F) >> 2);
1670 mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS & 0x7F) >> 2);
1671 mask |= 1 << ((mmTPC5_QM_CQ_STS0 & 0x7F) >> 2);
1672 mask |= 1 << ((mmTPC5_QM_CQ_STS1 & 0x7F) >> 2);
1673 mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1674 mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1675 mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1676 mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1677
1678 WREG32(pb_addr + word_offset, ~mask);
1679
1680 pb_addr = (mmTPC5_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
1681 word_offset = ((mmTPC5_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
1682 mask = 1 << ((mmTPC5_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
1683 mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
1684 mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
1685 mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
1686 mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
1687 mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
1688 mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
1689 mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
1690 mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
1691 mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
1692 mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
1693 mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
1694 mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
1695 mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
1696 mask |= 1 << ((mmTPC5_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
1697
1698 WREG32(pb_addr + word_offset, ~mask);
1699
1700 pb_addr = (mmTPC5_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1701 word_offset = ((mmTPC5_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1702 mask = 1 << ((mmTPC5_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
1703 mask |= 1 << ((mmTPC5_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
1704 mask |= 1 << ((mmTPC5_CMDQ_GLBL_PROT & 0x7F) >> 2);
1705 mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
1706 mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
1707 mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
1708 mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
1709 mask |= 1 << ((mmTPC5_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
1710 mask |= 1 << ((mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
1711 mask |= 1 << ((mmTPC5_CMDQ_GLBL_STS0 & 0x7F) >> 2);
1712 mask |= 1 << ((mmTPC5_CMDQ_GLBL_STS1 & 0x7F) >> 2);
1713
1714 WREG32(pb_addr + word_offset, ~mask);
1715
1716 pb_addr = (mmTPC5_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1717 word_offset = ((mmTPC5_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1718 mask = 1 << ((mmTPC5_CMDQ_CQ_CFG0 & 0x7F) >> 2);
1719 mask |= 1 << ((mmTPC5_CMDQ_CQ_CFG1 & 0x7F) >> 2);
1720 mask |= 1 << ((mmTPC5_CMDQ_CQ_ARUSER & 0x7F) >> 2);
1721 mask |= 1 << ((mmTPC5_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
1722 mask |= 1 << ((mmTPC5_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
1723 mask |= 1 << ((mmTPC5_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
1724 mask |= 1 << ((mmTPC5_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
1725 mask |= 1 << ((mmTPC5_CMDQ_CQ_STS0 & 0x7F) >> 2);
1726 mask |= 1 << ((mmTPC5_CMDQ_CQ_STS1 & 0x7F) >> 2);
1727 mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1728 mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1729 mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1730 mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1731
1732 WREG32(pb_addr + word_offset, ~mask);
1733
1734 pb_addr = (mmTPC5_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
1735 word_offset = ((mmTPC5_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
1736 mask = 1 << ((mmTPC5_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
1737 mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
1738 mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
1739 mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
1740 mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
1741 mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
1742 mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
1743 mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
1744 mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
1745 mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
1746 mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
1747 mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
1748 mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
1749 mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
1750 mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
1751 mask |= 1 << ((mmTPC5_CMDQ_CP_STS & 0x7F) >> 2);
1752 mask |= 1 << ((mmTPC5_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
1753
1754 WREG32(pb_addr + word_offset, ~mask);
1755
1756 pb_addr = (mmTPC5_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
1757 word_offset = ((mmTPC5_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
1758 << 2;
1759 mask = 1 << ((mmTPC5_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
1760 mask |= 1 << ((mmTPC5_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
1761 mask |= 1 << ((mmTPC5_CMDQ_CP_DBG_0 & 0x7F) >> 2);
1762 mask |= 1 << ((mmTPC5_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
1763 mask |= 1 << ((mmTPC5_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
1764
1765 WREG32(pb_addr + word_offset, ~mask);
1766
1767 goya_pb_set_block(hdev, mmTPC6_RTR_BASE);
1768 goya_pb_set_block(hdev, mmTPC6_RD_REGULATOR_BASE);
1769 goya_pb_set_block(hdev, mmTPC6_WR_REGULATOR_BASE);
1770
1771 pb_addr = (mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
1772 word_offset = ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH &
1773 PROT_BITS_OFFS) >> 7) << 2;
1774 mask = 1 << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
1775 mask |= 1 << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
1776 mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
1777 mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
1778
1779 WREG32(pb_addr + word_offset, ~mask);
1780
1781 pb_addr = (mmTPC6_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
1782 word_offset = ((mmTPC6_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
1783 mask = 1 << ((mmTPC6_CFG_ARUSER & 0x7F) >> 2);
1784 mask |= 1 << ((mmTPC6_CFG_AWUSER & 0x7F) >> 2);
1785
1786 WREG32(pb_addr + word_offset, ~mask);
1787
1788 pb_addr = (mmTPC6_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
1789 word_offset = ((mmTPC6_CFG_FUNC_MBIST_CNTRL &
1790 PROT_BITS_OFFS) >> 7) << 2;
1791 mask = 1 << ((mmTPC6_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
1792 mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
1793 mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
1794 mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
1795 mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
1796 mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
1797 mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
1798 mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
1799 mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
1800 mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
1801 mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
1802 mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
1803
1804 WREG32(pb_addr + word_offset, ~mask);
1805
1806 pb_addr = (mmTPC6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1807 word_offset = ((mmTPC6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1808 mask = 1 << ((mmTPC6_QM_GLBL_CFG0 & 0x7F) >> 2);
1809 mask |= 1 << ((mmTPC6_QM_GLBL_CFG1 & 0x7F) >> 2);
1810 mask |= 1 << ((mmTPC6_QM_GLBL_PROT & 0x7F) >> 2);
1811 mask |= 1 << ((mmTPC6_QM_GLBL_ERR_CFG & 0x7F) >> 2);
1812 mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
1813 mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
1814 mask |= 1 << ((mmTPC6_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
1815 mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
1816 mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
1817 mask |= 1 << ((mmTPC6_QM_GLBL_STS0 & 0x7F) >> 2);
1818 mask |= 1 << ((mmTPC6_QM_GLBL_STS1 & 0x7F) >> 2);
1819 mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO & 0x7F) >> 2);
1820 mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI & 0x7F) >> 2);
1821 mask |= 1 << ((mmTPC6_QM_PQ_SIZE & 0x7F) >> 2);
1822 mask |= 1 << ((mmTPC6_QM_PQ_PI & 0x7F) >> 2);
1823 mask |= 1 << ((mmTPC6_QM_PQ_CI & 0x7F) >> 2);
1824 mask |= 1 << ((mmTPC6_QM_PQ_CFG0 & 0x7F) >> 2);
1825 mask |= 1 << ((mmTPC6_QM_PQ_CFG1 & 0x7F) >> 2);
1826 mask |= 1 << ((mmTPC6_QM_PQ_ARUSER & 0x7F) >> 2);
1827
1828 WREG32(pb_addr + word_offset, ~mask);
1829
1830 pb_addr = (mmTPC6_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
1831 word_offset = ((mmTPC6_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
1832 mask = 1 << ((mmTPC6_QM_PQ_PUSH0 & 0x7F) >> 2);
1833 mask |= 1 << ((mmTPC6_QM_PQ_PUSH1 & 0x7F) >> 2);
1834 mask |= 1 << ((mmTPC6_QM_PQ_PUSH2 & 0x7F) >> 2);
1835 mask |= 1 << ((mmTPC6_QM_PQ_PUSH3 & 0x7F) >> 2);
1836 mask |= 1 << ((mmTPC6_QM_PQ_STS0 & 0x7F) >> 2);
1837 mask |= 1 << ((mmTPC6_QM_PQ_STS1 & 0x7F) >> 2);
1838 mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1839 mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1840 mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1841 mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1842 mask |= 1 << ((mmTPC6_QM_CQ_CFG0 & 0x7F) >> 2);
1843 mask |= 1 << ((mmTPC6_QM_CQ_CFG1 & 0x7F) >> 2);
1844 mask |= 1 << ((mmTPC6_QM_CQ_ARUSER & 0x7F) >> 2);
1845 mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO & 0x7F) >> 2);
1846 mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI & 0x7F) >> 2);
1847 mask |= 1 << ((mmTPC6_QM_CQ_TSIZE & 0x7F) >> 2);
1848 mask |= 1 << ((mmTPC6_QM_CQ_CTL & 0x7F) >> 2);
1849 mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
1850 mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
1851 mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS & 0x7F) >> 2);
1852 mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS & 0x7F) >> 2);
1853 mask |= 1 << ((mmTPC6_QM_CQ_STS0 & 0x7F) >> 2);
1854 mask |= 1 << ((mmTPC6_QM_CQ_STS1 & 0x7F) >> 2);
1855 mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1856 mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1857 mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1858 mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1859
1860 WREG32(pb_addr + word_offset, ~mask);
1861
1862 pb_addr = (mmTPC6_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
1863 word_offset = ((mmTPC6_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
1864 mask = 1 << ((mmTPC6_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
1865 mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
1866 mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
1867 mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
1868 mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
1869 mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
1870 mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
1871 mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
1872 mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
1873 mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
1874 mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
1875 mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
1876 mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
1877 mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
1878 mask |= 1 << ((mmTPC6_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
1879
1880 WREG32(pb_addr + word_offset, ~mask);
1881
1882 pb_addr = (mmTPC6_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1883 word_offset = ((mmTPC6_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1884 mask = 1 << ((mmTPC6_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
1885 mask |= 1 << ((mmTPC6_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
1886 mask |= 1 << ((mmTPC6_CMDQ_GLBL_PROT & 0x7F) >> 2);
1887 mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
1888 mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
1889 mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
1890 mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
1891 mask |= 1 << ((mmTPC6_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
1892 mask |= 1 << ((mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
1893 mask |= 1 << ((mmTPC6_CMDQ_GLBL_STS0 & 0x7F) >> 2);
1894 mask |= 1 << ((mmTPC6_CMDQ_GLBL_STS1 & 0x7F) >> 2);
1895
1896 WREG32(pb_addr + word_offset, ~mask);
1897
1898 pb_addr = (mmTPC6_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1899 word_offset = ((mmTPC6_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1900 mask = 1 << ((mmTPC6_CMDQ_CQ_CFG0 & 0x7F) >> 2);
1901 mask |= 1 << ((mmTPC6_CMDQ_CQ_CFG1 & 0x7F) >> 2);
1902 mask |= 1 << ((mmTPC6_CMDQ_CQ_ARUSER & 0x7F) >> 2);
1903 mask |= 1 << ((mmTPC6_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
1904 mask |= 1 << ((mmTPC6_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
1905 mask |= 1 << ((mmTPC6_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
1906 mask |= 1 << ((mmTPC6_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
1907 mask |= 1 << ((mmTPC6_CMDQ_CQ_STS0 & 0x7F) >> 2);
1908 mask |= 1 << ((mmTPC6_CMDQ_CQ_STS1 & 0x7F) >> 2);
1909 mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
1910 mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
1911 mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
1912 mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
1913
1914 WREG32(pb_addr + word_offset, ~mask);
1915
1916 pb_addr = (mmTPC6_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
1917 word_offset = ((mmTPC6_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
1918 mask = 1 << ((mmTPC6_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
1919 mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
1920 mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
1921 mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
1922 mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
1923 mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
1924 mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
1925 mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
1926 mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
1927 mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
1928 mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
1929 mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
1930 mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
1931 mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
1932 mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
1933 mask |= 1 << ((mmTPC6_CMDQ_CP_STS & 0x7F) >> 2);
1934 mask |= 1 << ((mmTPC6_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
1935
1936 WREG32(pb_addr + word_offset, ~mask);
1937
1938 pb_addr = (mmTPC6_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
1939 word_offset = ((mmTPC6_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
1940 << 2;
1941 mask = 1 << ((mmTPC6_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
1942 mask |= 1 << ((mmTPC6_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
1943 mask |= 1 << ((mmTPC6_CMDQ_CP_DBG_0 & 0x7F) >> 2);
1944 mask |= 1 << ((mmTPC6_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
1945 mask |= 1 << ((mmTPC6_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
1946
1947 WREG32(pb_addr + word_offset, ~mask);
1948
1949 goya_pb_set_block(hdev, mmTPC7_NRTR_BASE);
1950 goya_pb_set_block(hdev, mmTPC7_RD_REGULATOR_BASE);
1951 goya_pb_set_block(hdev, mmTPC7_WR_REGULATOR_BASE);
1952
1953 pb_addr = (mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
1954 word_offset = ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH &
1955 PROT_BITS_OFFS) >> 7) << 2;
1956 mask = 1 << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
1957 mask |= 1 << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
1958 mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
1959 mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
1960
1961 WREG32(pb_addr + word_offset, ~mask);
1962
1963 pb_addr = (mmTPC7_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
1964 word_offset = ((mmTPC7_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
1965 mask = 1 << ((mmTPC7_CFG_ARUSER & 0x7F) >> 2);
1966 mask |= 1 << ((mmTPC7_CFG_AWUSER & 0x7F) >> 2);
1967
1968 WREG32(pb_addr + word_offset, ~mask);
1969
1970 pb_addr = (mmTPC7_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
1971 word_offset = ((mmTPC7_CFG_FUNC_MBIST_CNTRL &
1972 PROT_BITS_OFFS) >> 7) << 2;
1973 mask = 1 << ((mmTPC7_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
1974 mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
1975 mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
1976 mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
1977 mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
1978 mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
1979 mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
1980 mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
1981 mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
1982 mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
1983 mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
1984 mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
1985
1986 WREG32(pb_addr + word_offset, ~mask);
1987
1988 pb_addr = (mmTPC7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
1989 word_offset = ((mmTPC7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
1990 mask = 1 << ((mmTPC7_QM_GLBL_CFG0 & 0x7F) >> 2);
1991 mask |= 1 << ((mmTPC7_QM_GLBL_CFG1 & 0x7F) >> 2);
1992 mask |= 1 << ((mmTPC7_QM_GLBL_PROT & 0x7F) >> 2);
1993 mask |= 1 << ((mmTPC7_QM_GLBL_ERR_CFG & 0x7F) >> 2);
1994 mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
1995 mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
1996 mask |= 1 << ((mmTPC7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
1997 mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
1998 mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
1999 mask |= 1 << ((mmTPC7_QM_GLBL_STS0 & 0x7F) >> 2);
2000 mask |= 1 << ((mmTPC7_QM_GLBL_STS1 & 0x7F) >> 2);
2001 mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO & 0x7F) >> 2);
2002 mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI & 0x7F) >> 2);
2003 mask |= 1 << ((mmTPC7_QM_PQ_SIZE & 0x7F) >> 2);
2004 mask |= 1 << ((mmTPC7_QM_PQ_PI & 0x7F) >> 2);
2005 mask |= 1 << ((mmTPC7_QM_PQ_CI & 0x7F) >> 2);
2006 mask |= 1 << ((mmTPC7_QM_PQ_CFG0 & 0x7F) >> 2);
2007 mask |= 1 << ((mmTPC7_QM_PQ_CFG1 & 0x7F) >> 2);
2008 mask |= 1 << ((mmTPC7_QM_PQ_ARUSER & 0x7F) >> 2);
2009
2010 WREG32(pb_addr + word_offset, ~mask);
2011
2012 pb_addr = (mmTPC7_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
2013 word_offset = ((mmTPC7_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
2014 mask = 1 << ((mmTPC7_QM_PQ_PUSH0 & 0x7F) >> 2);
2015 mask |= 1 << ((mmTPC7_QM_PQ_PUSH1 & 0x7F) >> 2);
2016 mask |= 1 << ((mmTPC7_QM_PQ_PUSH2 & 0x7F) >> 2);
2017 mask |= 1 << ((mmTPC7_QM_PQ_PUSH3 & 0x7F) >> 2);
2018 mask |= 1 << ((mmTPC7_QM_PQ_STS0 & 0x7F) >> 2);
2019 mask |= 1 << ((mmTPC7_QM_PQ_STS1 & 0x7F) >> 2);
2020 mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
2021 mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
2022 mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
2023 mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
2024 mask |= 1 << ((mmTPC7_QM_CQ_CFG0 & 0x7F) >> 2);
2025 mask |= 1 << ((mmTPC7_QM_CQ_CFG1 & 0x7F) >> 2);
2026 mask |= 1 << ((mmTPC7_QM_CQ_ARUSER & 0x7F) >> 2);
2027 mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO & 0x7F) >> 2);
2028 mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI & 0x7F) >> 2);
2029 mask |= 1 << ((mmTPC7_QM_CQ_TSIZE & 0x7F) >> 2);
2030 mask |= 1 << ((mmTPC7_QM_CQ_CTL & 0x7F) >> 2);
2031 mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
2032 mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
2033 mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS & 0x7F) >> 2);
2034 mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS & 0x7F) >> 2);
2035 mask |= 1 << ((mmTPC7_QM_CQ_STS0 & 0x7F) >> 2);
2036 mask |= 1 << ((mmTPC7_QM_CQ_STS1 & 0x7F) >> 2);
2037 mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
2038 mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
2039 mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
2040 mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
2041
2042 WREG32(pb_addr + word_offset, ~mask);
2043
2044 pb_addr = (mmTPC7_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
2045 word_offset = ((mmTPC7_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
2046 mask = 1 << ((mmTPC7_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
2047 mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
2048 mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
2049 mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
2050 mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
2051 mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
2052 mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
2053 mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
2054 mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
2055 mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
2056 mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
2057 mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
2058 mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
2059 mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
2060 mask |= 1 << ((mmTPC7_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
2061
2062 WREG32(pb_addr + word_offset, ~mask);
2063
2064 pb_addr = (mmTPC7_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
2065 word_offset = ((mmTPC7_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
2066 mask = 1 << ((mmTPC7_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
2067 mask |= 1 << ((mmTPC7_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
2068 mask |= 1 << ((mmTPC7_CMDQ_GLBL_PROT & 0x7F) >> 2);
2069 mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
2070 mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
2071 mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
2072 mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
2073 mask |= 1 << ((mmTPC7_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
2074 mask |= 1 << ((mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
2075 mask |= 1 << ((mmTPC7_CMDQ_GLBL_STS0 & 0x7F) >> 2);
2076 mask |= 1 << ((mmTPC7_CMDQ_GLBL_STS1 & 0x7F) >> 2);
2077
2078 WREG32(pb_addr + word_offset, ~mask);
2079
2080 pb_addr = (mmTPC7_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
2081 word_offset = ((mmTPC7_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
2082 mask = 1 << ((mmTPC7_CMDQ_CQ_CFG0 & 0x7F) >> 2);
2083 mask |= 1 << ((mmTPC7_CMDQ_CQ_CFG1 & 0x7F) >> 2);
2084 mask |= 1 << ((mmTPC7_CMDQ_CQ_ARUSER & 0x7F) >> 2);
2085 mask |= 1 << ((mmTPC7_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
2086 mask |= 1 << ((mmTPC7_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
2087 mask |= 1 << ((mmTPC7_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
2088 mask |= 1 << ((mmTPC7_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
2089 mask |= 1 << ((mmTPC7_CMDQ_CQ_STS0 & 0x7F) >> 2);
2090 mask |= 1 << ((mmTPC7_CMDQ_CQ_STS1 & 0x7F) >> 2);
2091 mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
2092 mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
2093 mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
2094 mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
2095
2096 WREG32(pb_addr + word_offset, ~mask);
2097
2098 pb_addr = (mmTPC7_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
2099 word_offset = ((mmTPC7_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
2100 mask = 1 << ((mmTPC7_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
2101 mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
2102 mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
2103 mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
2104 mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
2105 mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
2106 mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
2107 mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
2108 mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
2109 mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
2110 mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
2111 mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
2112 mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
2113 mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
2114 mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
2115 mask |= 1 << ((mmTPC7_CMDQ_CP_STS & 0x7F) >> 2);
2116 mask |= 1 << ((mmTPC7_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
2117
2118 WREG32(pb_addr + word_offset, ~mask);
2119
2120 pb_addr = (mmTPC7_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
2121 word_offset = ((mmTPC7_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
2122 << 2;
2123 mask = 1 << ((mmTPC7_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
2124 mask |= 1 << ((mmTPC7_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
2125 mask |= 1 << ((mmTPC7_CMDQ_CP_DBG_0 & 0x7F) >> 2);
2126 mask |= 1 << ((mmTPC7_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
2127 mask |= 1 << ((mmTPC7_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
2128
2129 WREG32(pb_addr + word_offset, ~mask);
2130}
2131
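/*
 * Every per-block sequence in the helpers above repeats one pattern:
 * build a mask of the registers that share a protection word and write
 * the inverted mask once. A minimal sketch of that pattern for a single
 * register is kept under #if 0 below; it is illustrative only (the
 * helper name is made up and the sketch is not used by the driver) and
 * assumes PROT_BITS_OFFS is the offset of the protection-bits area
 * inside a 4K register block.
 */
#if 0
static void goya_pb_protect_reg(struct hl_device *hdev, u32 reg)
{
	u32 pb_addr, word_offset, mask;

	/* protection-bits area of the 4K block that contains the register */
	pb_addr = (reg & ~0xFFF) + PROT_BITS_OFFS;

	/* word inside that area - taken from bits 11:7 of the address */
	word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2;

	/* bit inside the word - taken from bits 6:2 of the address */
	mask = 1 << ((reg & 0x7F) >> 2);

	/*
	 * 0 means protected, so writing the inverted mask protects this
	 * register and leaves the other registers that share the word
	 * unprotected. The sequences above OR several bits into the mask
	 * first, so that a single write covers all protected registers
	 * in the word.
	 */
	WREG32(pb_addr + word_offset, ~mask);
}
#endif
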
2132/*
2133 * goya_init_protection_bits - Initialize protection bits for specific registers
2134 *
2135 * @hdev: pointer to hl_device structure
2136 *
2137 * All protection bits are 1 by default, which means not protected. Each
2138 * bit that belongs to a protected register must be cleared to 0.
2139 *
2140 */
2141static void goya_init_protection_bits(struct hl_device *hdev)
2142{
2143 /*
2144 * In each 4K block of registers, the last 128 bytes are protection
2145 * bits - a total of 1024 bits, one for each register. Each bit
2146 * corresponds to a specific register, in register order.
2147 * So in order to find the bit that corresponds to a given register,
2148 * we need to calculate its word offset and then the exact bit inside
2149 * the word (which is 4 bytes).
2150 *
2151 * Register address:
2152 *
2153 *  31                  12 11         7 6               2  1    0
2154 * -----------------------------------------------------------------
2155 * |      Don't care      |    word    |  bit location   |    0    |
2156 * |                      |   offset   |  inside word    |         |
2157 * -----------------------------------------------------------------
2158 *
2159 * Bits 7-11 represent the word offset inside the 128 bytes.
2160 * Bits 2-6 represent the bit location inside the word.
2161 */
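/*
 * For example (0x154 is an arbitrary, illustrative offset and assumes
 * PROT_BITS_OFFS is 0xF80, i.e. the last 128 bytes of the block):
 * a register at byte offset 0x154 inside its 4K block gives
 *   word_offset = ((0x154 & PROT_BITS_OFFS) >> 7) << 2 = 8
 *   bit         = (0x154 & 0x7F) >> 2 = 21
 * so its protection bit is bit 21 of the word at pb_addr + 8, where
 * pb_addr = (address & ~0xFFF) + PROT_BITS_OFFS.
 */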
2162
2163 goya_pb_set_block(hdev, mmPCI_NRTR_BASE);
2164 goya_pb_set_block(hdev, mmPCI_RD_REGULATOR_BASE);
2165 goya_pb_set_block(hdev, mmPCI_WR_REGULATOR_BASE);
2166
2167 goya_pb_set_block(hdev, mmSRAM_Y0_X0_BANK_BASE);
2168 goya_pb_set_block(hdev, mmSRAM_Y0_X0_RTR_BASE);
2169 goya_pb_set_block(hdev, mmSRAM_Y0_X1_BANK_BASE);
2170 goya_pb_set_block(hdev, mmSRAM_Y0_X1_RTR_BASE);
2171 goya_pb_set_block(hdev, mmSRAM_Y0_X2_BANK_BASE);
2172 goya_pb_set_block(hdev, mmSRAM_Y0_X2_RTR_BASE);
2173 goya_pb_set_block(hdev, mmSRAM_Y0_X3_BANK_BASE);
2174 goya_pb_set_block(hdev, mmSRAM_Y0_X3_RTR_BASE);
2175 goya_pb_set_block(hdev, mmSRAM_Y0_X4_BANK_BASE);
2176 goya_pb_set_block(hdev, mmSRAM_Y0_X4_RTR_BASE);
2177
2178 goya_pb_set_block(hdev, mmSRAM_Y1_X0_BANK_BASE);
2179 goya_pb_set_block(hdev, mmSRAM_Y1_X0_RTR_BASE);
2180 goya_pb_set_block(hdev, mmSRAM_Y1_X1_BANK_BASE);
2181 goya_pb_set_block(hdev, mmSRAM_Y1_X1_RTR_BASE);
2182 goya_pb_set_block(hdev, mmSRAM_Y1_X2_BANK_BASE);
2183 goya_pb_set_block(hdev, mmSRAM_Y1_X2_RTR_BASE);
2184 goya_pb_set_block(hdev, mmSRAM_Y1_X3_BANK_BASE);
2185 goya_pb_set_block(hdev, mmSRAM_Y1_X3_RTR_BASE);
2186 goya_pb_set_block(hdev, mmSRAM_Y1_X4_BANK_BASE);
2187 goya_pb_set_block(hdev, mmSRAM_Y1_X4_RTR_BASE);
2188
2189 goya_pb_set_block(hdev, mmSRAM_Y2_X0_BANK_BASE);
2190 goya_pb_set_block(hdev, mmSRAM_Y2_X0_RTR_BASE);
2191 goya_pb_set_block(hdev, mmSRAM_Y2_X1_BANK_BASE);
2192 goya_pb_set_block(hdev, mmSRAM_Y2_X1_RTR_BASE);
2193 goya_pb_set_block(hdev, mmSRAM_Y2_X2_BANK_BASE);
2194 goya_pb_set_block(hdev, mmSRAM_Y2_X2_RTR_BASE);
2195 goya_pb_set_block(hdev, mmSRAM_Y2_X3_BANK_BASE);
2196 goya_pb_set_block(hdev, mmSRAM_Y2_X3_RTR_BASE);
2197 goya_pb_set_block(hdev, mmSRAM_Y2_X4_BANK_BASE);
2198 goya_pb_set_block(hdev, mmSRAM_Y2_X4_RTR_BASE);
2199
2200 goya_pb_set_block(hdev, mmSRAM_Y3_X0_BANK_BASE);
2201 goya_pb_set_block(hdev, mmSRAM_Y3_X0_RTR_BASE);
2202 goya_pb_set_block(hdev, mmSRAM_Y3_X1_BANK_BASE);
2203 goya_pb_set_block(hdev, mmSRAM_Y3_X1_RTR_BASE);
2204 goya_pb_set_block(hdev, mmSRAM_Y3_X2_BANK_BASE);
2205 goya_pb_set_block(hdev, mmSRAM_Y3_X2_RTR_BASE);
2206 goya_pb_set_block(hdev, mmSRAM_Y3_X3_BANK_BASE);
2207 goya_pb_set_block(hdev, mmSRAM_Y3_X3_RTR_BASE);
2208 goya_pb_set_block(hdev, mmSRAM_Y3_X4_BANK_BASE);
2209 goya_pb_set_block(hdev, mmSRAM_Y3_X4_RTR_BASE);
2210
2211 goya_pb_set_block(hdev, mmSRAM_Y4_X0_BANK_BASE);
2212 goya_pb_set_block(hdev, mmSRAM_Y4_X0_RTR_BASE);
2213 goya_pb_set_block(hdev, mmSRAM_Y4_X1_BANK_BASE);
2214 goya_pb_set_block(hdev, mmSRAM_Y4_X1_RTR_BASE);
2215 goya_pb_set_block(hdev, mmSRAM_Y4_X2_BANK_BASE);
2216 goya_pb_set_block(hdev, mmSRAM_Y4_X2_RTR_BASE);
2217 goya_pb_set_block(hdev, mmSRAM_Y4_X3_BANK_BASE);
2218 goya_pb_set_block(hdev, mmSRAM_Y4_X3_RTR_BASE);
2219 goya_pb_set_block(hdev, mmSRAM_Y4_X4_BANK_BASE);
2220 goya_pb_set_block(hdev, mmSRAM_Y4_X4_RTR_BASE);
2221
2222 goya_pb_set_block(hdev, mmSRAM_Y5_X0_BANK_BASE);
2223 goya_pb_set_block(hdev, mmSRAM_Y5_X0_RTR_BASE);
2224 goya_pb_set_block(hdev, mmSRAM_Y5_X1_BANK_BASE);
2225 goya_pb_set_block(hdev, mmSRAM_Y5_X1_RTR_BASE);
2226 goya_pb_set_block(hdev, mmSRAM_Y5_X2_BANK_BASE);
2227 goya_pb_set_block(hdev, mmSRAM_Y5_X2_RTR_BASE);
2228 goya_pb_set_block(hdev, mmSRAM_Y5_X3_BANK_BASE);
2229 goya_pb_set_block(hdev, mmSRAM_Y5_X3_RTR_BASE);
2230 goya_pb_set_block(hdev, mmSRAM_Y5_X4_BANK_BASE);
2231 goya_pb_set_block(hdev, mmSRAM_Y5_X4_RTR_BASE);
2232
2233 goya_pb_set_block(hdev, mmPCIE_WRAP_BASE);
2234 goya_pb_set_block(hdev, mmPCIE_CORE_BASE);
2235 goya_pb_set_block(hdev, mmPCIE_DB_CFG_BASE);
2236 goya_pb_set_block(hdev, mmPCIE_DB_CMD_BASE);
2237 goya_pb_set_block(hdev, mmPCIE_AUX_BASE);
2238 goya_pb_set_block(hdev, mmPCIE_DB_RSV_BASE);
2239 goya_pb_set_block(hdev, mmPCIE_PHY_BASE);
2240
2241 goya_init_mme_protection_bits(hdev);
2242
2243 goya_init_dma_protection_bits(hdev);
2244
2245 goya_init_tpc_protection_bits(hdev);
2246}
2247
2248/*
2249 * goya_init_security - Initialize security model
2250 *
2251 * @hdev: pointer to hl_device structure
2252 *
2253 * Initialize the security model of the device.
2254 * This includes the range registers and a protection bit per register.
2255 *
2256 */
2257void goya_init_security(struct hl_device *hdev)
2258{
2259 struct goya_device *goya = hdev->asic_specific;
2260
2261 u32 dram_addr_lo = lower_32_bits(DRAM_PHYS_BASE);
2262 u32 dram_addr_hi = upper_32_bits(DRAM_PHYS_BASE);
2263
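/*
 * The 14 LBW range base/mask pairs below are written, unchanged, into
 * the DMA macro and into the MME and TPC routers later in this function.
 */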
2264 u32 lbw_rng0_base = 0xFC440000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2265 u32 lbw_rng0_mask = 0xFFFF0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2266
2267 u32 lbw_rng1_base = 0xFC480000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2268 u32 lbw_rng1_mask = 0xFFF80000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2269
2270 u32 lbw_rng2_base = 0xFC600000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2271 u32 lbw_rng2_mask = 0xFFE00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2272
2273 u32 lbw_rng3_base = 0xFC800000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2274 u32 lbw_rng3_mask = 0xFFF00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2275
2276 u32 lbw_rng4_base = 0xFCC02000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2277 u32 lbw_rng4_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2278
2279 u32 lbw_rng5_base = 0xFCC40000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2280 u32 lbw_rng5_mask = 0xFFFF8000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2281
2282 u32 lbw_rng6_base = 0xFCC48000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2283 u32 lbw_rng6_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2284
2285 u32 lbw_rng7_base = 0xFCC4A000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2286 u32 lbw_rng7_mask = 0xFFFFE000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2287
2288 u32 lbw_rng8_base = 0xFCC4C000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2289 u32 lbw_rng8_mask = 0xFFFFC000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2290
2291 u32 lbw_rng9_base = 0xFCC50000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2292 u32 lbw_rng9_mask = 0xFFFF0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2293
2294 u32 lbw_rng10_base = 0xFCC60000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2295 u32 lbw_rng10_mask = 0xFFFE0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2296
2297 u32 lbw_rng11_base = 0xFCE00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2298 u32 lbw_rng11_mask = 0xFFFFC000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2299
2300 u32 lbw_rng12_base = 0xFE484000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2301 u32 lbw_rng12_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2302
2303 u32 lbw_rng13_base = 0xFEC43000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2304 u32 lbw_rng13_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2305
2306 WREG32(mmDMA_MACRO_LBW_RANGE_HIT_BLOCK, 0xFFFF);
2307 WREG32(mmDMA_MACRO_HBW_RANGE_HIT_BLOCK, 0xFF);
2308
2309 if (!(goya->hw_cap_initialized & HW_CAP_MMU)) {
2310 WREG32(mmDMA_MACRO_HBW_RANGE_HIT_BLOCK, 0xFE);
2311
2312 /* Protect HOST */
2313 WREG32(mmDMA_MACRO_HBW_RANGE_BASE_31_0_0, 0);
2314 WREG32(mmDMA_MACRO_HBW_RANGE_BASE_49_32_0, 0);
2315 WREG32(mmDMA_MACRO_HBW_RANGE_MASK_31_0_0, 0);
2316 WREG32(mmDMA_MACRO_HBW_RANGE_MASK_49_32_0, 0xFFF80);
2317 }
2318
2319 /*
2320 * Protect DDR @
2321 * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
2322 * The mask protects the first 512MB
2323 */
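/*
 * (The 512MB figure follows from the mask values below, assuming the
 * RANGE_MASK registers choose which address bits take part in the
 * comparison: 0xE0000000 covers bits 31:29 and 0x3FFFF covers bits
 * 49:32, so bits 28:0 are ignored - a 2^29 byte, i.e. 512MB, window
 * starting at DRAM_PHYS_BASE.)
 */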
2324 WREG32(mmDMA_MACRO_HBW_RANGE_BASE_31_0_1, dram_addr_lo);
2325 WREG32(mmDMA_MACRO_HBW_RANGE_BASE_49_32_1, dram_addr_hi);
2326 WREG32(mmDMA_MACRO_HBW_RANGE_MASK_31_0_1, 0xE0000000);
2327 WREG32(mmDMA_MACRO_HBW_RANGE_MASK_49_32_1, 0x3FFFF);
2328
2329 /* Protect registers */
2330
2331 WREG32(mmDMA_MACRO_LBW_RANGE_BASE_0, lbw_rng0_base);
2332 WREG32(mmDMA_MACRO_LBW_RANGE_MASK_0, lbw_rng0_mask);
2333 WREG32(mmDMA_MACRO_LBW_RANGE_BASE_1, lbw_rng1_base);
2334 WREG32(mmDMA_MACRO_LBW_RANGE_MASK_1, lbw_rng1_mask);
2335 WREG32(mmDMA_MACRO_LBW_RANGE_BASE_2, lbw_rng2_base);
2336 WREG32(mmDMA_MACRO_LBW_RANGE_MASK_2, lbw_rng2_mask);
2337 WREG32(mmDMA_MACRO_LBW_RANGE_BASE_3, lbw_rng3_base);
2338 WREG32(mmDMA_MACRO_LBW_RANGE_MASK_3, lbw_rng3_mask);
2339 WREG32(mmDMA_MACRO_LBW_RANGE_BASE_4, lbw_rng4_base);
2340 WREG32(mmDMA_MACRO_LBW_RANGE_MASK_4, lbw_rng4_mask);
2341 WREG32(mmDMA_MACRO_LBW_RANGE_BASE_5, lbw_rng5_base);
2342 WREG32(mmDMA_MACRO_LBW_RANGE_MASK_5, lbw_rng5_mask);
2343 WREG32(mmDMA_MACRO_LBW_RANGE_BASE_6, lbw_rng6_base);
2344 WREG32(mmDMA_MACRO_LBW_RANGE_MASK_6, lbw_rng6_mask);
2345 WREG32(mmDMA_MACRO_LBW_RANGE_BASE_7, lbw_rng7_base);
2346 WREG32(mmDMA_MACRO_LBW_RANGE_MASK_7, lbw_rng7_mask);
2347 WREG32(mmDMA_MACRO_LBW_RANGE_BASE_8, lbw_rng8_base);
2348 WREG32(mmDMA_MACRO_LBW_RANGE_MASK_8, lbw_rng8_mask);
2349 WREG32(mmDMA_MACRO_LBW_RANGE_BASE_9, lbw_rng9_base);
2350 WREG32(mmDMA_MACRO_LBW_RANGE_MASK_9, lbw_rng9_mask);
2351 WREG32(mmDMA_MACRO_LBW_RANGE_BASE_10, lbw_rng10_base);
2352 WREG32(mmDMA_MACRO_LBW_RANGE_MASK_10, lbw_rng10_mask);
2353 WREG32(mmDMA_MACRO_LBW_RANGE_BASE_11, lbw_rng11_base);
2354 WREG32(mmDMA_MACRO_LBW_RANGE_MASK_11, lbw_rng11_mask);
2355 WREG32(mmDMA_MACRO_LBW_RANGE_BASE_12, lbw_rng12_base);
2356 WREG32(mmDMA_MACRO_LBW_RANGE_MASK_12, lbw_rng12_mask);
2357 WREG32(mmDMA_MACRO_LBW_RANGE_BASE_13, lbw_rng13_base);
2358 WREG32(mmDMA_MACRO_LBW_RANGE_MASK_13, lbw_rng13_mask);
2359
2360 WREG32(mmMME1_RTR_LBW_RANGE_HIT, 0xFFFF);
2361 WREG32(mmMME2_RTR_LBW_RANGE_HIT, 0xFFFF);
2362 WREG32(mmMME3_RTR_LBW_RANGE_HIT, 0xFFFF);
2363 WREG32(mmMME4_RTR_LBW_RANGE_HIT, 0xFFFF);
2364 WREG32(mmMME5_RTR_LBW_RANGE_HIT, 0xFFFF);
2365 WREG32(mmMME6_RTR_LBW_RANGE_HIT, 0xFFFF);
2366
2367 WREG32(mmMME1_RTR_HBW_RANGE_HIT, 0xFE);
2368 WREG32(mmMME2_RTR_HBW_RANGE_HIT, 0xFE);
2369 WREG32(mmMME3_RTR_HBW_RANGE_HIT, 0xFE);
2370 WREG32(mmMME4_RTR_HBW_RANGE_HIT, 0xFE);
2371 WREG32(mmMME5_RTR_HBW_RANGE_HIT, 0xFE);
2372 WREG32(mmMME6_RTR_HBW_RANGE_HIT, 0xFE);
2373
2374 /* Protect HOST */
2375 WREG32(mmMME1_RTR_HBW_RANGE_BASE_L_0, 0);
2376 WREG32(mmMME1_RTR_HBW_RANGE_BASE_H_0, 0);
2377 WREG32(mmMME1_RTR_HBW_RANGE_MASK_L_0, 0);
2378 WREG32(mmMME1_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
2379
2380 WREG32(mmMME2_RTR_HBW_RANGE_BASE_L_0, 0);
2381 WREG32(mmMME2_RTR_HBW_RANGE_BASE_H_0, 0);
2382 WREG32(mmMME2_RTR_HBW_RANGE_MASK_L_0, 0);
2383 WREG32(mmMME2_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
2384
2385 WREG32(mmMME3_RTR_HBW_RANGE_BASE_L_0, 0);
2386 WREG32(mmMME3_RTR_HBW_RANGE_BASE_H_0, 0);
2387 WREG32(mmMME3_RTR_HBW_RANGE_MASK_L_0, 0);
2388 WREG32(mmMME3_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
2389
2390 WREG32(mmMME4_RTR_HBW_RANGE_BASE_L_0, 0);
2391 WREG32(mmMME4_RTR_HBW_RANGE_BASE_H_0, 0);
2392 WREG32(mmMME4_RTR_HBW_RANGE_MASK_L_0, 0);
2393 WREG32(mmMME4_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
2394
2395 WREG32(mmMME5_RTR_HBW_RANGE_BASE_L_0, 0);
2396 WREG32(mmMME5_RTR_HBW_RANGE_BASE_H_0, 0);
2397 WREG32(mmMME5_RTR_HBW_RANGE_MASK_L_0, 0);
2398 WREG32(mmMME5_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
2399
2400 WREG32(mmMME6_RTR_HBW_RANGE_BASE_L_0, 0);
2401 WREG32(mmMME6_RTR_HBW_RANGE_BASE_H_0, 0);
2402 WREG32(mmMME6_RTR_HBW_RANGE_MASK_L_0, 0);
2403 WREG32(mmMME6_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
2404
2405 /*
2406 * Protect DDR @
2407 * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
2408 * The mask protects the first 512MB
2409 */
2410 WREG32(mmMME1_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
2411 WREG32(mmMME1_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
2412 WREG32(mmMME1_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
2413 WREG32(mmMME1_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
2414
2415 WREG32(mmMME2_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
2416 WREG32(mmMME2_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
2417 WREG32(mmMME2_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
2418 WREG32(mmMME2_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
2419
2420 WREG32(mmMME3_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
2421 WREG32(mmMME3_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
2422 WREG32(mmMME3_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
2423 WREG32(mmMME3_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
2424
2425 WREG32(mmMME4_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
2426 WREG32(mmMME4_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
2427 WREG32(mmMME4_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
2428 WREG32(mmMME4_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
2429
2430 WREG32(mmMME5_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
2431 WREG32(mmMME5_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
2432 WREG32(mmMME5_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
2433 WREG32(mmMME5_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
2434
2435 WREG32(mmMME6_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
2436 WREG32(mmMME6_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
2437 WREG32(mmMME6_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
2438 WREG32(mmMME6_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
2439
2440 WREG32(mmMME1_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
2441 WREG32(mmMME1_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
2442 WREG32(mmMME1_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
2443 WREG32(mmMME1_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
2444 WREG32(mmMME1_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
2445 WREG32(mmMME1_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
2446 WREG32(mmMME1_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
2447 WREG32(mmMME1_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
2448 WREG32(mmMME1_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
2449 WREG32(mmMME1_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
2450 WREG32(mmMME1_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
2451 WREG32(mmMME1_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
2452 WREG32(mmMME1_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
2453 WREG32(mmMME1_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
2454 WREG32(mmMME1_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
2455 WREG32(mmMME1_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
2456 WREG32(mmMME1_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
2457 WREG32(mmMME1_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
2458 WREG32(mmMME1_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
2459 WREG32(mmMME1_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
2460 WREG32(mmMME1_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
2461 WREG32(mmMME1_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
2462 WREG32(mmMME1_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
2463 WREG32(mmMME1_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
2464 WREG32(mmMME1_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
2465 WREG32(mmMME1_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
2466 WREG32(mmMME1_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
2467 WREG32(mmMME1_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
2468
2469 WREG32(mmMME2_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
2470 WREG32(mmMME2_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
2471 WREG32(mmMME2_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
2472 WREG32(mmMME2_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
2473 WREG32(mmMME2_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
2474 WREG32(mmMME2_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
2475 WREG32(mmMME2_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
2476 WREG32(mmMME2_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
2477 WREG32(mmMME2_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
2478 WREG32(mmMME2_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
2479 WREG32(mmMME2_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
2480 WREG32(mmMME2_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
2481 WREG32(mmMME2_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
2482 WREG32(mmMME2_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
2483 WREG32(mmMME2_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
2484 WREG32(mmMME2_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
2485 WREG32(mmMME2_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
2486 WREG32(mmMME2_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
2487 WREG32(mmMME2_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
2488 WREG32(mmMME2_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
2489 WREG32(mmMME2_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
2490 WREG32(mmMME2_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
2491 WREG32(mmMME2_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
2492 WREG32(mmMME2_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
2493 WREG32(mmMME2_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
2494 WREG32(mmMME2_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
2495 WREG32(mmMME2_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
2496 WREG32(mmMME2_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
2497
2498 WREG32(mmMME3_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
2499 WREG32(mmMME3_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
2500 WREG32(mmMME3_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
2501 WREG32(mmMME3_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
2502 WREG32(mmMME3_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
2503 WREG32(mmMME3_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
2504 WREG32(mmMME3_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
2505 WREG32(mmMME3_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
2506 WREG32(mmMME3_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
2507 WREG32(mmMME3_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
2508 WREG32(mmMME3_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
2509 WREG32(mmMME3_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
2510 WREG32(mmMME3_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
2511 WREG32(mmMME3_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
2512 WREG32(mmMME3_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
2513 WREG32(mmMME3_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
2514 WREG32(mmMME3_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
2515 WREG32(mmMME3_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
2516 WREG32(mmMME3_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
2517 WREG32(mmMME3_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
2518 WREG32(mmMME3_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
2519 WREG32(mmMME3_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
2520 WREG32(mmMME3_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
2521 WREG32(mmMME3_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
2522 WREG32(mmMME3_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
2523 WREG32(mmMME3_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
2524 WREG32(mmMME3_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
2525 WREG32(mmMME3_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
2526
2527 WREG32(mmMME4_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
2528 WREG32(mmMME4_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
2529 WREG32(mmMME4_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
2530 WREG32(mmMME4_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
2531 WREG32(mmMME4_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
2532 WREG32(mmMME4_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
2533 WREG32(mmMME4_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
2534 WREG32(mmMME4_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
2535 WREG32(mmMME4_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
2536 WREG32(mmMME4_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
2537 WREG32(mmMME4_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
2538 WREG32(mmMME4_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
2539 WREG32(mmMME4_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
2540 WREG32(mmMME4_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
2541 WREG32(mmMME4_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
2542 WREG32(mmMME4_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
2543 WREG32(mmMME4_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
2544 WREG32(mmMME4_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
2545 WREG32(mmMME4_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
2546 WREG32(mmMME4_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
2547 WREG32(mmMME4_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
2548 WREG32(mmMME4_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
2549 WREG32(mmMME4_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
2550 WREG32(mmMME4_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
2551 WREG32(mmMME4_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
2552 WREG32(mmMME4_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
2553 WREG32(mmMME4_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
2554 WREG32(mmMME4_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
2555
2556 WREG32(mmMME5_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
2557 WREG32(mmMME5_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
2558 WREG32(mmMME5_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
2559 WREG32(mmMME5_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
2560 WREG32(mmMME5_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
2561 WREG32(mmMME5_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
2562 WREG32(mmMME5_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
2563 WREG32(mmMME5_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
2564 WREG32(mmMME5_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
2565 WREG32(mmMME5_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
2566 WREG32(mmMME5_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
2567 WREG32(mmMME5_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
2568 WREG32(mmMME5_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
2569 WREG32(mmMME5_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
2570 WREG32(mmMME5_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
2571 WREG32(mmMME5_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
2572 WREG32(mmMME5_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
2573 WREG32(mmMME5_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
2574 WREG32(mmMME5_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
2575 WREG32(mmMME5_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
2576 WREG32(mmMME5_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
2577 WREG32(mmMME5_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
2578 WREG32(mmMME5_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
2579 WREG32(mmMME5_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
2580 WREG32(mmMME5_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
2581 WREG32(mmMME5_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
2582 WREG32(mmMME5_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
2583 WREG32(mmMME5_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
2584
2585 WREG32(mmMME6_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
2586 WREG32(mmMME6_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
2587 WREG32(mmMME6_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
2588 WREG32(mmMME6_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
2589 WREG32(mmMME6_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
2590 WREG32(mmMME6_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
2591 WREG32(mmMME6_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
2592 WREG32(mmMME6_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
2593 WREG32(mmMME6_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
2594 WREG32(mmMME6_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
2595 WREG32(mmMME6_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
2596 WREG32(mmMME6_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
2597 WREG32(mmMME6_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
2598 WREG32(mmMME6_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
2599 WREG32(mmMME6_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
2600 WREG32(mmMME6_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
2601 WREG32(mmMME6_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
2602 WREG32(mmMME6_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
2603 WREG32(mmMME6_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
2604 WREG32(mmMME6_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
2605 WREG32(mmMME6_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
2606 WREG32(mmMME6_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
2607 WREG32(mmMME6_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
2608 WREG32(mmMME6_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
2609 WREG32(mmMME6_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
2610 WREG32(mmMME6_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
2611 WREG32(mmMME6_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
2612 WREG32(mmMME6_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
2613
2614 WREG32(mmTPC0_NRTR_LBW_RANGE_HIT, 0xFFFF);
2615 WREG32(mmTPC0_NRTR_HBW_RANGE_HIT, 0xFE);
2616
2617 /* Protect HOST */
2618 WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_L_0, 0);
2619 WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_H_0, 0);
2620 WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_L_0, 0);
2621 WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_H_0, 0xFFF80);
2622
2623 /*
2624 * Protect DDR @
2625 * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
2626 * The mask protects the first 512MB
2627 */
2628 WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
2629 WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
2630 WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_L_1, 0xE0000000);
2631 WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
2632
2633 WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_0, lbw_rng0_base);
2634 WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
2635 WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_1, lbw_rng1_base);
2636 WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
2637 WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_2, lbw_rng2_base);
2638 WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
2639 WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_3, lbw_rng3_base);
2640 WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
2641 WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_4, lbw_rng4_base);
2642 WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
2643 WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_5, lbw_rng5_base);
2644 WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
2645 WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_6, lbw_rng6_base);
2646 WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
2647 WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_7, lbw_rng7_base);
2648 WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
2649 WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_8, lbw_rng8_base);
2650 WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
2651 WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_9, lbw_rng9_base);
2652 WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
2653 WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_10, lbw_rng10_base);
2654 WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
2655 WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_11, lbw_rng11_base);
2656 WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
2657 WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_12, lbw_rng12_base);
2658 WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
2659 WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_13, lbw_rng13_base);
2660 WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
2661
2662 WREG32(mmTPC1_RTR_LBW_RANGE_HIT, 0xFFFF);
2663 WREG32(mmTPC1_RTR_HBW_RANGE_HIT, 0xFE);
2664
2665 /* Protect HOST */
2666 WREG32(mmTPC1_RTR_HBW_RANGE_BASE_L_0, 0);
2667 WREG32(mmTPC1_RTR_HBW_RANGE_BASE_H_0, 0);
2668 WREG32(mmTPC1_RTR_HBW_RANGE_MASK_L_0, 0);
2669 WREG32(mmTPC1_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
2670
2671 /*
2672 * Protect DDR @
2673 * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
2674 * The mask protects the first 512MB
2675 */
2676 WREG32(mmTPC1_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
2677 WREG32(mmTPC1_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
2678 WREG32(mmTPC1_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
2679 WREG32(mmTPC1_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
2680
2681 WREG32(mmTPC1_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
2682 WREG32(mmTPC1_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
2683 WREG32(mmTPC1_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
2684 WREG32(mmTPC1_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
2685 WREG32(mmTPC1_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
2686 WREG32(mmTPC1_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
2687 WREG32(mmTPC1_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
2688 WREG32(mmTPC1_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
2689 WREG32(mmTPC1_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
2690 WREG32(mmTPC1_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
2691 WREG32(mmTPC1_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
2692 WREG32(mmTPC1_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
2693 WREG32(mmTPC1_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
2694 WREG32(mmTPC1_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
2695 WREG32(mmTPC1_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
2696 WREG32(mmTPC1_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
2697 WREG32(mmTPC1_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
2698 WREG32(mmTPC1_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
2699 WREG32(mmTPC1_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
2700 WREG32(mmTPC1_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
2701 WREG32(mmTPC1_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
2702 WREG32(mmTPC1_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
2703 WREG32(mmTPC1_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
2704 WREG32(mmTPC1_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
2705 WREG32(mmTPC1_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
2706 WREG32(mmTPC1_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
2707 WREG32(mmTPC1_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
2708 WREG32(mmTPC1_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
2709
2710 WREG32(mmTPC2_RTR_LBW_RANGE_HIT, 0xFFFF);
2711 WREG32(mmTPC2_RTR_HBW_RANGE_HIT, 0xFE);
2712
2713 /* Protect HOST */
2714 WREG32(mmTPC2_RTR_HBW_RANGE_BASE_L_0, 0);
2715 WREG32(mmTPC2_RTR_HBW_RANGE_BASE_H_0, 0);
2716 WREG32(mmTPC2_RTR_HBW_RANGE_MASK_L_0, 0);
2717 WREG32(mmTPC2_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
2718
2719 /*
2720 * Protect DDR @
2721 * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
2722 * The mask protects the first 512MB
2723 */
2724 WREG32(mmTPC2_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
2725 WREG32(mmTPC2_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
2726 WREG32(mmTPC2_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
2727 WREG32(mmTPC2_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
2728
2729 WREG32(mmTPC2_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
2730 WREG32(mmTPC2_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
2731 WREG32(mmTPC2_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
2732 WREG32(mmTPC2_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
2733 WREG32(mmTPC2_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
2734 WREG32(mmTPC2_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
2735 WREG32(mmTPC2_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
2736 WREG32(mmTPC2_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
2737 WREG32(mmTPC2_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
2738 WREG32(mmTPC2_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
2739 WREG32(mmTPC2_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
2740 WREG32(mmTPC2_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
2741 WREG32(mmTPC2_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
2742 WREG32(mmTPC2_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
2743 WREG32(mmTPC2_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
2744 WREG32(mmTPC2_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
2745 WREG32(mmTPC2_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
2746 WREG32(mmTPC2_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
2747 WREG32(mmTPC2_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
2748 WREG32(mmTPC2_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
2749 WREG32(mmTPC2_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
2750 WREG32(mmTPC2_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
2751 WREG32(mmTPC2_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
2752 WREG32(mmTPC2_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
2753 WREG32(mmTPC2_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
2754 WREG32(mmTPC2_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
2755 WREG32(mmTPC2_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
2756 WREG32(mmTPC2_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
2757
2758 WREG32(mmTPC3_RTR_LBW_RANGE_HIT, 0xFFFF);
2759 WREG32(mmTPC3_RTR_HBW_RANGE_HIT, 0xFE);
2760
2761 /* Protect HOST */
2762 WREG32(mmTPC3_RTR_HBW_RANGE_BASE_L_0, 0);
2763 WREG32(mmTPC3_RTR_HBW_RANGE_BASE_H_0, 0);
2764 WREG32(mmTPC3_RTR_HBW_RANGE_MASK_L_0, 0);
2765 WREG32(mmTPC3_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
2766
2767 /*
2768 * Protect DDR @
2769 * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
2770 * The mask protects the first 512MB
2771 */
2772 WREG32(mmTPC3_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
2773 WREG32(mmTPC3_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
2774 WREG32(mmTPC3_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
2775 WREG32(mmTPC3_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
2776
2777 WREG32(mmTPC3_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
2778 WREG32(mmTPC3_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
2779 WREG32(mmTPC3_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
2780 WREG32(mmTPC3_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
2781 WREG32(mmTPC3_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
2782 WREG32(mmTPC3_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
2783 WREG32(mmTPC3_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
2784 WREG32(mmTPC3_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
2785 WREG32(mmTPC3_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
2786 WREG32(mmTPC3_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
2787 WREG32(mmTPC3_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
2788 WREG32(mmTPC3_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
2789 WREG32(mmTPC3_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
2790 WREG32(mmTPC3_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
2791 WREG32(mmTPC3_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
2792 WREG32(mmTPC3_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
2793 WREG32(mmTPC3_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
2794 WREG32(mmTPC3_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
2795 WREG32(mmTPC3_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
2796 WREG32(mmTPC3_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
2797 WREG32(mmTPC3_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
2798 WREG32(mmTPC3_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
2799 WREG32(mmTPC3_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
2800 WREG32(mmTPC3_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
2801 WREG32(mmTPC3_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
2802 WREG32(mmTPC3_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
2803 WREG32(mmTPC3_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
2804 WREG32(mmTPC3_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
2805
2806 WREG32(mmTPC4_RTR_LBW_RANGE_HIT, 0xFFFF);
2807 WREG32(mmTPC4_RTR_HBW_RANGE_HIT, 0xFE);
2808
2809 /* Protect HOST */
2810 WREG32(mmTPC4_RTR_HBW_RANGE_BASE_L_0, 0);
2811 WREG32(mmTPC4_RTR_HBW_RANGE_BASE_H_0, 0);
2812 WREG32(mmTPC4_RTR_HBW_RANGE_MASK_L_0, 0);
2813 WREG32(mmTPC4_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
2814
2815 /*
2816 * Protect DDR @
2817 * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
2818 * The mask protects the first 512MB
2819 */
2820 WREG32(mmTPC4_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
2821 WREG32(mmTPC4_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
2822 WREG32(mmTPC4_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
2823 WREG32(mmTPC4_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
2824
2825 WREG32(mmTPC4_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
2826 WREG32(mmTPC4_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
2827 WREG32(mmTPC4_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
2828 WREG32(mmTPC4_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
2829 WREG32(mmTPC4_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
2830 WREG32(mmTPC4_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
2831 WREG32(mmTPC4_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
2832 WREG32(mmTPC4_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
2833 WREG32(mmTPC4_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
2834 WREG32(mmTPC4_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
2835 WREG32(mmTPC4_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
2836 WREG32(mmTPC4_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
2837 WREG32(mmTPC4_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
2838 WREG32(mmTPC4_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
2839 WREG32(mmTPC4_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
2840 WREG32(mmTPC4_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
2841 WREG32(mmTPC4_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
2842 WREG32(mmTPC4_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
2843 WREG32(mmTPC4_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
2844 WREG32(mmTPC4_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
2845 WREG32(mmTPC4_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
2846 WREG32(mmTPC4_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
2847 WREG32(mmTPC4_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
2848 WREG32(mmTPC4_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
2849 WREG32(mmTPC4_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
2850 WREG32(mmTPC4_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
2851 WREG32(mmTPC4_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
2852 WREG32(mmTPC4_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
2853
2854 WREG32(mmTPC5_RTR_LBW_RANGE_HIT, 0xFFFF);
2855 WREG32(mmTPC5_RTR_HBW_RANGE_HIT, 0xFE);
2856
2857 /* Protect HOST */
2858 WREG32(mmTPC5_RTR_HBW_RANGE_BASE_L_0, 0);
2859 WREG32(mmTPC5_RTR_HBW_RANGE_BASE_H_0, 0);
2860 WREG32(mmTPC5_RTR_HBW_RANGE_MASK_L_0, 0);
2861 WREG32(mmTPC5_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
2862
2863 /*
2864 * Protect DDR @
2865 * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
2866 * The mask protects the first 512MB
2867 */
2868 WREG32(mmTPC5_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
2869 WREG32(mmTPC5_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
2870 WREG32(mmTPC5_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
2871 WREG32(mmTPC5_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
2872
2873 WREG32(mmTPC5_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
2874 WREG32(mmTPC5_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
2875 WREG32(mmTPC5_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
2876 WREG32(mmTPC5_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
2877 WREG32(mmTPC5_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
2878 WREG32(mmTPC5_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
2879 WREG32(mmTPC5_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
2880 WREG32(mmTPC5_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
2881 WREG32(mmTPC5_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
2882 WREG32(mmTPC5_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
2883 WREG32(mmTPC5_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
2884 WREG32(mmTPC5_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
2885 WREG32(mmTPC5_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
2886 WREG32(mmTPC5_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
2887 WREG32(mmTPC5_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
2888 WREG32(mmTPC5_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
2889 WREG32(mmTPC5_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
2890 WREG32(mmTPC5_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
2891 WREG32(mmTPC5_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
2892 WREG32(mmTPC5_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
2893 WREG32(mmTPC5_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
2894 WREG32(mmTPC5_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
2895 WREG32(mmTPC5_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
2896 WREG32(mmTPC5_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
2897 WREG32(mmTPC5_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
2898 WREG32(mmTPC5_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
2899 WREG32(mmTPC5_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
2900 WREG32(mmTPC5_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
2901
2902 WREG32(mmTPC6_RTR_LBW_RANGE_HIT, 0xFFFF);
2903 WREG32(mmTPC6_RTR_HBW_RANGE_HIT, 0xFE);
2904
2905 /* Protect HOST */
2906 WREG32(mmTPC6_RTR_HBW_RANGE_BASE_L_0, 0);
2907 WREG32(mmTPC6_RTR_HBW_RANGE_BASE_H_0, 0);
2908 WREG32(mmTPC6_RTR_HBW_RANGE_MASK_L_0, 0);
2909 WREG32(mmTPC6_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
2910
2911 /*
2912 * Protect DDR @
2913 * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
2914 * The mask protects the first 512MB
2915 */
2916 WREG32(mmTPC6_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
2917 WREG32(mmTPC6_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
2918 WREG32(mmTPC6_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
2919 WREG32(mmTPC6_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
2920
2921 WREG32(mmTPC6_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
2922 WREG32(mmTPC6_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
2923 WREG32(mmTPC6_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
2924 WREG32(mmTPC6_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
2925 WREG32(mmTPC6_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
2926 WREG32(mmTPC6_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
2927 WREG32(mmTPC6_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
2928 WREG32(mmTPC6_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
2929 WREG32(mmTPC6_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
2930 WREG32(mmTPC6_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
2931 WREG32(mmTPC6_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
2932 WREG32(mmTPC6_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
2933 WREG32(mmTPC6_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
2934 WREG32(mmTPC6_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
2935 WREG32(mmTPC6_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
2936 WREG32(mmTPC6_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
2937 WREG32(mmTPC6_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
2938 WREG32(mmTPC6_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
2939 WREG32(mmTPC6_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
2940 WREG32(mmTPC6_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
2941 WREG32(mmTPC6_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
2942 WREG32(mmTPC6_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
2943 WREG32(mmTPC6_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
2944 WREG32(mmTPC6_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
2945 WREG32(mmTPC6_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
2946 WREG32(mmTPC6_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
2947 WREG32(mmTPC6_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
2948 WREG32(mmTPC6_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
2949
2950 WREG32(mmTPC7_NRTR_LBW_RANGE_HIT, 0xFFFF);
2951 WREG32(mmTPC7_NRTR_HBW_RANGE_HIT, 0xFE);
2952
2953 /* Protect HOST */
2954 WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_L_0, 0);
2955 WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_H_0, 0);
2956 WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_L_0, 0);
2957 WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_H_0, 0xFFF80);
2958
2959 /*
2960 * Protect DDR @
2961 * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
2962 * The mask protects the first 512MB
2963 */
2964 WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
2965 WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
2966 WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_L_1, 0xE0000000);
2967 WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
2968
2969 WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_0, lbw_rng0_base);
2970 WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
2971 WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_1, lbw_rng1_base);
2972 WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
2973 WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_2, lbw_rng2_base);
2974 WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
2975 WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_3, lbw_rng3_base);
2976 WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
2977 WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_4, lbw_rng4_base);
2978 WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
2979 WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_5, lbw_rng5_base);
2980 WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
2981 WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_6, lbw_rng6_base);
2982 WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
2983 WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_7, lbw_rng7_base);
2984 WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
2985 WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_8, lbw_rng8_base);
2986 WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
2987 WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_9, lbw_rng9_base);
2988 WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
2989 WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_10, lbw_rng10_base);
2990 WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
2991 WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_11, lbw_rng11_base);
2992 WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
2993 WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_12, lbw_rng12_base);
2994 WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
2995 WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_13, lbw_rng13_base);
2996 WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
2997
2998 goya_init_protection_bits(hdev);
2999}
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
new file mode 100644
index 000000000000..a7c95e9f9b9a
--- /dev/null
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -0,0 +1,1464 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2019 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef HABANALABSP_H_
9#define HABANALABSP_H_
10
11#include "include/armcp_if.h"
12#include "include/qman_if.h"
13
14#define pr_fmt(fmt) "habanalabs: " fmt
15
16#include <linux/cdev.h>
17#include <linux/iopoll.h>
18#include <linux/irqreturn.h>
19#include <linux/dma-fence.h>
20#include <linux/dma-direction.h>
21#include <linux/scatterlist.h>
22#include <linux/hashtable.h>
23
24#define HL_NAME "habanalabs"
25
26#define HL_MMAP_CB_MASK (0x8000000000000000ull >> PAGE_SHIFT)
27
28#define HL_PENDING_RESET_PER_SEC 5
29
30#define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */
31
32#define HL_HEARTBEAT_PER_USEC 5000000 /* 5 s */
33
34#define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */
35
36#define HL_MAX_QUEUES 128
37
38#define HL_MAX_JOBS_PER_CS 64
39
40/* MUST BE POWER OF 2 and larger than 1 */
41#define HL_MAX_PENDING_CS 64
42
43/* Memory */
44#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
45
46/* MMU */
47#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
48
49/**
50 * struct pgt_info - MMU hop page info.
51 * @node: hash linked-list node for the per-context hash table of pgts.
52 * @addr: physical address of the pgt.
53 * @ctx: pointer to the owner ctx.
54 * @num_of_ptes: indicates how many ptes are used in the pgt.
55 *
56 * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop)
57 * is needed during mapping, a new page is allocated and this structure holds
58 * its essential information. During unmapping, if no valid PTEs remained in the
59 * page, it is freed with its pgt_info structure.
60 */
61struct pgt_info {
62 struct hlist_node node;
63 u64 addr;
64 struct hl_ctx *ctx;
65 int num_of_ptes;
66};
67
68struct hl_device;
69struct hl_fpriv;
70
71/**
72 * enum hl_queue_type - Supported QUEUE types.
73 * @QUEUE_TYPE_NA: queue is not available.
74 * @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the
75 * host.
76 * @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
77 * memories and/or operates the compute engines.
78 * @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
79 */
80enum hl_queue_type {
81 QUEUE_TYPE_NA,
82 QUEUE_TYPE_EXT,
83 QUEUE_TYPE_INT,
84 QUEUE_TYPE_CPU
85};
86
87/**
88 * struct hw_queue_properties - queue information.
89 * @type: queue type.
90 * @kmd_only: true if only KMD is allowed to send a job to this queue, false
91 * otherwise.
92 */
93struct hw_queue_properties {
94 enum hl_queue_type type;
95 u8 kmd_only;
96};
97
98/**
99 * enum vm_type_t - virtual memory mapping request information.
100 * @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
101 * @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
102 */
103enum vm_type_t {
104 VM_TYPE_USERPTR,
105 VM_TYPE_PHYS_PACK
106};
107
108/**
109 * enum hl_device_hw_state - H/W device state. Use this to decide whether
110 * a reset is needed before hw_init or not.
111 * @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset
112 * @HL_DEVICE_HW_STATE_DIRTY: H/W state is dirty. i.e. we started to execute
113 * hw_init
114 */
115enum hl_device_hw_state {
116 HL_DEVICE_HW_STATE_CLEAN = 0,
117 HL_DEVICE_HW_STATE_DIRTY
118};
119
120/**
121 * struct asic_fixed_properties - ASIC specific immutable properties.
122 * @hw_queues_props: H/W queues properties.
123 * @armcp_info: various information received from ArmCP regarding the H/W, e.g.
124 * available sensors.
125 * @uboot_ver: F/W U-boot version.
126 * @preboot_ver: F/W Preboot version.
127 * @sram_base_address: SRAM physical start address.
128 * @sram_end_address: SRAM physical end address.
129 * @sram_user_base_address: SRAM physical start address for user access.
130 * @dram_base_address: DRAM physical start address.
131 * @dram_end_address: DRAM physical end address.
132 * @dram_user_base_address: DRAM physical start address for user access.
133 * @dram_size: DRAM total size.
134 * @dram_pci_bar_size: size of PCI bar towards DRAM.
135 * @host_phys_base_address: base physical address of host memory for
136 * transactions that the device generates.
137 * @max_power_default: max power of the device after reset
138 * @va_space_host_start_address: base address of virtual memory range for
139 * mapping host memory.
140 * @va_space_host_end_address: end address of virtual memory range for
141 * mapping host memory.
142 * @va_space_dram_start_address: base address of virtual memory range for
143 * mapping DRAM memory.
144 * @va_space_dram_end_address: end address of virtual memory range for
145 * mapping DRAM memory.
146 * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
147 * fault.
148 * @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
149 * @mmu_dram_default_page_addr: DRAM default page physical address.
150 * @mmu_pgt_size: MMU page tables total size.
151 * @mmu_pte_size: PTE size in MMU page tables.
152 * @mmu_hop_table_size: MMU hop table size.
153 * @mmu_hop0_tables_total_size: total size of MMU hop0 tables.
154 * @dram_page_size: page size for MMU DRAM allocation.
155 * @cfg_size: configuration space size on SRAM.
156 * @sram_size: total size of SRAM.
157 * @max_asid: maximum number of open contexts (ASIDs).
158 * @num_of_events: number of possible internal H/W IRQs.
159 * @psoc_pci_pll_nr: PCI PLL NR value.
160 * @psoc_pci_pll_nf: PCI PLL NF value.
161 * @psoc_pci_pll_od: PCI PLL OD value.
162 * @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
163 * @completion_queues_count: number of completion queues.
164 * @high_pll: high PLL frequency used by the device.
165 * @cb_pool_cb_cnt: number of CBs in the CB pool.
166 * @cb_pool_cb_size: size of each CB in the CB pool.
167 * @tpc_enabled_mask: which TPCs are enabled.
168 */
169struct asic_fixed_properties {
170 struct hw_queue_properties hw_queues_props[HL_MAX_QUEUES];
171 struct armcp_info armcp_info;
172 char uboot_ver[VERSION_MAX_LEN];
173 char preboot_ver[VERSION_MAX_LEN];
174 u64 sram_base_address;
175 u64 sram_end_address;
176 u64 sram_user_base_address;
177 u64 dram_base_address;
178 u64 dram_end_address;
179 u64 dram_user_base_address;
180 u64 dram_size;
181 u64 dram_pci_bar_size;
182 u64 host_phys_base_address;
183 u64 max_power_default;
184 u64 va_space_host_start_address;
185 u64 va_space_host_end_address;
186 u64 va_space_dram_start_address;
187 u64 va_space_dram_end_address;
188 u64 dram_size_for_default_page_mapping;
189 u64 mmu_pgt_addr;
190 u64 mmu_dram_default_page_addr;
191 u32 mmu_pgt_size;
192 u32 mmu_pte_size;
193 u32 mmu_hop_table_size;
194 u32 mmu_hop0_tables_total_size;
195 u32 dram_page_size;
196 u32 cfg_size;
197 u32 sram_size;
198 u32 max_asid;
199 u32 num_of_events;
200 u32 psoc_pci_pll_nr;
201 u32 psoc_pci_pll_nf;
202 u32 psoc_pci_pll_od;
203 u32 psoc_pci_pll_div_factor;
204 u32 high_pll;
205 u32 cb_pool_cb_cnt;
206 u32 cb_pool_cb_size;
207 u8 completion_queues_count;
208 u8 tpc_enabled_mask;
209};
210
211/**
212 * struct hl_dma_fence - wrapper for fence object used by command submissions.
213 * @base_fence: kernel fence object.
214 * @lock: spinlock to protect fence.
215 * @hdev: habanalabs device structure.
216 * @cs_seq: command submission sequence number.
217 */
218struct hl_dma_fence {
219 struct dma_fence base_fence;
220 spinlock_t lock;
221 struct hl_device *hdev;
222 u64 cs_seq;
223};
224
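/*
 * Illustrative sketch, not part of the original patch: because struct
 * hl_dma_fence embeds the base dma_fence object, the wrapper can be
 * recovered from a plain &struct dma_fence pointer with container_of().
 * The helper name below is hypothetical.
 */
static inline struct hl_dma_fence *to_hl_dma_fence(struct dma_fence *fence)
{
	return container_of(fence, struct hl_dma_fence, base_fence);
}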
225/*
226 * Command Buffers
227 */
228
229#define HL_MAX_CB_SIZE 0x200000 /* 2MB */
230
231/**
232 * struct hl_cb_mgr - describes a Command Buffer Manager.
233 * @cb_lock: protects cb_handles.
234 * @cb_handles: an idr to hold all command buffer handles.
235 */
236struct hl_cb_mgr {
237 spinlock_t cb_lock;
238 struct idr cb_handles; /* protected by cb_lock */
239};
240
241/**
242 * struct hl_cb - describes a Command Buffer.
243 * @refcount: reference counter for usage of the CB.
244 * @hdev: pointer to device this CB belongs to.
245 * @lock: spinlock to protect mmap/cs flows.
246 * @debugfs_list: node in debugfs list of command buffers.
247 * @pool_list: node in pool list of command buffers.
248 * @kernel_address: Holds the CB's kernel virtual address.
249 * @bus_address: Holds the CB's DMA address.
250 * @mmap_size: Holds the CB's size that was mmaped.
251 * @size: holds the CB's size.
252 * @id: the CB's ID.
253 * @cs_cnt: holds number of CS that this CB participates in.
254 * @ctx_id: holds the ID of the owner's context.
255 * @mmap: true if the CB is currently mmaped to user.
256 * @is_pool: true if CB was acquired from the pool, false otherwise.
257 */
258struct hl_cb {
259 struct kref refcount;
260 struct hl_device *hdev;
261 spinlock_t lock;
262 struct list_head debugfs_list;
263 struct list_head pool_list;
264 u64 kernel_address;
265 dma_addr_t bus_address;
266 u32 mmap_size;
267 u32 size;
268 u32 id;
269 u32 cs_cnt;
270 u32 ctx_id;
271 u8 mmap;
272 u8 is_pool;
273};
274
275
276/*
277 * QUEUES
278 */
279
280struct hl_cs_job;
281
282/*
283 * Currently, there are two limitations on the maximum length of a queue:
284 *
285 * 1. The memory footprint of the queue. The current allocated space for the
286 * queue is PAGE_SIZE. Because each entry in the queue is HL_BD_SIZE,
287 * the maximum length of the queue can be PAGE_SIZE / HL_BD_SIZE,
288 * which currently is 4096/16 = 256 entries.
289 *
290 * To increase that, we need either to decrease the size of the
291 * BD (difficult), or allocate more than a single page (easier).
292 *
293 * 2. Because the size of the JOB handle field in the BD CTL / completion queue
294 * is 10-bit, we can have up to 1024 open jobs per hardware queue.
295 * Therefore, each queue can hold up to 1024 entries.
296 *
297 * HL_QUEUE_LENGTH is in units of struct hl_bd.
298 * HL_QUEUE_LENGTH * sizeof(struct hl_bd) should be <= HL_PAGE_SIZE
299 */
300
301#define HL_PAGE_SIZE 4096 /* minimum page size */
302/* Must be power of 2 (HL_PAGE_SIZE / HL_BD_SIZE) */
303#define HL_QUEUE_LENGTH 256
304#define HL_QUEUE_SIZE_IN_BYTES (HL_QUEUE_LENGTH * HL_BD_SIZE)
305
306/*
307 * HL_CQ_LENGTH is in units of struct hl_cq_entry.
308 * HL_CQ_LENGTH should be <= HL_PAGE_SIZE
309 */
310#define HL_CQ_LENGTH HL_QUEUE_LENGTH
311#define HL_CQ_SIZE_IN_BYTES (HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)
312
313/* Must be power of 2 (HL_PAGE_SIZE / HL_EQ_ENTRY_SIZE) */
314#define HL_EQ_LENGTH 64
315#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
316
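/*
 * Illustrative sketch, not part of the original patch: the sizing rules
 * stated above can be enforced at compile time, e.g. with BUILD_BUG_ON()
 * in one of the queue .c files:
 *
 *	BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE);
 *	BUILD_BUG_ON(HL_CQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
 *	BUILD_BUG_ON(HL_EQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
 *
 * With a 16-byte BD (as noted in the comment above), a single
 * HL_PAGE_SIZE page indeed holds 4096 / 16 = 256 queue entries.
 */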
317
318/**
319 * struct hl_hw_queue - describes a H/W transport queue.
320 * @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
321 * @queue_type: type of queue.
322 * @kernel_address: holds the queue's kernel virtual address.
323 * @bus_address: holds the queue's DMA address.
324 * @pi: holds the queue's pi value.
325 * @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
326 * @hw_queue_id: the id of the H/W queue.
327 * @int_queue_len: length of internal queue (number of entries).
328 * @valid: is the queue valid (we have array of 32 queues, not all of them
329 * exist).
330 */
331struct hl_hw_queue {
332 struct hl_cs_job **shadow_queue;
333 enum hl_queue_type queue_type;
334 u64 kernel_address;
335 dma_addr_t bus_address;
336 u32 pi;
337 u32 ci;
338 u32 hw_queue_id;
339 u16 int_queue_len;
340 u8 valid;
341};
342
343/**
344 * struct hl_cq - describes a completion queue
345 * @hdev: pointer to the device structure
346 * @kernel_address: holds the queue's kernel virtual address
347 * @bus_address: holds the queue's DMA address
348 * @hw_queue_id: the id of the matching H/W queue
349 * @ci: ci inside the queue
350 * @pi: pi inside the queue
351 * @free_slots_cnt: counter of free slots in queue
352 */
353struct hl_cq {
354 struct hl_device *hdev;
355 u64 kernel_address;
356 dma_addr_t bus_address;
357 u32 hw_queue_id;
358 u32 ci;
359 u32 pi;
360 atomic_t free_slots_cnt;
361};
362
363/**
364 * struct hl_eq - describes the event queue (single one per device)
365 * @hdev: pointer to the device structure
366 * @kernel_address: holds the queue's kernel virtual address
367 * @bus_address: holds the queue's DMA address
368 * @ci: ci inside the queue
369 */
370struct hl_eq {
371 struct hl_device *hdev;
372 u64 kernel_address;
373 dma_addr_t bus_address;
374 u32 ci;
375};
376
377
378/*
379 * ASICs
380 */
381
382/**
383 * enum hl_asic_type - supported ASIC types.
384 * @ASIC_AUTO_DETECT: ASIC type will be automatically set.
385 * @ASIC_GOYA: Goya device.
386 * @ASIC_INVALID: Invalid ASIC type.
387 */
388enum hl_asic_type {
389 ASIC_AUTO_DETECT,
390 ASIC_GOYA,
391 ASIC_INVALID
392};
393
394struct hl_cs_parser;
395
396/**
397 * enum hl_pm_mng_profile - power management profile.
398 * @PM_AUTO: internal clock is set by KMD.
399 * @PM_MANUAL: internal clock is set by the user.
400 * @PM_LAST: last power management type.
401 */
402enum hl_pm_mng_profile {
403 PM_AUTO = 1,
404 PM_MANUAL,
405 PM_LAST
406};
407
408/**
409 * enum hl_pll_frequency - PLL frequency.
410 * @PLL_HIGH: high frequency.
411 * @PLL_LOW: low frequency.
412 * @PLL_LAST: last frequency values that were configured by the user.
413 */
414enum hl_pll_frequency {
415 PLL_HIGH = 1,
416 PLL_LOW,
417 PLL_LAST
418};
419
420/**
421 * struct hl_asic_funcs - ASIC specific functions that can be called from
422 * common code.
423 * @early_init: sets up early driver state (pre sw_init), doesn't configure H/W.
424 * @early_fini: tears down what was done in early_init.
425 * @late_init: sets up late driver/hw state (post hw_init) - Optional.
426 * @late_fini: tears down what was done in late_init (pre hw_fini) - Optional.
427 * @sw_init: sets up driver state, does not configure H/W.
428 * @sw_fini: tears down driver state, does not configure H/W.
429 * @hw_init: sets up the H/W state.
430 * @hw_fini: tears down the H/W state.
431 * @halt_engines: halt engines, needed for reset sequence. This also disables
432 * interrupts from the device. Should be called before
433 * hw_fini and before CS rollback.
434 * @suspend: handles IP specific H/W or SW changes for suspend.
435 * @resume: handles IP specific H/W or SW changes for resume.
436 * @cb_mmap: maps a CB.
437 * @ring_doorbell: increment PI on a given QMAN.
438 * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed.
439 * @dma_alloc_coherent: Allocate coherent DMA memory by calling
440 * dma_alloc_coherent(). This is ASIC function because its
441 * implementation is not trivial when the driver is loaded
442 * in simulation mode (not upstreamed).
443 * @dma_free_coherent: Free coherent DMA memory by calling dma_free_coherent().
444 * This is ASIC function because its implementation is not
445 * trivial when the driver is loaded in simulation mode
446 * (not upstreamed).
447 * @get_int_queue_base: get the internal queue base address.
448 * @test_queues: run simple test on all queues for sanity check.
449 * @dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
450 * size of allocation is HL_DMA_POOL_BLK_SIZE.
451 * @dma_pool_free: free small DMA allocation from pool.
452 * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
453 * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
454 * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
455 * @cs_parser: parse Command Submission.
456 * @asic_dma_map_sg: DMA map scatter-gather list.
457 * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
458 * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
459 * @update_eq_ci: update event queue CI.
460 * @context_switch: called upon ASID context switch.
461 * @restore_phase_topology: clear all SOBs and MONs.
462 * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM.
463 * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM.
464 * @add_device_attr: add ASIC specific device attributes.
465 * @handle_eqe: handle event queue entry (IRQ) from ArmCP.
466 * @set_pll_profile: change PLL profile (manual/automatic).
467 * @get_events_stat: retrieve event queue entries histogram.
468 * @read_pte: read MMU page table entry from DRAM.
469 * @write_pte: write MMU page table entry to DRAM.
470 * @mmu_invalidate_cache: flush MMU STLB cache, either with soft (L1 only) or
471 * hard (L0 & L1) flush.
472 * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
473 * ASID-VA-size mask.
474 * @send_heartbeat: send is-alive packet to ArmCP and verify response.
475 * @enable_clock_gating: enable clock gating for reducing power consumption.
476 * @disable_clock_gating: disable clock for accessing registers on HBW.
477 * @is_device_idle: return true if device is idle, false otherwise.
478 * @soft_reset_late_init: perform certain actions needed after soft reset.
479 * @hw_queues_lock: acquire H/W queues lock.
480 * @hw_queues_unlock: release H/W queues lock.
481 * @get_pci_id: retrieve PCI ID.
482 * @get_eeprom_data: retrieve EEPROM data from F/W.
483 * @send_cpu_message: send buffer to ArmCP.
484 * @get_hw_state: retrieve the H/W state
485 */
486struct hl_asic_funcs {
487 int (*early_init)(struct hl_device *hdev);
488 int (*early_fini)(struct hl_device *hdev);
489 int (*late_init)(struct hl_device *hdev);
490 void (*late_fini)(struct hl_device *hdev);
491 int (*sw_init)(struct hl_device *hdev);
492 int (*sw_fini)(struct hl_device *hdev);
493 int (*hw_init)(struct hl_device *hdev);
494 void (*hw_fini)(struct hl_device *hdev, bool hard_reset);
495 void (*halt_engines)(struct hl_device *hdev, bool hard_reset);
496 int (*suspend)(struct hl_device *hdev);
497 int (*resume)(struct hl_device *hdev);
498 int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
499 u64 kaddress, phys_addr_t paddress, u32 size);
500 void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
501 void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val);
502 void* (*dma_alloc_coherent)(struct hl_device *hdev, size_t size,
503 dma_addr_t *dma_handle, gfp_t flag);
504 void (*dma_free_coherent)(struct hl_device *hdev, size_t size,
505 void *cpu_addr, dma_addr_t dma_handle);
506 void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
507 dma_addr_t *dma_handle, u16 *queue_len);
508 int (*test_queues)(struct hl_device *hdev);
509 void* (*dma_pool_zalloc)(struct hl_device *hdev, size_t size,
510 gfp_t mem_flags, dma_addr_t *dma_handle);
511 void (*dma_pool_free)(struct hl_device *hdev, void *vaddr,
512 dma_addr_t dma_addr);
513 void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev,
514 size_t size, dma_addr_t *dma_handle);
515 void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
516 size_t size, void *vaddr);
517 void (*hl_dma_unmap_sg)(struct hl_device *hdev,
518 struct scatterlist *sg, int nents,
519 enum dma_data_direction dir);
520 int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
521 int (*asic_dma_map_sg)(struct hl_device *hdev,
522 struct scatterlist *sg, int nents,
523 enum dma_data_direction dir);
524 u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
525 struct sg_table *sgt);
526 void (*add_end_of_cb_packets)(u64 kernel_address, u32 len, u64 cq_addr,
527 u32 cq_val, u32 msix_num);
528 void (*update_eq_ci)(struct hl_device *hdev, u32 val);
529 int (*context_switch)(struct hl_device *hdev, u32 asid);
530 void (*restore_phase_topology)(struct hl_device *hdev);
531 int (*debugfs_read32)(struct hl_device *hdev, u64 addr, u32 *val);
532 int (*debugfs_write32)(struct hl_device *hdev, u64 addr, u32 val);
533 void (*add_device_attr)(struct hl_device *hdev,
534 struct attribute_group *dev_attr_grp);
535 void (*handle_eqe)(struct hl_device *hdev,
536 struct hl_eq_entry *eq_entry);
537 void (*set_pll_profile)(struct hl_device *hdev,
538 enum hl_pll_frequency freq);
539 void* (*get_events_stat)(struct hl_device *hdev, u32 *size);
540 u64 (*read_pte)(struct hl_device *hdev, u64 addr);
541 void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
542 void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard);
543 void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
544 u32 asid, u64 va, u64 size);
545 int (*send_heartbeat)(struct hl_device *hdev);
546 void (*enable_clock_gating)(struct hl_device *hdev);
547 void (*disable_clock_gating)(struct hl_device *hdev);
548 bool (*is_device_idle)(struct hl_device *hdev);
549 int (*soft_reset_late_init)(struct hl_device *hdev);
550 void (*hw_queues_lock)(struct hl_device *hdev);
551 void (*hw_queues_unlock)(struct hl_device *hdev);
552 u32 (*get_pci_id)(struct hl_device *hdev);
553 int (*get_eeprom_data)(struct hl_device *hdev, void *data,
554 size_t max_size);
555 int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
556 u16 len, u32 timeout, long *result);
557 enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev);
558};
559
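/*
 * Illustrative sketch, not part of the original patch: each ASIC file is
 * expected to provide a filled-in ops table, and common code then calls
 * through hdev->asic_funcs. Callback names below are hypothetical:
 *
 *	static const struct hl_asic_funcs example_asic_funcs = {
 *		.early_init = example_early_init,
 *		.hw_init = example_hw_init,
 *		.hw_fini = example_hw_fini,
 *	};
 *
 *	rc = hdev->asic_funcs->hw_init(hdev);
 */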
560
561/*
562 * CONTEXTS
563 */
564
565#define HL_KERNEL_ASID_ID 0
566
567/**
568 * struct hl_va_range - virtual addresses range.
569 * @lock: protects the virtual addresses list.
570 * @list: list of virtual addresses blocks available for mappings.
571 * @start_addr: range start address.
572 * @end_addr: range end address.
573 */
574struct hl_va_range {
575 struct mutex lock;
576 struct list_head list;
577 u64 start_addr;
578 u64 end_addr;
579};
580
581/**
582 * struct hl_ctx - user/kernel context.
583 * @mem_hash: holds mapping from virtual address to virtual memory area
584 * descriptor (hl_vm_phys_pg_pack or hl_userptr).
585 * @mmu_hash: holds a mapping from virtual address to pgt_info structure.
586 * @hpriv: pointer to the private (KMD) data of the process (fd).
587 * @hdev: pointer to the device structure.
588 * @refcount: reference counter for the context. Context is released only when
589 * this hits 0. It is incremented on CS and CS_WAIT.
590 * @cs_pending: array of DMA fence objects representing pending CS.
591 * @host_va_range: holds available virtual addresses for host mappings.
592 * @dram_va_range: holds available virtual addresses for DRAM mappings.
593 * @mem_hash_lock: protects the mem_hash.
594 * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
595 * MMU hash or walking the PGT requires taking this lock.
596 * @debugfs_list: node in debugfs list of contexts.
597 * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
598 * to the user so the user can inquire about the CS. It is used as an
599 * index into the cs_pending array.
600 * @dram_default_hops: array that holds all hops addresses needed for default
601 * DRAM mapping.
602 * @cs_lock: spinlock to protect cs_sequence.
603 * @dram_phys_mem: amount of used physical DRAM memory by this context.
604 * @thread_restore_token: token to prevent multiple threads of the same context
605 * from running the restore phase. Only one thread
606 * should run it.
607 * @thread_restore_wait_token: token to prevent the threads that didn't run
608 * the restore phase from moving to their execution
609 * phase before the restore phase has finished.
610 * @asid: context's unique address space ID in the device's MMU.
611 */
612struct hl_ctx {
613 DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
614 DECLARE_HASHTABLE(mmu_hash, MMU_HASH_TABLE_BITS);
615 struct hl_fpriv *hpriv;
616 struct hl_device *hdev;
617 struct kref refcount;
618 struct dma_fence *cs_pending[HL_MAX_PENDING_CS];
619 struct hl_va_range host_va_range;
620 struct hl_va_range dram_va_range;
621 struct mutex mem_hash_lock;
622 struct mutex mmu_lock;
623 struct list_head debugfs_list;
624 u64 cs_sequence;
625 u64 *dram_default_hops;
626 spinlock_t cs_lock;
627 atomic64_t dram_phys_mem;
628 atomic_t thread_restore_token;
629 u32 thread_restore_wait_token;
630 u32 asid;
631};
632
633/**
634 * struct hl_ctx_mgr - for handling multiple contexts.
635 * @ctx_lock: protects ctx_handles.
636 * @ctx_handles: idr to hold all ctx handles.
637 */
638struct hl_ctx_mgr {
639 struct mutex ctx_lock;
640 struct idr ctx_handles;
641};
642
643
644
645/*
646 * COMMAND SUBMISSIONS
647 */
648
649/**
650 * struct hl_userptr - memory mapping chunk information
651 * @vm_type: type of the VM.
652 * @job_node: linked-list node for hanging the object on the Job's list.
653 * @vec: pointer to the frame vector.
654 * @sgt: pointer to the scatter-gather table that holds the pages.
655 * @dir: for DMA unmapping, the direction must be supplied, so save it.
656 * @debugfs_list: node in debugfs list of command submissions.
657 * @addr: user-space virtual pointer to the start of the memory area.
658 * @size: size of the memory area to pin & map.
659 * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
660 */
661struct hl_userptr {
662 enum vm_type_t vm_type; /* must be first */
663 struct list_head job_node;
664 struct frame_vector *vec;
665 struct sg_table *sgt;
666 enum dma_data_direction dir;
667 struct list_head debugfs_list;
668 u64 addr;
669 u32 size;
670 u8 dma_mapped;
671};
672
673/**
674 * struct hl_cs - command submission.
675 * @jobs_in_queue_cnt: per-queue counter of submitted jobs.
676 * @ctx: the context this CS belongs to.
677 * @job_list: list of the CS's jobs in the various queues.
678 * @job_lock: spinlock for the CS's jobs list. Needed for free_job.
679 * @refcount: reference counter for usage of the CS.
680 * @fence: pointer to the fence object of this CS.
681 * @work_tdr: delayed work node for TDR.
682 * @mirror_node: node in device mirror list of command submissions.
683 * @debugfs_list: node in debugfs list of command submissions.
684 * @sequence: the sequence number of this CS.
685 * @submitted: true if CS was submitted to H/W.
686 * @completed: true if CS was completed by device.
687 * @timedout: true if the CS timed out.
688 * @tdr_active: true if TDR was activated for this CS (to prevent
689 * double TDR activation).
690 * @aborted: true if CS was aborted due to some device error.
691 */
692struct hl_cs {
693 u8 jobs_in_queue_cnt[HL_MAX_QUEUES];
694 struct hl_ctx *ctx;
695 struct list_head job_list;
696 spinlock_t job_lock;
697 struct kref refcount;
698 struct dma_fence *fence;
699 struct delayed_work work_tdr;
700 struct list_head mirror_node;
701 struct list_head debugfs_list;
702 u64 sequence;
703 u8 submitted;
704 u8 completed;
705 u8 timedout;
706 u8 tdr_active;
707 u8 aborted;
708};
709
710/**
711 * struct hl_cs_job - command submission job.
712 * @cs_node: the node to hang on the CS jobs list.
713 * @cs: the CS this job belongs to.
714 * @user_cb: the CB we got from the user.
715 * @patched_cb: in case of patching, this is internal CB which is submitted on
716 * the queue instead of the CB we got from the IOCTL.
717 * @finish_work: workqueue object to run when job is completed.
718 * @userptr_list: linked-list of userptr mappings that belong to this job and
719 * wait for completion.
720 * @debugfs_list: node in debugfs list of command submission jobs.
721 * @id: the id of this job inside a CS.
722 * @hw_queue_id: the id of the H/W queue this job is submitted to.
723 * @user_cb_size: the actual size of the CB we got from the user.
724 * @job_cb_size: the actual size of the CB that we put on the queue.
725 * @ext_queue: whether the job is for external queue or internal queue.
726 */
727struct hl_cs_job {
728 struct list_head cs_node;
729 struct hl_cs *cs;
730 struct hl_cb *user_cb;
731 struct hl_cb *patched_cb;
732 struct work_struct finish_work;
733 struct list_head userptr_list;
734 struct list_head debugfs_list;
735 u32 id;
736 u32 hw_queue_id;
737 u32 user_cb_size;
738 u32 job_cb_size;
739 u8 ext_queue;
740};
741
742/**
743 * struct hl_cs_parser - command submission parser properties.
744 * @user_cb: the CB we got from the user.
745 * @patched_cb: in case of patching, this is internal CB which is submitted on
746 * the queue instead of the CB we got from the IOCTL.
747 * @job_userptr_list: linked-list of userptr mappings that belong to the related
748 * job and wait for completion.
749 * @cs_sequence: the sequence number of the related CS.
750 * @ctx_id: the ID of the context the related CS belongs to.
751 * @hw_queue_id: the id of the H/W queue this job is submitted to.
752 * @user_cb_size: the actual size of the CB we got from the user.
753 * @patched_cb_size: the size of the CB after parsing.
754 * @ext_queue: whether the job is for external queue or internal queue.
755 * @job_id: the id of the related job inside the related CS.
756 * @use_virt_addr: whether to treat the addresses in the CB as virtual during
757 * parsing.
758 */
759struct hl_cs_parser {
760 struct hl_cb *user_cb;
761 struct hl_cb *patched_cb;
762 struct list_head *job_userptr_list;
763 u64 cs_sequence;
764 u32 ctx_id;
765 u32 hw_queue_id;
766 u32 user_cb_size;
767 u32 patched_cb_size;
768 u8 ext_queue;
769 u8 job_id;
770 u8 use_virt_addr;
771};
772
773
774/*
775 * MEMORY STRUCTURE
776 */
777
778/**
779 * struct hl_vm_hash_node - hash element from virtual address to virtual
780 * memory area descriptor (hl_vm_phys_pg_pack or
781 * hl_userptr).
782 * @node: node to hang on the hash table in context object.
783 * @vaddr: key virtual address.
784 * @ptr: value pointer (hl_vm_phys_pg_pack or hl_userptr).
785 */
786struct hl_vm_hash_node {
787 struct hlist_node node;
788 u64 vaddr;
789 void *ptr;
790};
791
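/*
 * Illustrative sketch, not part of the original patch: nodes of this type
 * are meant to be keyed by vaddr in the context's mem_hash declared with
 * DECLARE_HASHTABLE() above. Variable names here are hypothetical:
 *
 *	hash_add(ctx->mem_hash, &hnode->node, hnode->vaddr);
 *
 * Lookup would use hash_for_each_possible(ctx->mem_hash, hnode, node,
 * vaddr) and compare hnode->vaddr against the requested address.
 */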
792/**
793 * struct hl_vm_phys_pg_pack - physical page pack.
794 * @vm_type: describes the type of the virtual area descriptor.
795 * @pages: the physical page array.
796 * @mapping_cnt: number of shared mappings.
797 * @asid: the context related to this list.
798 * @npages: number of physical pages in the pack.
799 * @page_size: size of each page in the pack.
800 * @total_size: total size of all the pages in this list.
801 * @flags: HL_MEM_* flags related to this list.
802 * @handle: the provided handle related to this list.
803 * @offset: offset from the first page.
804 * @contiguous: true if the physical memory is contiguous.
805 * @created_from_userptr: true if the pack was created from a host virtual address.
806 */
807struct hl_vm_phys_pg_pack {
808 enum vm_type_t vm_type; /* must be first */
809 u64 *pages;
810 atomic_t mapping_cnt;
811 u32 asid;
812 u32 npages;
813 u32 page_size;
814 u32 total_size;
815 u32 flags;
816 u32 handle;
817 u32 offset;
818 u8 contiguous;
819 u8 created_from_userptr;
820};
821
822/**
823 * struct hl_vm_va_block - virtual range block information.
824 * @node: node to hang on the virtual range list in context object.
825 * @start: virtual range start address.
826 * @end: virtual range end address.
827 * @size: virtual range size.
828 */
829struct hl_vm_va_block {
830 struct list_head node;
831 u64 start;
832 u64 end;
833 u64 size;
834};
835
836/**
837 * struct hl_vm - virtual memory manager for MMU.
838 * @dram_pg_pool: pool for DRAM physical pages of 2MB.
839 * @dram_pg_pool_refcount: reference counter for the pool usage.
840 * @idr_lock: protects phys_pg_pack_handles.
841 * @phys_pg_pack_handles: idr to hold all device allocations handles.
842 * @init_done: whether initialization was done. We need this because VM
843 * initialization might be skipped during device initialization.
844 */
845struct hl_vm {
846 struct gen_pool *dram_pg_pool;
847 struct kref dram_pg_pool_refcount;
848 spinlock_t idr_lock;
849 struct idr phys_pg_pack_handles;
850 u8 init_done;
851};
852
853/*
854 * FILE PRIVATE STRUCTURE
855 */
856
857/**
858 * struct hl_fpriv - process information stored in FD private data.
859 * @hdev: habanalabs device structure.
860 * @filp: pointer to the given file structure.
861 * @taskpid: current process ID.
862 * @ctx: current executing context.
863 * @ctx_mgr: context manager to handle multiple context for this FD.
864 * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
865 * @debugfs_list: list of relevant ASIC debugfs.
866 * @refcount: number of related contexts.
867 * @restore_phase_mutex: lock for context switch and restore phase.
868 */
869struct hl_fpriv {
870 struct hl_device *hdev;
871 struct file *filp;
872 struct pid *taskpid;
873 struct hl_ctx *ctx; /* TODO: remove for multiple ctx */
874 struct hl_ctx_mgr ctx_mgr;
875 struct hl_cb_mgr cb_mgr;
876 struct list_head debugfs_list;
877 struct kref refcount;
878 struct mutex restore_phase_mutex;
879};
880
881
882/*
883 * DebugFS
884 */
885
886/**
887 * struct hl_info_list - debugfs file ops.
888 * @name: file name.
889 * @show: function to output information.
890 * @write: function to write to the file.
891 */
892struct hl_info_list {
893 const char *name;
894 int (*show)(struct seq_file *s, void *data);
895 ssize_t (*write)(struct file *file, const char __user *buf,
896 size_t count, loff_t *f_pos);
897};
898
899/**
900 * struct hl_debugfs_entry - debugfs dentry wrapper.
901 * @dent: base debugfs entry structure.
902 * @info_ent: dentry related ops.
903 * @dev_entry: ASIC specific debugfs manager.
904 */
905struct hl_debugfs_entry {
906 struct dentry *dent;
907 const struct hl_info_list *info_ent;
908 struct hl_dbg_device_entry *dev_entry;
909};
910
911/**
912 * struct hl_dbg_device_entry - ASIC specific debugfs manager.
913 * @root: root dentry.
914 * @hdev: habanalabs device structure.
915 * @entry_arr: array of available hl_debugfs_entry.
916 * @file_list: list of available debugfs files.
917 * @file_mutex: protects file_list.
918 * @cb_list: list of available CBs.
919 * @cb_spinlock: protects cb_list.
920 * @cs_list: list of available CSs.
921 * @cs_spinlock: protects cs_list.
922 * @cs_job_list: list of available CB jobs.
923 * @cs_job_spinlock: protects cs_job_list.
924 * @userptr_list: list of available userptrs (virtual memory chunk descriptor).
925 * @userptr_spinlock: protects userptr_list.
926 * @ctx_mem_hash_list: list of available contexts with MMU mappings.
927 * @ctx_mem_hash_spinlock: protects ctx_mem_hash_list.
928 * @addr: next address to read/write from/to in read/write32.
929 * @mmu_addr: next virtual address to translate to physical address in mmu_show.
930 * @mmu_asid: ASID to use while translating in mmu_show.
931 * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
932 * @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
933 * @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
934 */
935struct hl_dbg_device_entry {
936 struct dentry *root;
937 struct hl_device *hdev;
938 struct hl_debugfs_entry *entry_arr;
939 struct list_head file_list;
940 struct mutex file_mutex;
941 struct list_head cb_list;
942 spinlock_t cb_spinlock;
943 struct list_head cs_list;
944 spinlock_t cs_spinlock;
945 struct list_head cs_job_list;
946 spinlock_t cs_job_spinlock;
947 struct list_head userptr_list;
948 spinlock_t userptr_spinlock;
949 struct list_head ctx_mem_hash_list;
950 spinlock_t ctx_mem_hash_spinlock;
951 u64 addr;
952 u64 mmu_addr;
953 u32 mmu_asid;
954 u8 i2c_bus;
955 u8 i2c_addr;
956 u8 i2c_reg;
957};
958
959
960/*
961 * DEVICES
962 */
963
964/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
965 * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards
966 */
967#define HL_MAX_MINORS 256
968
969/*
970 * Registers read & write functions.
971 */
972
973u32 hl_rreg(struct hl_device *hdev, u32 reg);
974void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
975
976#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
977 readl_poll_timeout(hdev->rmmio + addr, val, cond, sleep_us, timeout_us)
978
979#define RREG32(reg) hl_rreg(hdev, (reg))
980#define WREG32(reg, v) hl_wreg(hdev, (reg), (v))
981#define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n", \
982 hl_rreg(hdev, (reg)))
983
984#define WREG32_P(reg, val, mask) \
985 do { \
986 u32 tmp_ = RREG32(reg); \
987 tmp_ &= (mask); \
988 tmp_ |= ((val) & ~(mask)); \
989 WREG32(reg, tmp_); \
990 } while (0)
991#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
992#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
993
994#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
995#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
996#define WREG32_FIELD(reg, field, val) \
997 WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
998 (val) << REG_FIELD_SHIFT(reg, field))
999
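/*
 * Illustrative sketch, not part of the original patch: typical use of the
 * accessors above from ASIC-specific code. The register and field names
 * are hypothetical and assume the matching mmSOME_CTL_REG address and
 * SOME_CTL_REG_ENABLE_SHIFT/_MASK definitions exist:
 *
 *	u32 sts = RREG32(mmSOME_STATUS_REG);
 *	WREG32(mmSOME_CTL_REG, 0x1);
 *	WREG32_OR(mmSOME_CTL_REG, BIT(3));	// set only bit 3
 *	WREG32_FIELD(SOME_CTL_REG, ENABLE, 1);	// read-modify-write one field
 */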
1000struct hwmon_chip_info;
1001
1002/**
1003 * struct hl_device_reset_work - reset workqueue task wrapper.
1004 * @reset_work: reset work to be done.
1005 * @hdev: habanalabs device structure.
1006 */
1007struct hl_device_reset_work {
1008 struct work_struct reset_work;
1009 struct hl_device *hdev;
1010};
1011
1012/**
1013 * struct hl_device - habanalabs device structure.
1014 * @pdev: pointer to PCI device, can be NULL in case of simulator device.
1015 * @pcie_bar: array of available PCIe bars.
1016 * @rmmio: configuration area address on SRAM.
1017 * @cdev: related char device.
1018 * @dev: related kernel basic device structure.
1019 * @work_freq: delayed work to lower device frequency if possible.
1020 * @work_heartbeat: delayed work for ArmCP is-alive check.
1021 * @asic_name: ASIC specific name.
1022 * @asic_type: ASIC specific type.
1023 * @completion_queue: array of hl_cq.
1024 * @cq_wq: work queue of completion queues for executing work in process context
1025 * @eq_wq: work queue of event queue for executing work in process context.
1026 * @kernel_ctx: KMD context structure.
1027 * @kernel_queues: array of hl_hw_queue.
1028 * @hw_queues_mirror_list: CS mirror list for TDR.
1029 * @hw_queues_mirror_lock: protects hw_queues_mirror_list.
1030 * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs.
1031 * @event_queue: event queue for IRQ from ArmCP.
1032 * @dma_pool: DMA pool for small allocations.
1033 * @cpu_accessible_dma_mem: KMD <-> ArmCP shared memory CPU address.
1034 * @cpu_accessible_dma_address: KMD <-> ArmCP shared memory DMA address.
1035 * @cpu_accessible_dma_pool: KMD <-> ArmCP shared memory pool.
1036 * @asid_bitmap: holds used/available ASIDs.
1037 * @asid_mutex: protects asid_bitmap.
1038 * @fd_open_cnt_lock: lock for updating fd_open_cnt in hl_device_open. Although
1039 * fd_open_cnt is atomic, we need this lock to serialize
1040 * the open function because the driver currently supports
1041 * only a single process at a time. In addition, we need a
1042 * lock here so we can flush user processes which are opening
1043 * the device while we are trying to hard reset it
1044 * @send_cpu_message_lock: enforces only one message in KMD <-> ArmCP queue.
1045 * @asic_prop: ASIC specific immutable properties.
1046 * @asic_funcs: ASIC specific functions.
1047 * @asic_specific: ASIC specific information to use only from ASIC files.
1048 * @mmu_pgt_pool: pool of available MMU hops.
1049 * @vm: virtual memory manager for MMU.
1050 * @mmu_cache_lock: protects MMU cache invalidation, which can serve only one context at a time.
1051 * @hwmon_dev: H/W monitor device.
1052 * @pm_mng_profile: current power management profile.
1053 * @hl_chip_info: ASIC's sensors information.
1054 * @hl_debugfs: device's debugfs manager.
1055 * @cb_pool: list of preallocated CBs.
1056 * @cb_pool_lock: protects the CB pool.
1057 * @user_ctx: current user context executing.
1058 * @dram_used_mem: current DRAM memory consumption.
1059 * @in_reset: is device in reset flow.
1060 * @curr_pll_profile: current PLL profile.
1061 * @fd_open_cnt: number of open user processes.
1062 * @timeout_jiffies: device CS timeout value.
1063 * @max_power: the max power of the device, as configured by the sysadmin. This
1064 * value is saved so in case of hard-reset, KMD will restore this
1065 * value and update the F/W after the re-initialization
1066 * @major: habanalabs KMD major.
1067 * @high_pll: high PLL profile frequency.
1068 * @soft_reset_cnt: number of soft resets since KMD loading.
1069 * @hard_reset_cnt: number of hard resets since KMD loading.
1070 * @id: device minor.
1071 * @disabled: is device disabled.
1072 * @late_init_done: true if the late init stage was done during initialization.
1073 * @hwmon_initialized: true if the H/W monitor sensors were initialized.
1074 * @hard_reset_pending: is there a hard reset work pending.
1075 * @heartbeat: is heartbeat sanity check towards ArmCP enabled.
1076 * @reset_on_lockup: true if a reset should be done in case of stuck CS, false
1077 * otherwise.
1078 * @dram_supports_virtual_memory: is MMU enabled towards DRAM.
1079 * @dram_default_page_mapping: is DRAM default page mapping enabled.
1080 * @init_done: is the initialization of the device done.
1081 * @mmu_enable: is MMU enabled.
1082 * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
1083 */
1084struct hl_device {
1085 struct pci_dev *pdev;
1086 void __iomem *pcie_bar[6];
1087 void __iomem *rmmio;
1088 struct cdev cdev;
1089 struct device *dev;
1090 struct delayed_work work_freq;
1091 struct delayed_work work_heartbeat;
1092 char asic_name[16];
1093 enum hl_asic_type asic_type;
1094 struct hl_cq *completion_queue;
1095 struct workqueue_struct *cq_wq;
1096 struct workqueue_struct *eq_wq;
1097 struct hl_ctx *kernel_ctx;
1098 struct hl_hw_queue *kernel_queues;
1099 struct list_head hw_queues_mirror_list;
1100 spinlock_t hw_queues_mirror_lock;
1101 struct hl_cb_mgr kernel_cb_mgr;
1102 struct hl_eq event_queue;
1103 struct dma_pool *dma_pool;
1104 void *cpu_accessible_dma_mem;
1105 dma_addr_t cpu_accessible_dma_address;
1106 struct gen_pool *cpu_accessible_dma_pool;
1107 unsigned long *asid_bitmap;
1108 struct mutex asid_mutex;
1109 /* TODO: remove fd_open_cnt_lock for multiple process support */
1110 struct mutex fd_open_cnt_lock;
1111 struct mutex send_cpu_message_lock;
1112 struct asic_fixed_properties asic_prop;
1113 const struct hl_asic_funcs *asic_funcs;
1114 void *asic_specific;
1115 struct gen_pool *mmu_pgt_pool;
1116 struct hl_vm vm;
1117 struct mutex mmu_cache_lock;
1118 struct device *hwmon_dev;
1119 enum hl_pm_mng_profile pm_mng_profile;
1120 struct hwmon_chip_info *hl_chip_info;
1121
1122 struct hl_dbg_device_entry hl_debugfs;
1123
1124 struct list_head cb_pool;
1125 spinlock_t cb_pool_lock;
1126
1127 /* TODO: remove user_ctx for multiple process support */
1128 struct hl_ctx *user_ctx;
1129
1130 atomic64_t dram_used_mem;
1131 atomic_t in_reset;
1132 atomic_t curr_pll_profile;
1133 atomic_t fd_open_cnt;
1134 u64 timeout_jiffies;
1135 u64 max_power;
1136 u32 major;
1137 u32 high_pll;
1138 u32 soft_reset_cnt;
1139 u32 hard_reset_cnt;
1140 u16 id;
1141 u8 disabled;
1142 u8 late_init_done;
1143 u8 hwmon_initialized;
1144 u8 hard_reset_pending;
1145 u8 heartbeat;
1146 u8 reset_on_lockup;
1147 u8 dram_supports_virtual_memory;
1148 u8 dram_default_page_mapping;
1149 u8 init_done;
1150 u8 device_cpu_disabled;
1151
1152 /* Parameters for bring-up */
1153 u8 mmu_enable;
1154 u8 cpu_enable;
1155 u8 reset_pcilink;
1156 u8 cpu_queues_enable;
1157 u8 fw_loading;
1158 u8 pldm;
1159};
1160
1161
1162/*
1163 * IOCTLs
1164 */
1165
1166/**
1167 * typedef hl_ioctl_t - typedef for ioctl function in the driver
1168 * @hpriv: pointer to the FD's private data, which contains state of
1169 * user process
1170 * @data: pointer to the input/output arguments structure of the IOCTL
1171 *
1172 * Return: 0 for success, negative value for error
1173 */
1174typedef int hl_ioctl_t(struct hl_fpriv *hpriv, void *data);
1175
1176/**
1177 * struct hl_ioctl_desc - describes an IOCTL entry of the driver.
1178 * @cmd: the IOCTL code as created by the kernel macros.
1179 * @func: pointer to the driver's function that should be called for this IOCTL.
1180 */
1181struct hl_ioctl_desc {
1182 unsigned int cmd;
1183 hl_ioctl_t *func;
1184};
1185
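/*
 * Illustrative sketch, not part of the original patch: a dispatch table
 * built from this descriptor could look as follows. The command code and
 * handler name are hypothetical:
 *
 *	static const struct hl_ioctl_desc hl_ioctls[] = {
 *		{ .cmd = HL_IOCTL_EXAMPLE, .func = hl_example_ioctl },
 *	};
 *
 * The common ioctl entry point would then index the array by _IOC_NR(cmd)
 * and call desc->func(hpriv, kdata) with the copied-in argument buffer.
 */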
1186
1187/*
1188 * Kernel module functions that can be accessed by entire module
1189 */
1190
1191/**
1192 * hl_mem_area_inside_range() - Checks whether address+size are inside a range.
1193 * @address: The start address of the area we want to validate.
1194 * @size: The size in bytes of the area we want to validate.
1195 * @range_start_address: The start address of the valid range.
1196 * @range_end_address: The end address of the valid range.
1197 *
1198 * Return: true if the area is inside the valid range, false otherwise.
1199 */
1200static inline bool hl_mem_area_inside_range(u64 address, u32 size,
1201 u64 range_start_address, u64 range_end_address)
1202{
1203 u64 end_address = address + size;
1204
1205 if ((address >= range_start_address) &&
1206 (end_address <= range_end_address) &&
1207 (end_address > address))
1208 return true;
1209
1210 return false;
1211}
1212
1213/**
1214 * hl_mem_area_crosses_range() - Checks whether address+size crosses a range.
1215 * @address: The start address of the area we want to validate.
1216 * @size: The size in bytes of the area we want to validate.
1217 * @range_start_address: The start address of the valid range.
1218 * @range_end_address: The end address of the valid range.
1219 *
1220 * Return: true if the area overlaps part or all of the valid range,
1221 * false otherwise.
1222 */
1223static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
1224 u64 range_start_address, u64 range_end_address)
1225{
1226 u64 end_address = address + size;
1227
1228 if ((address >= range_start_address) &&
1229 (address < range_end_address))
1230 return true;
1231
1232 if ((end_address >= range_start_address) &&
1233 (end_address < range_end_address))
1234 return true;
1235
1236 if ((address < range_start_address) &&
1237 (end_address >= range_end_address))
1238 return true;
1239
1240 return false;
1241}
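/*
 * Hedged usage sketch (hypothetical helper, not part of the driver): shows
 * how the range helper above can be used to validate a user-supplied area
 * against the user-visible SRAM window. The asic_prop field names follow
 * their use elsewhere in the driver; the exact range bounds are an
 * assumption for illustration only.
 */
static inline bool hl_example_area_in_user_sram(struct hl_device *hdev,
					u64 addr, u32 size)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 sram_end = prop->sram_base_address + prop->sram_size;

	/* Entirely inside the user part of SRAM, partial overlap not allowed */
	return hl_mem_area_inside_range(addr, size,
					prop->sram_user_base_address, sram_end);
}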
1242
1243int hl_device_open(struct inode *inode, struct file *filp);
1244bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
1245int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
1246 enum hl_asic_type asic_type, int minor);
1247void destroy_hdev(struct hl_device *hdev);
1248int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr, u32 timeout_us,
1249 u32 *val);
1250int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr,
1251 u32 timeout_us, u32 *val);
1252int hl_hw_queues_create(struct hl_device *hdev);
1253void hl_hw_queues_destroy(struct hl_device *hdev);
1254int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
1255 u32 cb_size, u64 cb_ptr);
1256int hl_hw_queue_schedule_cs(struct hl_cs *cs);
1257u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
1258void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
1259void hl_int_hw_queue_update_ci(struct hl_cs *cs);
1260void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset);
1261
1262#define hl_queue_inc_ptr(p) hl_hw_queue_add_ptr(p, 1)
1263#define hl_pi_2_offset(pi) ((pi) & (HL_QUEUE_LENGTH - 1))
1264
1265int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id);
1266void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q);
1267int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
1268void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
1269void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
1270void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
1271irqreturn_t hl_irq_handler_cq(int irq, void *arg);
1272irqreturn_t hl_irq_handler_eq(int irq, void *arg);
1273u32 hl_cq_inc_ptr(u32 ptr);
1274
1275int hl_asid_init(struct hl_device *hdev);
1276void hl_asid_fini(struct hl_device *hdev);
1277unsigned long hl_asid_alloc(struct hl_device *hdev);
1278void hl_asid_free(struct hl_device *hdev, unsigned long asid);
1279
1280int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv);
1281void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx);
1282int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
1283void hl_ctx_do_release(struct kref *ref);
1284void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
1285int hl_ctx_put(struct hl_ctx *ctx);
1286struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
1287void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
1288void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
1289
1290int hl_device_init(struct hl_device *hdev, struct class *hclass);
1291void hl_device_fini(struct hl_device *hdev);
1292int hl_device_suspend(struct hl_device *hdev);
1293int hl_device_resume(struct hl_device *hdev);
1294int hl_device_reset(struct hl_device *hdev, bool hard_reset,
1295 bool from_hard_reset_thread);
1296void hl_hpriv_get(struct hl_fpriv *hpriv);
1297void hl_hpriv_put(struct hl_fpriv *hpriv);
1298int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
1299
1300int hl_build_hwmon_channel_info(struct hl_device *hdev,
1301 struct armcp_sensor *sensors_arr);
1302
1303int hl_sysfs_init(struct hl_device *hdev);
1304void hl_sysfs_fini(struct hl_device *hdev);
1305
1306int hl_hwmon_init(struct hl_device *hdev);
1307void hl_hwmon_fini(struct hl_device *hdev);
1308
1309int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, u32 cb_size,
1310 u64 *handle, int ctx_id);
1311int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
1312int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
1313struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
1314 u32 handle);
1315void hl_cb_put(struct hl_cb *cb);
1316void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
1317void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
1318struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size);
1319int hl_cb_pool_init(struct hl_device *hdev);
1320int hl_cb_pool_fini(struct hl_device *hdev);
1321
1322void hl_cs_rollback_all(struct hl_device *hdev);
1323struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue);
1324
1325void goya_set_asic_funcs(struct hl_device *hdev);
1326
1327int hl_vm_ctx_init(struct hl_ctx *ctx);
1328void hl_vm_ctx_fini(struct hl_ctx *ctx);
1329
1330int hl_vm_init(struct hl_device *hdev);
1331void hl_vm_fini(struct hl_device *hdev);
1332
1333int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
1334 struct hl_userptr *userptr);
1335int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
1336void hl_userptr_delete_list(struct hl_device *hdev,
1337 struct list_head *userptr_list);
1338bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
1339 struct list_head *userptr_list,
1340 struct hl_userptr **userptr);
1341
1342int hl_mmu_init(struct hl_device *hdev);
1343void hl_mmu_fini(struct hl_device *hdev);
1344int hl_mmu_ctx_init(struct hl_ctx *ctx);
1345void hl_mmu_ctx_fini(struct hl_ctx *ctx);
1346int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size);
1347int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size);
1348void hl_mmu_swap_out(struct hl_ctx *ctx);
1349void hl_mmu_swap_in(struct hl_ctx *ctx);
1350
1351long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
1352void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
1353long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
1354long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr);
1355long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr);
1356long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr);
1357long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr);
1358void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
1359 long value);
1360u64 hl_get_max_power(struct hl_device *hdev);
1361void hl_set_max_power(struct hl_device *hdev, u64 value);
1362
1363#ifdef CONFIG_DEBUG_FS
1364
1365void hl_debugfs_init(void);
1366void hl_debugfs_fini(void);
1367void hl_debugfs_add_device(struct hl_device *hdev);
1368void hl_debugfs_remove_device(struct hl_device *hdev);
1369void hl_debugfs_add_file(struct hl_fpriv *hpriv);
1370void hl_debugfs_remove_file(struct hl_fpriv *hpriv);
1371void hl_debugfs_add_cb(struct hl_cb *cb);
1372void hl_debugfs_remove_cb(struct hl_cb *cb);
1373void hl_debugfs_add_cs(struct hl_cs *cs);
1374void hl_debugfs_remove_cs(struct hl_cs *cs);
1375void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job);
1376void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job);
1377void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr);
1378void hl_debugfs_remove_userptr(struct hl_device *hdev,
1379 struct hl_userptr *userptr);
1380void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
1381void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
1382
1383#else
1384
1385static inline void __init hl_debugfs_init(void)
1386{
1387}
1388
1389static inline void hl_debugfs_fini(void)
1390{
1391}
1392
1393static inline void hl_debugfs_add_device(struct hl_device *hdev)
1394{
1395}
1396
1397static inline void hl_debugfs_remove_device(struct hl_device *hdev)
1398{
1399}
1400
1401static inline void hl_debugfs_add_file(struct hl_fpriv *hpriv)
1402{
1403}
1404
1405static inline void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
1406{
1407}
1408
1409static inline void hl_debugfs_add_cb(struct hl_cb *cb)
1410{
1411}
1412
1413static inline void hl_debugfs_remove_cb(struct hl_cb *cb)
1414{
1415}
1416
1417static inline void hl_debugfs_add_cs(struct hl_cs *cs)
1418{
1419}
1420
1421static inline void hl_debugfs_remove_cs(struct hl_cs *cs)
1422{
1423}
1424
1425static inline void hl_debugfs_add_job(struct hl_device *hdev,
1426 struct hl_cs_job *job)
1427{
1428}
1429
1430static inline void hl_debugfs_remove_job(struct hl_device *hdev,
1431 struct hl_cs_job *job)
1432{
1433}
1434
1435static inline void hl_debugfs_add_userptr(struct hl_device *hdev,
1436 struct hl_userptr *userptr)
1437{
1438}
1439
1440static inline void hl_debugfs_remove_userptr(struct hl_device *hdev,
1441 struct hl_userptr *userptr)
1442{
1443}
1444
1445static inline void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev,
1446 struct hl_ctx *ctx)
1447{
1448}
1449
1450static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
1451 struct hl_ctx *ctx)
1452{
1453}
1454
1455#endif
1456
1457/* IOCTLs */
1458long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
1459int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
1460int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
1461int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data);
1462int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);
1463
1464#endif /* HABANALABSP_H_ */
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
new file mode 100644
index 000000000000..748601463f11
--- /dev/null
+++ b/drivers/misc/habanalabs/habanalabs_drv.c
@@ -0,0 +1,461 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 *
7 */
8
9#include "habanalabs.h"
10
11#include <linux/pci.h>
12#include <linux/module.h>
13
14#define HL_DRIVER_AUTHOR "HabanaLabs Kernel Driver Team"
15
16#define HL_DRIVER_DESC "Driver for HabanaLabs's AI Accelerators"
17
18MODULE_AUTHOR(HL_DRIVER_AUTHOR);
19MODULE_DESCRIPTION(HL_DRIVER_DESC);
20MODULE_LICENSE("GPL v2");
21
22static int hl_major;
23static struct class *hl_class;
24static DEFINE_IDR(hl_devs_idr);
25static DEFINE_MUTEX(hl_devs_idr_lock);
26
27static int timeout_locked = 5;
28static int reset_on_lockup = 1;
29
30module_param(timeout_locked, int, 0444);
31MODULE_PARM_DESC(timeout_locked,
32 "Device lockup timeout in seconds (0 = disabled, default 5s)");
33
34module_param(reset_on_lockup, int, 0444);
35MODULE_PARM_DESC(reset_on_lockup,
36 "Do device reset on lockup (0 = no, 1 = yes, default yes)");
37
38#define PCI_VENDOR_ID_HABANALABS 0x1da3
39
40#define PCI_IDS_GOYA 0x0001
41
42static const struct pci_device_id ids[] = {
43 { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GOYA), },
44 { 0, }
45};
46MODULE_DEVICE_TABLE(pci, ids);
47
48/*
49 * get_asic_type - translate device id to asic type
50 *
51 * @device: id of the PCI device
52 *
53 * Translate device id to asic type.
54 * In case of an unidentified device, return ASIC_INVALID
55 */
56static enum hl_asic_type get_asic_type(u16 device)
57{
58 enum hl_asic_type asic_type;
59
60 switch (device) {
61 case PCI_IDS_GOYA:
62 asic_type = ASIC_GOYA;
63 break;
64 default:
65 asic_type = ASIC_INVALID;
66 break;
67 }
68
69 return asic_type;
70}
71
72/*
73 * hl_device_open - open function for habanalabs device
74 *
75 * @inode: pointer to inode structure
76 * @filp: pointer to file structure
77 *
78 * Called when a process opens a habanalabs device.
79 */
80int hl_device_open(struct inode *inode, struct file *filp)
81{
82 struct hl_device *hdev;
83 struct hl_fpriv *hpriv;
84 int rc;
85
86 mutex_lock(&hl_devs_idr_lock);
87 hdev = idr_find(&hl_devs_idr, iminor(inode));
88 mutex_unlock(&hl_devs_idr_lock);
89
90 if (!hdev) {
91 pr_err("Couldn't find device %d:%d\n",
92 imajor(inode), iminor(inode));
93 return -ENXIO;
94 }
95
96 mutex_lock(&hdev->fd_open_cnt_lock);
97
98 if (hl_device_disabled_or_in_reset(hdev)) {
99 dev_err_ratelimited(hdev->dev,
100 "Can't open %s because it is disabled or in reset\n",
101 dev_name(hdev->dev));
102 mutex_unlock(&hdev->fd_open_cnt_lock);
103 return -EPERM;
104 }
105
106 if (atomic_read(&hdev->fd_open_cnt)) {
107 dev_info_ratelimited(hdev->dev,
108 "Device %s is already attached to application\n",
109 dev_name(hdev->dev));
110 mutex_unlock(&hdev->fd_open_cnt_lock);
111 return -EBUSY;
112 }
113
114 atomic_inc(&hdev->fd_open_cnt);
115
116 mutex_unlock(&hdev->fd_open_cnt_lock);
117
118 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
119 if (!hpriv) {
120 rc = -ENOMEM;
121 goto close_device;
122 }
123
124 hpriv->hdev = hdev;
125 filp->private_data = hpriv;
126 hpriv->filp = filp;
127 mutex_init(&hpriv->restore_phase_mutex);
128 kref_init(&hpriv->refcount);
129 nonseekable_open(inode, filp);
130
131 hl_cb_mgr_init(&hpriv->cb_mgr);
132 hl_ctx_mgr_init(&hpriv->ctx_mgr);
133
134 rc = hl_ctx_create(hdev, hpriv);
135 if (rc) {
136 dev_err(hdev->dev, "Failed to open FD (CTX fail)\n");
137 goto out_err;
138 }
139
140 hpriv->taskpid = find_get_pid(current->pid);
141
142 /*
143 * Device is IDLE at this point so it is legal to change PLLs. There
144 * is no need to check anything because if the PLL is already HIGH, the
145 * set function will return without doing anything
146 */
147 hl_device_set_frequency(hdev, PLL_HIGH);
148
149 hl_debugfs_add_file(hpriv);
150
151 return 0;
152
153out_err:
154 filp->private_data = NULL;
155 hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
156 hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
157 mutex_destroy(&hpriv->restore_phase_mutex);
158 kfree(hpriv);
159
160close_device:
161 atomic_dec(&hdev->fd_open_cnt);
162 return rc;
163}
164
165/*
166 * create_hdev - create habanalabs device instance
167 *
168 * @dev: will hold the pointer to the new habanalabs device structure
169 * @pdev: pointer to the pci device
170 * @asic_type: in case of a simulator device, the ASIC type of the device
171 * @minor: in case of a simulator device, the minor of the device
172 *
173 * Allocate memory for habanalabs device and initialize basic fields
174 * Identify the ASIC type
175 * Allocate ID (minor) for the device (only for real devices)
176 */
177int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
178 enum hl_asic_type asic_type, int minor)
179{
180 struct hl_device *hdev;
181 int rc;
182
183 *dev = NULL;
184
185 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
186 if (!hdev)
187 return -ENOMEM;
188
189 hdev->major = hl_major;
190 hdev->reset_on_lockup = reset_on_lockup;
191
192 /* Parameters for bring-up - set them to defaults */
193 hdev->mmu_enable = 1;
194 hdev->cpu_enable = 1;
195 hdev->reset_pcilink = 0;
196 hdev->cpu_queues_enable = 1;
197 hdev->fw_loading = 1;
198 hdev->pldm = 0;
199 hdev->heartbeat = 1;
200
201 /* If CPU is disabled, no point in loading FW */
202 if (!hdev->cpu_enable)
203 hdev->fw_loading = 0;
204
205 /* If we don't load FW, no need to initialize CPU queues */
206 if (!hdev->fw_loading)
207 hdev->cpu_queues_enable = 0;
208
209 /* If CPU queues not enabled, no way to do heartbeat */
210 if (!hdev->cpu_queues_enable)
211 hdev->heartbeat = 0;
212
213 if (timeout_locked)
214 hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
215 else
216 hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
217
218 hdev->disabled = true;
219 hdev->pdev = pdev; /* can be NULL in case of simulator device */
220
221 if (asic_type == ASIC_AUTO_DETECT) {
222 hdev->asic_type = get_asic_type(pdev->device);
223 if (hdev->asic_type == ASIC_INVALID) {
224 dev_err(&pdev->dev, "Unsupported ASIC\n");
225 rc = -ENODEV;
226 goto free_hdev;
227 }
228 } else {
229 hdev->asic_type = asic_type;
230 }
231
232 mutex_lock(&hl_devs_idr_lock);
233
234 if (minor == -1) {
235 rc = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS,
236 GFP_KERNEL);
237 } else {
238 void *old_idr = idr_replace(&hl_devs_idr, hdev, minor);
239
240 if (IS_ERR_VALUE(old_idr)) {
241 rc = PTR_ERR(old_idr);
242 pr_err("Error %d when trying to replace minor %d\n",
243 rc, minor);
244 mutex_unlock(&hl_devs_idr_lock);
245 goto free_hdev;
246 }
247 rc = minor;
248 }
249
250 mutex_unlock(&hl_devs_idr_lock);
251
252 if (rc < 0) {
253 if (rc == -ENOSPC) {
254 pr_err("too many devices in the system\n");
255 rc = -EBUSY;
256 }
257 goto free_hdev;
258 }
259
260 hdev->id = rc;
261
262 *dev = hdev;
263
264 return 0;
265
266free_hdev:
267 kfree(hdev);
268 return rc;
269}
270
271/*
272 * destroy_hdev - destroy habanalabs device instance
273 *
274 * @dev: pointer to the habanalabs device structure
275 *
276 */
277void destroy_hdev(struct hl_device *hdev)
278{
279 /* Remove device from the device list */
280 mutex_lock(&hl_devs_idr_lock);
281 idr_remove(&hl_devs_idr, hdev->id);
282 mutex_unlock(&hl_devs_idr_lock);
283
284 kfree(hdev);
285}
286
287static int hl_pmops_suspend(struct device *dev)
288{
289 struct pci_dev *pdev = to_pci_dev(dev);
290 struct hl_device *hdev = pci_get_drvdata(pdev);
291
292 pr_debug("Going to suspend PCI device\n");
293
294 if (!hdev) {
295 pr_err("device pointer is NULL in suspend\n");
296 return 0;
297 }
298
299 return hl_device_suspend(hdev);
300}
301
302static int hl_pmops_resume(struct device *dev)
303{
304 struct pci_dev *pdev = to_pci_dev(dev);
305 struct hl_device *hdev = pci_get_drvdata(pdev);
306
307 pr_debug("Going to resume PCI device\n");
308
309 if (!hdev) {
310 pr_err("device pointer is NULL in resume\n");
311 return 0;
312 }
313
314 return hl_device_resume(hdev);
315}
316
317/*
318 * hl_pci_probe - probe PCI habanalabs devices
319 *
320 * @pdev: pointer to pci device
321 * @id: pointer to pci device id structure
322 *
323 * Standard PCI probe function for habanalabs device.
324 * Create a new habanalabs device and initialize it according to the
325 * device's type
326 */
327static int hl_pci_probe(struct pci_dev *pdev,
328 const struct pci_device_id *id)
329{
330 struct hl_device *hdev;
331 int rc;
332
333 dev_info(&pdev->dev, HL_NAME
334 " device found [%04x:%04x] (rev %x)\n",
335 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
336
337 rc = create_hdev(&hdev, pdev, ASIC_AUTO_DETECT, -1);
338 if (rc)
339 return rc;
340
341 pci_set_drvdata(pdev, hdev);
342
343 rc = hl_device_init(hdev, hl_class);
344 if (rc) {
345 dev_err(&pdev->dev, "Fatal error during habanalabs device init\n");
346 rc = -ENODEV;
347 goto disable_device;
348 }
349
350 return 0;
351
352disable_device:
353 pci_set_drvdata(pdev, NULL);
354 destroy_hdev(hdev);
355
356 return rc;
357}
358
359/*
360 * hl_pci_remove - remove PCI habanalabs devices
361 *
362 * @pdev: pointer to pci device
363 *
364 * Standard PCI remove function for habanalabs device
365 */
366static void hl_pci_remove(struct pci_dev *pdev)
367{
368 struct hl_device *hdev;
369
370 hdev = pci_get_drvdata(pdev);
371 if (!hdev)
372 return;
373
374 hl_device_fini(hdev);
375 pci_set_drvdata(pdev, NULL);
376
377 destroy_hdev(hdev);
378}
379
380static const struct dev_pm_ops hl_pm_ops = {
381 .suspend = hl_pmops_suspend,
382 .resume = hl_pmops_resume,
383};
384
385static struct pci_driver hl_pci_driver = {
386 .name = HL_NAME,
387 .id_table = ids,
388 .probe = hl_pci_probe,
389 .remove = hl_pci_remove,
390 .driver.pm = &hl_pm_ops,
391};
392
393/*
394 * hl_init - Initialize the habanalabs kernel driver
395 */
396static int __init hl_init(void)
397{
398 int rc;
399 dev_t dev;
400
401 pr_info("loading driver\n");
402
403 rc = alloc_chrdev_region(&dev, 0, HL_MAX_MINORS, HL_NAME);
404 if (rc < 0) {
405 pr_err("unable to get major\n");
406 return rc;
407 }
408
409 hl_major = MAJOR(dev);
410
411 hl_class = class_create(THIS_MODULE, HL_NAME);
412 if (IS_ERR(hl_class)) {
413 pr_err("failed to allocate class\n");
414 rc = PTR_ERR(hl_class);
415 goto remove_major;
416 }
417
418 hl_debugfs_init();
419
420 rc = pci_register_driver(&hl_pci_driver);
421 if (rc) {
422 pr_err("failed to register pci device\n");
423 goto remove_debugfs;
424 }
425
426 pr_debug("driver loaded\n");
427
428 return 0;
429
430remove_debugfs:
431 hl_debugfs_fini();
432 class_destroy(hl_class);
433remove_major:
434 unregister_chrdev_region(MKDEV(hl_major, 0), HL_MAX_MINORS);
435 return rc;
436}
437
438/*
439 * hl_exit - Release all resources of the habanalabs kernel driver
440 */
441static void __exit hl_exit(void)
442{
443 pci_unregister_driver(&hl_pci_driver);
444
445 /*
446 * Removing debugfs must be after all devices or simulator devices
447 * have been removed because otherwise we get a bug in the
448 * debugfs module for referencing NULL objects
449 */
450 hl_debugfs_fini();
451
452 class_destroy(hl_class);
453 unregister_chrdev_region(MKDEV(hl_major, 0), HL_MAX_MINORS);
454
455 idr_destroy(&hl_devs_idr);
456
457 pr_debug("driver removed\n");
458}
459
460module_init(hl_init);
461module_exit(hl_exit);
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
new file mode 100644
index 000000000000..2c2739a3c5ec
--- /dev/null
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -0,0 +1,234 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include <uapi/misc/habanalabs.h>
9#include "habanalabs.h"
10
11#include <linux/fs.h>
12#include <linux/uaccess.h>
13#include <linux/slab.h>
14
15static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
16{
17 struct hl_info_hw_ip_info hw_ip = {0};
18 u32 size = args->return_size;
19 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
20 struct asic_fixed_properties *prop = &hdev->asic_prop;
21 u64 sram_kmd_size, dram_kmd_size;
22
23 if ((!size) || (!out))
24 return -EINVAL;
25
26 sram_kmd_size = (prop->sram_user_base_address -
27 prop->sram_base_address);
28 dram_kmd_size = (prop->dram_user_base_address -
29 prop->dram_base_address);
30
31 hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
32 hw_ip.sram_base_address = prop->sram_user_base_address;
33 hw_ip.dram_base_address = prop->dram_user_base_address;
34 hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
35 hw_ip.sram_size = prop->sram_size - sram_kmd_size;
36 hw_ip.dram_size = prop->dram_size - dram_kmd_size;
37 if (hw_ip.dram_size > 0)
38 hw_ip.dram_enabled = 1;
39 hw_ip.num_of_events = prop->num_of_events;
40 memcpy(hw_ip.armcp_version,
41 prop->armcp_info.armcp_version, VERSION_MAX_LEN);
42 hw_ip.armcp_cpld_version = __le32_to_cpu(prop->armcp_info.cpld_version);
43 hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
44 hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
45 hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
46 hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;
47
48 return copy_to_user(out, &hw_ip,
49 min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0;
50}
51
52static int hw_events_info(struct hl_device *hdev, struct hl_info_args *args)
53{
54 u32 size, max_size = args->return_size;
55 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
56 void *arr;
57
58 if ((!max_size) || (!out))
59 return -EINVAL;
60
61 arr = hdev->asic_funcs->get_events_stat(hdev, &size);
62
63 return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
64}
65
66static int dram_usage_info(struct hl_device *hdev, struct hl_info_args *args)
67{
68 struct hl_info_dram_usage dram_usage = {0};
69 u32 max_size = args->return_size;
70 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
71 struct asic_fixed_properties *prop = &hdev->asic_prop;
72 u64 dram_kmd_size;
73
74 if ((!max_size) || (!out))
75 return -EINVAL;
76
77 dram_kmd_size = (prop->dram_user_base_address -
78 prop->dram_base_address);
79 dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
80 atomic64_read(&hdev->dram_used_mem);
81 dram_usage.ctx_dram_mem = atomic64_read(&hdev->user_ctx->dram_phys_mem);
82
83 return copy_to_user(out, &dram_usage,
84 min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
85}
86
87static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
88{
89 struct hl_info_hw_idle hw_idle = {0};
90 u32 max_size = args->return_size;
91 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
92
93 if ((!max_size) || (!out))
94 return -EINVAL;
95
96 hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev);
97
98 return copy_to_user(out, &hw_idle,
99 min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
100}
101
102static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
103{
104 struct hl_info_args *args = data;
105 struct hl_device *hdev = hpriv->hdev;
106 int rc;
107
108 if (hl_device_disabled_or_in_reset(hdev)) {
109 dev_err(hdev->dev,
110 "Device is disabled or in reset. Can't execute INFO IOCTL\n");
111 return -EBUSY;
112 }
113
114 switch (args->op) {
115 case HL_INFO_HW_IP_INFO:
116 rc = hw_ip_info(hdev, args);
117 break;
118
119 case HL_INFO_HW_EVENTS:
120 rc = hw_events_info(hdev, args);
121 break;
122
123 case HL_INFO_DRAM_USAGE:
124 rc = dram_usage_info(hdev, args);
125 break;
126
127 case HL_INFO_HW_IDLE:
128 rc = hw_idle(hdev, args);
129 break;
130
131 default:
132 dev_err(hdev->dev, "Invalid request %d\n", args->op);
133 rc = -ENOTTY;
134 break;
135 }
136
137 return rc;
138}
139
140#define HL_IOCTL_DEF(ioctl, _func) \
141 [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}
142
143static const struct hl_ioctl_desc hl_ioctls[] = {
144 HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
145 HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
146 HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
147 HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl),
148 HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl)
149};
150
151#define HL_CORE_IOCTL_COUNT ARRAY_SIZE(hl_ioctls)
152
153long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
154{
155 struct hl_fpriv *hpriv = filep->private_data;
156 struct hl_device *hdev = hpriv->hdev;
157 hl_ioctl_t *func;
158 const struct hl_ioctl_desc *ioctl = NULL;
159 unsigned int nr = _IOC_NR(cmd);
160 char stack_kdata[128] = {0};
161 char *kdata = NULL;
162 unsigned int usize, asize;
163 int retcode;
164
165 if (hdev->hard_reset_pending) {
166 dev_crit_ratelimited(hdev->dev,
167 "Device HARD reset pending! Please close FD\n");
168 return -ENODEV;
169 }
170
171 if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
172 u32 hl_size;
173
174 ioctl = &hl_ioctls[nr];
175
176 hl_size = _IOC_SIZE(ioctl->cmd);
177 usize = asize = _IOC_SIZE(cmd);
178 if (hl_size > asize)
179 asize = hl_size;
180
181 cmd = ioctl->cmd;
182 } else {
183 dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
184 task_pid_nr(current), nr);
185 return -ENOTTY;
186 }
187
188 /* Do not trust userspace, use our own definition */
189 func = ioctl->func;
190
191 if (unlikely(!func)) {
192 dev_dbg(hdev->dev, "no function\n");
193 retcode = -ENOTTY;
194 goto out_err;
195 }
196
197 if (cmd & (IOC_IN | IOC_OUT)) {
198 if (asize <= sizeof(stack_kdata)) {
199 kdata = stack_kdata;
200 } else {
201 kdata = kzalloc(asize, GFP_KERNEL);
202 if (!kdata) {
203 retcode = -ENOMEM;
204 goto out_err;
205 }
206 }
207 }
208
209 if (cmd & IOC_IN) {
210 if (copy_from_user(kdata, (void __user *)arg, usize)) {
211 retcode = -EFAULT;
212 goto out_err;
213 }
214 } else if (cmd & IOC_OUT) {
215 memset(kdata, 0, usize);
216 }
217
218 retcode = func(hpriv, kdata);
219
220 if (cmd & IOC_OUT)
221 if (copy_to_user((void __user *)arg, kdata, usize))
222 retcode = -EFAULT;
223
224out_err:
225 if (retcode)
226 dev_dbg(hdev->dev,
227 "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
228 task_pid_nr(current), cmd, nr);
229
230 if (kdata != stack_kdata)
231 kfree(kdata);
232
233 return retcode;
234}
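/*
 * Hedged user-space sketch: querying HW IP info through the INFO IOCTL
 * dispatched by hl_ioctl() above. The device node name (/dev/hl0) and the
 * uapi include path are assumptions; field names match the kernel-side
 * usage in hw_ip_info().
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/habanalabs.h>	/* assumed install path of the uapi header */

static int example_query_hw_ip(const char *node)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	struct hl_info_args args = {0};
	int fd = open(node, O_RDWR);

	if (fd < 0)
		return -1;

	args.op = HL_INFO_HW_IP_INFO;
	args.return_pointer = (uint64_t) (uintptr_t) &hw_ip;
	args.return_size = sizeof(hw_ip);

	if (ioctl(fd, HL_IOCTL_INFO, &args))
		perror("HL_IOCTL_INFO");
	else
		printf("device id: 0x%x, sram size: %llu\n",
		       hw_ip.device_id, (unsigned long long) hw_ip.sram_size);

	close(fd);
	return 0;
}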
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
new file mode 100644
index 000000000000..67bece26417c
--- /dev/null
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -0,0 +1,635 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "habanalabs.h"
9
10#include <linux/slab.h>
11
12/*
13 * hl_hw_queue_add_ptr - add to pi or ci and check if it wraps around
14 *
15 * @ptr: the current pi/ci value
16 * @val: the amount to add
17 *
18 * Add val to ptr. The resulting pointer wraps at twice the queue length.
19 */
20inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
21{
22 ptr += val;
23 ptr &= ((HL_QUEUE_LENGTH << 1) - 1);
24 return ptr;
25}
26
27static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
28{
29 int delta = (q->pi - q->ci);
30
31 if (delta >= 0)
32 return (queue_len - delta);
33 else
34 return (abs(delta) - queue_len);
35}
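/*
 * Worked example (illustrative values): with queue_len = 8, pi and ci wrap
 * at 16, i.e. twice the queue length. If ci = 14 and pi has wrapped to 3,
 * then delta = -11 and the helper returns 11 - 8 = 3 free slots. Keeping the
 * extra bit of range is what lets pi == ci mean "empty" while
 * pi == ci + queue_len (mod 2 * queue_len) means "full".
 */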
36
37void hl_int_hw_queue_update_ci(struct hl_cs *cs)
38{
39 struct hl_device *hdev = cs->ctx->hdev;
40 struct hl_hw_queue *q;
41 int i;
42
43 hdev->asic_funcs->hw_queues_lock(hdev);
44
45 if (hdev->disabled)
46 goto out;
47
48 q = &hdev->kernel_queues[0];
49 for (i = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
50 if (q->queue_type == QUEUE_TYPE_INT) {
51 q->ci += cs->jobs_in_queue_cnt[i];
52 q->ci &= ((q->int_queue_len << 1) - 1);
53 }
54 }
55
56out:
57 hdev->asic_funcs->hw_queues_unlock(hdev);
58}
59
60/*
61 * ext_queue_submit_bd - Submit a buffer descriptor to an external queue
62 *
63 * @hdev: pointer to habanalabs device structure
64 * @q: pointer to habanalabs queue structure
65 * @ctl: BD's control word
66 * @len: BD's length
67 * @ptr: BD's pointer
68 *
69 * This function assumes there is enough space on the queue to submit a new
70 * BD to it. It initializes the next BD and calls the device specific
71 * function to set the pi (and doorbell)
72 *
73 * This function must be called when the scheduler mutex is taken
74 *
75 */
76static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
77 u32 ctl, u32 len, u64 ptr)
78{
79 struct hl_bd *bd;
80
81 bd = (struct hl_bd *) (uintptr_t) q->kernel_address;
82 bd += hl_pi_2_offset(q->pi);
83 bd->ctl = __cpu_to_le32(ctl);
84 bd->len = __cpu_to_le32(len);
85 bd->ptr = __cpu_to_le64(ptr + hdev->asic_prop.host_phys_base_address);
86
87 q->pi = hl_queue_inc_ptr(q->pi);
88 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
89}
90
91/*
92 * ext_queue_sanity_checks - perform some sanity checks on an external queue
93 *
94 * @hdev : pointer to hl_device structure
95 * @q : pointer to hl_hw_queue structure
96 * @num_of_entries : how many entries to check for space
97 * @reserve_cq_entry : whether to reserve an entry in the cq
98 *
99 * H/W queues spinlock should be taken before calling this function
100 *
101 * Perform the following:
102 * - Make sure we have enough space in the h/w queue
103 * - Make sure we have enough space in the completion queue
104 * - Reserve space in the completion queue (needs to be reversed if there
105 * is a failure down the road before the actual submission of work). Only
106 * do this action if reserve_cq_entry is true
107 *
108 */
109static int ext_queue_sanity_checks(struct hl_device *hdev,
110 struct hl_hw_queue *q, int num_of_entries,
111 bool reserve_cq_entry)
112{
113 atomic_t *free_slots =
114 &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
115 int free_slots_cnt;
116
117 /* Check we have enough space in the queue */
118 free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);
119
120 if (free_slots_cnt < num_of_entries) {
121 dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
122 q->hw_queue_id, num_of_entries);
123 return -EAGAIN;
124 }
125
126 if (reserve_cq_entry) {
127 /*
128 * Check we have enough space in the completion queue
129 * Add -1 to counter (decrement) unless counter was already 0
130 * In that case, CQ is full so we can't submit a new CB because
131 * we won't get ack on its completion
132 * atomic_add_unless will return 0 if counter was already 0
133 */
134 if (atomic_add_negative(num_of_entries * -1, free_slots)) {
135 dev_dbg(hdev->dev, "No space for %d on CQ %d\n",
136 num_of_entries, q->hw_queue_id);
137 atomic_add(num_of_entries, free_slots);
138 return -EAGAIN;
139 }
140 }
141
142 return 0;
143}
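/*
 * Worked example (illustrative values): if free_slots holds 2 and
 * num_of_entries is 3, atomic_add_negative(-3, free_slots) leaves -1 and
 * returns true, so the 3 entries are added back and -EAGAIN is returned;
 * with free_slots >= 3 the reservation simply sticks until the submission
 * completes or is unrolled.
 */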
144
145/*
146 * int_queue_sanity_checks - perform some sanity checks on an internal queue
147 *
148 * @hdev : pointer to hl_device structure
149 * @q : pointer to hl_hw_queue structure
150 * @num_of_entries : how many entries to check for space
151 *
152 * H/W queues spinlock should be taken before calling this function
153 *
154 * Perform the following:
155 * - Make sure we have enough space in the h/w queue
156 *
157 */
158static int int_queue_sanity_checks(struct hl_device *hdev,
159 struct hl_hw_queue *q,
160 int num_of_entries)
161{
162 int free_slots_cnt;
163
164 /* Check we have enough space in the queue */
165 free_slots_cnt = queue_free_slots(q, q->int_queue_len);
166
167 if (free_slots_cnt < num_of_entries) {
168 dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
169 q->hw_queue_id, num_of_entries);
170 return -EAGAIN;
171 }
172
173 return 0;
174}
175
176/*
177 * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
178 *
179 * @hdev: pointer to hl_device structure
180 * @hw_queue_id: ID of the queue to send the CB to
181 * @cb_size: size of CB
182 * @cb_ptr: pointer to CB location
183 *
184 * This function sends a single CB, that must NOT generate a completion entry
185 *
186 */
187int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
188 u32 cb_size, u64 cb_ptr)
189{
190 struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
191 int rc;
192
193 /*
194 * The CPU queue is a synchronous queue with an effective depth of
195 * a single entry (although it is allocated with room for multiple
196 * entries). Therefore, there is a different lock, called
197 * send_cpu_message_lock, that serializes accesses to the CPU queue.
198 * As a result, we don't need to lock the access to the entire H/W
199 * queues module when submitting a JOB to the CPU queue
200 */
201 if (q->queue_type != QUEUE_TYPE_CPU)
202 hdev->asic_funcs->hw_queues_lock(hdev);
203
204 if (hdev->disabled) {
205 rc = -EPERM;
206 goto out;
207 }
208
209 rc = ext_queue_sanity_checks(hdev, q, 1, false);
210 if (rc)
211 goto out;
212
213 ext_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
214
215out:
216 if (q->queue_type != QUEUE_TYPE_CPU)
217 hdev->asic_funcs->hw_queues_unlock(hdev);
218
219 return rc;
220}
221
222/*
223 * ext_hw_queue_schedule_job - submit a JOB to an external queue
224 *
225 * @job: pointer to the job that needs to be submitted to the queue
226 *
227 * This function must be called when the scheduler mutex is taken
228 *
229 */
230static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
231{
232 struct hl_device *hdev = job->cs->ctx->hdev;
233 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
234 struct hl_cq_entry cq_pkt;
235 struct hl_cq *cq;
236 u64 cq_addr;
237 struct hl_cb *cb;
238 u32 ctl;
239 u32 len;
240 u64 ptr;
241
242 /*
243 * Update the JOB ID inside the BD CTL so the device would know what
244 * to write in the completion queue
245 */
246 ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);
247
248 cb = job->patched_cb;
249 len = job->job_cb_size;
250 ptr = cb->bus_address;
251
252 cq_pkt.data = __cpu_to_le32(
253 ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
254 & CQ_ENTRY_SHADOW_INDEX_MASK) |
255 (1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) |
256 (1 << CQ_ENTRY_READY_SHIFT));
257
258 /*
259 * No need to protect pi_offset because scheduling to the
260 * H/W queues is done under the scheduler mutex
261 *
262 * No need to check if CQ is full because it was already
263 * checked in ext_queue_sanity_checks
264 */
265 cq = &hdev->completion_queue[q->hw_queue_id];
266 cq_addr = cq->bus_address +
267 hdev->asic_prop.host_phys_base_address;
268 cq_addr += cq->pi * sizeof(struct hl_cq_entry);
269
270 hdev->asic_funcs->add_end_of_cb_packets(cb->kernel_address, len,
271 cq_addr,
272 __le32_to_cpu(cq_pkt.data),
273 q->hw_queue_id);
274
275 q->shadow_queue[hl_pi_2_offset(q->pi)] = job;
276
277 cq->pi = hl_cq_inc_ptr(cq->pi);
278
279 ext_queue_submit_bd(hdev, q, ctl, len, ptr);
280}
281
282/*
283 * int_hw_queue_schedule_job - submit a JOB to an internal queue
284 *
285 * @job: pointer to the job that needs to be submitted to the queue
286 *
287 * This function must be called when the scheduler mutex is taken
288 *
289 */
290static void int_hw_queue_schedule_job(struct hl_cs_job *job)
291{
292 struct hl_device *hdev = job->cs->ctx->hdev;
293 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
294 struct hl_bd bd;
295 u64 *pi, *pbd = (u64 *) &bd;
296
297 bd.ctl = 0;
298 bd.len = __cpu_to_le32(job->job_cb_size);
299 bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb);
300
301 pi = (u64 *) (uintptr_t) (q->kernel_address +
302 ((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));
303
304 pi[0] = pbd[0];
305 pi[1] = pbd[1];
306
307 q->pi++;
308 q->pi &= ((q->int_queue_len << 1) - 1);
309
310 /* Flush PQ entry write. Relevant only for specific ASICs */
311 hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]);
312
313 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
314}
315
316/*
317 * hl_hw_queue_schedule_cs - schedule a command submission
318 *
319 * @cs: pointer to the CS to schedule
320 *
321 */
322int hl_hw_queue_schedule_cs(struct hl_cs *cs)
323{
324 struct hl_device *hdev = cs->ctx->hdev;
325 struct hl_cs_job *job, *tmp;
326 struct hl_hw_queue *q;
327 int rc = 0, i, cq_cnt;
328
329 hdev->asic_funcs->hw_queues_lock(hdev);
330
331 if (hl_device_disabled_or_in_reset(hdev)) {
332 dev_err(hdev->dev,
333 "device is disabled or in reset, CS rejected!\n");
334 rc = -EPERM;
335 goto out;
336 }
337
338 q = &hdev->kernel_queues[0];
339 /* This loop assumes all external queues are consecutive */
340 for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
341 if (q->queue_type == QUEUE_TYPE_EXT) {
342 if (cs->jobs_in_queue_cnt[i]) {
343 rc = ext_queue_sanity_checks(hdev, q,
344 cs->jobs_in_queue_cnt[i], true);
345 if (rc)
346 goto unroll_cq_resv;
347 cq_cnt++;
348 }
349 } else if (q->queue_type == QUEUE_TYPE_INT) {
350 if (cs->jobs_in_queue_cnt[i]) {
351 rc = int_queue_sanity_checks(hdev, q,
352 cs->jobs_in_queue_cnt[i]);
353 if (rc)
354 goto unroll_cq_resv;
355 }
356 }
357 }
358
359 spin_lock(&hdev->hw_queues_mirror_lock);
360 list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);
361
362 /* Queue TDR if the CS is the first entry and if timeout is wanted */
363 if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
364 (list_first_entry(&hdev->hw_queues_mirror_list,
365 struct hl_cs, mirror_node) == cs)) {
366 cs->tdr_active = true;
367 schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
368 spin_unlock(&hdev->hw_queues_mirror_lock);
369 } else {
370 spin_unlock(&hdev->hw_queues_mirror_lock);
371 }
372
373 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) {
374 if (job->ext_queue)
375 ext_hw_queue_schedule_job(job);
376 else
377 int_hw_queue_schedule_job(job);
378 }
379
380 cs->submitted = true;
381
382 goto out;
383
384unroll_cq_resv:
385 /* This loop assumes all external queues are consecutive */
386 q = &hdev->kernel_queues[0];
387 for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
388 if ((q->queue_type == QUEUE_TYPE_EXT) &&
389 (cs->jobs_in_queue_cnt[i])) {
390 atomic_t *free_slots =
391 &hdev->completion_queue[i].free_slots_cnt;
392 atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
393 cq_cnt--;
394 }
395 }
396
397out:
398 hdev->asic_funcs->hw_queues_unlock(hdev);
399
400 return rc;
401}
402
403/*
404 * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue
405 *
406 * @hdev: pointer to hl_device structure
407 * @hw_queue_id: ID of the queue whose ci should be incremented
408 */
409void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
410{
411 struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
412
413 q->ci = hl_queue_inc_ptr(q->ci);
414}
415
416static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
417 struct hl_hw_queue *q)
418{
419 void *p;
420 int rc;
421
422 p = hdev->asic_funcs->dma_alloc_coherent(hdev,
423 HL_QUEUE_SIZE_IN_BYTES,
424 &q->bus_address, GFP_KERNEL | __GFP_ZERO);
425 if (!p)
426 return -ENOMEM;
427
428 q->kernel_address = (u64) (uintptr_t) p;
429
430 q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH,
431 sizeof(*q->shadow_queue),
432 GFP_KERNEL);
433 if (!q->shadow_queue) {
434 dev_err(hdev->dev,
435 "Failed to allocate shadow queue for H/W queue %d\n",
436 q->hw_queue_id);
437 rc = -ENOMEM;
438 goto free_queue;
439 }
440
441 /* Make sure read/write pointers are initialized to start of queue */
442 q->ci = 0;
443 q->pi = 0;
444
445 return 0;
446
447free_queue:
448 hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES,
449 (void *) (uintptr_t) q->kernel_address, q->bus_address);
450
451 return rc;
452}
453
454static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
455{
456 void *p;
457
458 p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
459 &q->bus_address, &q->int_queue_len);
460 if (!p) {
461 dev_err(hdev->dev,
462 "Failed to get base address for internal queue %d\n",
463 q->hw_queue_id);
464 return -EFAULT;
465 }
466
467 q->kernel_address = (u64) (uintptr_t) p;
468 q->pi = 0;
469 q->ci = 0;
470
471 return 0;
472}
473
474static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
475{
476 return ext_and_cpu_hw_queue_init(hdev, q);
477}
478
479static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
480{
481 return ext_and_cpu_hw_queue_init(hdev, q);
482}
483
484/*
485 * hw_queue_init - main initialization function for H/W queue object
486 *
487 * @hdev: pointer to hl_device device structure
488 * @q: pointer to hl_hw_queue queue structure
489 * @hw_queue_id: The id of the H/W queue
490 *
491 * Allocate dma-able memory for the queue and initialize fields
492 * Returns 0 on success
493 */
494static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
495 u32 hw_queue_id)
496{
497 int rc;
498
499 BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE);
500
501 q->hw_queue_id = hw_queue_id;
502
503 switch (q->queue_type) {
504 case QUEUE_TYPE_EXT:
505 rc = ext_hw_queue_init(hdev, q);
506 break;
507
508 case QUEUE_TYPE_INT:
509 rc = int_hw_queue_init(hdev, q);
510 break;
511
512 case QUEUE_TYPE_CPU:
513 rc = cpu_hw_queue_init(hdev, q);
514 break;
515
516 case QUEUE_TYPE_NA:
517 q->valid = 0;
518 return 0;
519
520 default:
521 dev_crit(hdev->dev, "wrong queue type %d during init\n",
522 q->queue_type);
523 rc = -EINVAL;
524 break;
525 }
526
527 if (rc)
528 return rc;
529
530 q->valid = 1;
531
532 return 0;
533}
534
535/*
536 * hw_queue_fini - destroy queue
537 *
538 * @hdev: pointer to hl_device device structure
539 * @q: pointer to hl_hw_queue queue structure
540 *
541 * Free the queue memory
542 */
543static void hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
544{
545 if (!q->valid)
546 return;
547
548 /*
549 * If we arrived here, there are no jobs waiting on this queue
550 * so we can safely remove it.
551 * This is because this function can only be called when:
552 * 1. Either a context is deleted, which only can occur if all its
553 * jobs were finished
554 * 2. A context wasn't able to be created due to failure or timeout,
555 * which means there are no jobs on the queue yet
556 *
557 * The only exceptions are the queues of the kernel context, but
558 * if they are being destroyed, it means that the entire module is
559 * being removed. If the module is removed, it means there is no open
560 * user context. It also means that if a job was submitted by
561 * the kernel driver (e.g. context creation), the job itself was
562 * released by the kernel driver when a timeout occurred on its
563 * completion. Thus, we don't need to release it again.
564 */
565
566 if (q->queue_type == QUEUE_TYPE_INT)
567 return;
568
569 kfree(q->shadow_queue);
570
571 hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES,
572 (void *) (uintptr_t) q->kernel_address, q->bus_address);
573}
574
575int hl_hw_queues_create(struct hl_device *hdev)
576{
577 struct asic_fixed_properties *asic = &hdev->asic_prop;
578 struct hl_hw_queue *q;
579 int i, rc, q_ready_cnt;
580
581 hdev->kernel_queues = kcalloc(HL_MAX_QUEUES,
582 sizeof(*hdev->kernel_queues), GFP_KERNEL);
583
584 if (!hdev->kernel_queues) {
585 dev_err(hdev->dev, "Not enough memory for H/W queues\n");
586 return -ENOMEM;
587 }
588
589 /* Initialize the H/W queues */
590 for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
591 i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {
592
593 q->queue_type = asic->hw_queues_props[i].type;
594 rc = hw_queue_init(hdev, q, i);
595 if (rc) {
596 dev_err(hdev->dev,
597 "failed to initialize queue %d\n", i);
598 goto release_queues;
599 }
600 }
601
602 return 0;
603
604release_queues:
605 for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
606 hw_queue_fini(hdev, q);
607
608 kfree(hdev->kernel_queues);
609
610 return rc;
611}
612
613void hl_hw_queues_destroy(struct hl_device *hdev)
614{
615 struct hl_hw_queue *q;
616 int i;
617
618 for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
619 hw_queue_fini(hdev, q);
620
621 kfree(hdev->kernel_queues);
622}
623
624void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
625{
626 struct hl_hw_queue *q;
627 int i;
628
629 for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) {
630 if ((!q->valid) ||
631 ((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
632 continue;
633 q->pi = q->ci = 0;
634 }
635}
diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c
new file mode 100644
index 000000000000..77facd25c4a2
--- /dev/null
+++ b/drivers/misc/habanalabs/hwmon.c
@@ -0,0 +1,458 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "habanalabs.h"
9
10#include <linux/pci.h>
11#include <linux/hwmon.h>
12
13#define SENSORS_PKT_TIMEOUT 1000000 /* 1s */
14#define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1)
15
16int hl_build_hwmon_channel_info(struct hl_device *hdev,
17 struct armcp_sensor *sensors_arr)
18{
19 u32 counts[HWMON_NR_SENSOR_TYPES] = {0};
20 u32 *sensors_by_type[HWMON_NR_SENSOR_TYPES] = {NULL};
21 u32 sensors_by_type_next_index[HWMON_NR_SENSOR_TYPES] = {0};
22 struct hwmon_channel_info **channels_info;
23 u32 num_sensors_for_type, num_active_sensor_types = 0,
24 arr_size = 0, *curr_arr;
25 enum hwmon_sensor_types type;
26 int rc, i, j;
27
28 for (i = 0 ; i < ARMCP_MAX_SENSORS ; i++) {
29 type = __le32_to_cpu(sensors_arr[i].type);
30
31 if ((type == 0) && (sensors_arr[i].flags == 0))
32 break;
33
34 if (type >= HWMON_NR_SENSOR_TYPES) {
35 dev_err(hdev->dev,
36 "Got wrong sensor type %d from device\n", type);
37 return -EINVAL;
38 }
39
40 counts[type]++;
41 arr_size++;
42 }
43
44 for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) {
45 if (counts[i] == 0)
46 continue;
47
48 num_sensors_for_type = counts[i] + 1;
49 curr_arr = kcalloc(num_sensors_for_type, sizeof(*curr_arr),
50 GFP_KERNEL);
51 if (!curr_arr) {
52 rc = -ENOMEM;
53 goto sensors_type_err;
54 }
55
56 num_active_sensor_types++;
57 sensors_by_type[i] = curr_arr;
58 }
59
60 for (i = 0 ; i < arr_size ; i++) {
61 type = __le32_to_cpu(sensors_arr[i].type);
62 curr_arr = sensors_by_type[type];
63 curr_arr[sensors_by_type_next_index[type]++] =
64 __le32_to_cpu(sensors_arr[i].flags);
65 }
66
67 channels_info = kcalloc(num_active_sensor_types + 1,
68 sizeof(*channels_info), GFP_KERNEL);
69 if (!channels_info) {
70 rc = -ENOMEM;
71 goto channels_info_array_err;
72 }
73
74 for (i = 0 ; i < num_active_sensor_types ; i++) {
75 channels_info[i] = kzalloc(sizeof(*channels_info[i]),
76 GFP_KERNEL);
77 if (!channels_info[i]) {
78 rc = -ENOMEM;
79 goto channel_info_err;
80 }
81 }
82
83 for (i = 0, j = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) {
84 if (!sensors_by_type[i])
85 continue;
86
87 channels_info[j]->type = i;
88 channels_info[j]->config = sensors_by_type[i];
89 j++;
90 }
91
92 hdev->hl_chip_info->info =
93 (const struct hwmon_channel_info **)channels_info;
94
95 return 0;
96
97channel_info_err:
98 for (i = 0 ; i < num_active_sensor_types ; i++)
99 if (channels_info[i]) {
100 kfree(channels_info[i]->config);
101 kfree(channels_info[i]);
102 }
103 kfree(channels_info);
104channels_info_array_err:
105sensors_type_err:
106 for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++)
107 kfree(sensors_by_type[i]);
108
109 return rc;
110}
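/*
 * Worked example (illustrative): if sensors_arr reports two temperature
 * sensors with configs T0, T1 and one fan sensor with config F0, the
 * resulting NULL-terminated array is roughly:
 *   channels_info = { { .type = hwmon_temp, .config = {T0, T1, 0} },
 *                     { .type = hwmon_fan,  .config = {F0, 0} },
 *                     NULL };
 * The extra zero entry per type and the trailing NULL slot come from the
 * counts[type] + 1 and num_active_sensor_types + 1 allocations above.
 */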
111
112static int hl_read(struct device *dev, enum hwmon_sensor_types type,
113 u32 attr, int channel, long *val)
114{
115 struct hl_device *hdev = dev_get_drvdata(dev);
116
117 if (hl_device_disabled_or_in_reset(hdev))
118 return -ENODEV;
119
120 switch (type) {
121 case hwmon_temp:
122 switch (attr) {
123 case hwmon_temp_input:
124 case hwmon_temp_max:
125 case hwmon_temp_crit:
126 case hwmon_temp_max_hyst:
127 case hwmon_temp_crit_hyst:
128 break;
129 default:
130 return -EINVAL;
131 }
132
133 *val = hl_get_temperature(hdev, channel, attr);
134 break;
135 case hwmon_in:
136 switch (attr) {
137 case hwmon_in_input:
138 case hwmon_in_min:
139 case hwmon_in_max:
140 break;
141 default:
142 return -EINVAL;
143 }
144
145 *val = hl_get_voltage(hdev, channel, attr);
146 break;
147 case hwmon_curr:
148 switch (attr) {
149 case hwmon_curr_input:
150 case hwmon_curr_min:
151 case hwmon_curr_max:
152 break;
153 default:
154 return -EINVAL;
155 }
156
157 *val = hl_get_current(hdev, channel, attr);
158 break;
159 case hwmon_fan:
160 switch (attr) {
161 case hwmon_fan_input:
162 case hwmon_fan_min:
163 case hwmon_fan_max:
164 break;
165 default:
166 return -EINVAL;
167 }
168 *val = hl_get_fan_speed(hdev, channel, attr);
169 break;
170 case hwmon_pwm:
171 switch (attr) {
172 case hwmon_pwm_input:
173 case hwmon_pwm_enable:
174 break;
175 default:
176 return -EINVAL;
177 }
178 *val = hl_get_pwm_info(hdev, channel, attr);
179 break;
180 default:
181 return -EINVAL;
182 }
183 return 0;
184}
185
186static int hl_write(struct device *dev, enum hwmon_sensor_types type,
187 u32 attr, int channel, long val)
188{
189 struct hl_device *hdev = dev_get_drvdata(dev);
190
191 if (hl_device_disabled_or_in_reset(hdev))
192 return -ENODEV;
193
194 switch (type) {
195 case hwmon_pwm:
196 switch (attr) {
197 case hwmon_pwm_input:
198 case hwmon_pwm_enable:
199 break;
200 default:
201 return -EINVAL;
202 }
203 hl_set_pwm_info(hdev, channel, attr, val);
204 break;
205 default:
206 return -EINVAL;
207 }
208 return 0;
209}
210
211static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
212 u32 attr, int channel)
213{
214 switch (type) {
215 case hwmon_temp:
216 switch (attr) {
217 case hwmon_temp_input:
218 case hwmon_temp_max:
219 case hwmon_temp_max_hyst:
220 case hwmon_temp_crit:
221 case hwmon_temp_crit_hyst:
222 return 0444;
223 }
224 break;
225 case hwmon_in:
226 switch (attr) {
227 case hwmon_in_input:
228 case hwmon_in_min:
229 case hwmon_in_max:
230 return 0444;
231 }
232 break;
233 case hwmon_curr:
234 switch (attr) {
235 case hwmon_curr_input:
236 case hwmon_curr_min:
237 case hwmon_curr_max:
238 return 0444;
239 }
240 break;
241 case hwmon_fan:
242 switch (attr) {
243 case hwmon_fan_input:
244 case hwmon_fan_min:
245 case hwmon_fan_max:
246 return 0444;
247 }
248 break;
249 case hwmon_pwm:
250 switch (attr) {
251 case hwmon_pwm_input:
252 case hwmon_pwm_enable:
253 return 0644;
254 }
255 break;
256 default:
257 break;
258 }
259 return 0;
260}
261
262static const struct hwmon_ops hl_hwmon_ops = {
263 .is_visible = hl_is_visible,
264 .read = hl_read,
265 .write = hl_write
266};
267
268long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr)
269{
270 struct armcp_packet pkt;
271 long result;
272 int rc;
273
274 memset(&pkt, 0, sizeof(pkt));
275
276 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET <<
277 ARMCP_PKT_CTL_OPCODE_SHIFT);
278 pkt.sensor_index = __cpu_to_le16(sensor_index);
279 pkt.type = __cpu_to_le16(attr);
280
281 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
282 SENSORS_PKT_TIMEOUT, &result);
283
284 if (rc) {
285 dev_err(hdev->dev,
286 "Failed to get temperature from sensor %d, error %d\n",
287 sensor_index, rc);
288 result = 0;
289 }
290
291 return result;
292}
293
294long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr)
295{
296 struct armcp_packet pkt;
297 long result;
298 int rc;
299
300 memset(&pkt, 0, sizeof(pkt));
301
302 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_VOLTAGE_GET <<
303 ARMCP_PKT_CTL_OPCODE_SHIFT);
304 pkt.sensor_index = __cpu_to_le16(sensor_index);
305 pkt.type = __cpu_to_le16(attr);
306
307 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
308 SENSORS_PKT_TIMEOUT, &result);
309
310 if (rc) {
311 dev_err(hdev->dev,
312 "Failed to get voltage from sensor %d, error %d\n",
313 sensor_index, rc);
314 result = 0;
315 }
316
317 return result;
318}
319
320long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr)
321{
322 struct armcp_packet pkt;
323 long result;
324 int rc;
325
326 memset(&pkt, 0, sizeof(pkt));
327
328 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_CURRENT_GET <<
329 ARMCP_PKT_CTL_OPCODE_SHIFT);
330 pkt.sensor_index = __cpu_to_le16(sensor_index);
331 pkt.type = __cpu_to_le16(attr);
332
333 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
334 SENSORS_PKT_TIMEOUT, &result);
335
336 if (rc) {
337 dev_err(hdev->dev,
338 "Failed to get current from sensor %d, error %d\n",
339 sensor_index, rc);
340 result = 0;
341 }
342
343 return result;
344}
345
346long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr)
347{
348 struct armcp_packet pkt;
349 long result;
350 int rc;
351
352 memset(&pkt, 0, sizeof(pkt));
353
354 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FAN_SPEED_GET <<
355 ARMCP_PKT_CTL_OPCODE_SHIFT);
356 pkt.sensor_index = __cpu_to_le16(sensor_index);
357 pkt.type = __cpu_to_le16(attr);
358
359 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
360 SENSORS_PKT_TIMEOUT, &result);
361
362 if (rc) {
363 dev_err(hdev->dev,
364 "Failed to get fan speed from sensor %d, error %d\n",
365 sensor_index, rc);
366 result = 0;
367 }
368
369 return result;
370}
371
372long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr)
373{
374 struct armcp_packet pkt;
375 long result;
376 int rc;
377
378 memset(&pkt, 0, sizeof(pkt));
379
380 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_PWM_GET <<
381 ARMCP_PKT_CTL_OPCODE_SHIFT);
382 pkt.sensor_index = __cpu_to_le16(sensor_index);
383 pkt.type = __cpu_to_le16(attr);
384
385 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
386 SENSORS_PKT_TIMEOUT, &result);
387
388 if (rc) {
389 dev_err(hdev->dev,
390 "Failed to get pwm info from sensor %d, error %d\n",
391 sensor_index, rc);
392 result = 0;
393 }
394
395 return result;
396}
397
398void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
399 long value)
400{
401 struct armcp_packet pkt;
402 int rc;
403
404 memset(&pkt, 0, sizeof(pkt));
405
406 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_PWM_SET <<
407 ARMCP_PKT_CTL_OPCODE_SHIFT);
408 pkt.sensor_index = __cpu_to_le16(sensor_index);
409 pkt.type = __cpu_to_le16(attr);
410 pkt.value = __cpu_to_le64(value);
411
412 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
413 SENSORS_PKT_TIMEOUT, NULL);
414
415 if (rc)
416 dev_err(hdev->dev,
417 "Failed to set pwm info to sensor %d, error %d\n",
418 sensor_index, rc);
419}
420
421int hl_hwmon_init(struct hl_device *hdev)
422{
423 struct device *dev = hdev->pdev ? &hdev->pdev->dev : hdev->dev;
424 int rc;
425
426 if ((hdev->hwmon_initialized) || !(hdev->fw_loading))
427 return 0;
428
429 if (hdev->hl_chip_info->info) {
430 hdev->hl_chip_info->ops = &hl_hwmon_ops;
431
432 hdev->hwmon_dev = hwmon_device_register_with_info(dev,
433 "habanalabs", hdev, hdev->hl_chip_info, NULL);
434 if (IS_ERR(hdev->hwmon_dev)) {
435 rc = PTR_ERR(hdev->hwmon_dev);
436 dev_err(hdev->dev,
437 "Unable to register hwmon device: %d\n", rc);
438 return rc;
439 }
440
441 dev_info(hdev->dev, "%s: add sensors information\n",
442 dev_name(hdev->hwmon_dev));
443
444 hdev->hwmon_initialized = true;
445 } else {
446 dev_info(hdev->dev, "no available sensors\n");
447 }
448
449 return 0;
450}
451
452void hl_hwmon_fini(struct hl_device *hdev)
453{
454 if (!hdev->hwmon_initialized)
455 return;
456
457 hwmon_device_unregister(hdev->hwmon_dev);
458}
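/*
 * Usage note (assumed sysfs paths): once hl_hwmon_init() registers the
 * device, its sensors are exposed through the standard hwmon sysfs
 * interface, e.g. by reading a value such as
 *   /sys/class/hwmon/hwmonN/temp1_input
 * or by querying the device with the lm-sensors "sensors" tool.
 */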
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h
new file mode 100644
index 000000000000..9dddb917e72c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/armcp_if.h
@@ -0,0 +1,335 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef ARMCP_IF_H
9#define ARMCP_IF_H
10
11#include <linux/types.h>
12
13/*
14 * EVENT QUEUE
15 */
16
17struct hl_eq_header {
18 __le32 reserved;
19 __le32 ctl;
20};
21
22struct hl_eq_entry {
23 struct hl_eq_header hdr;
24 __le64 data[7];
25};
26
27#define HL_EQ_ENTRY_SIZE sizeof(struct hl_eq_entry)
28
29#define EQ_CTL_READY_SHIFT 31
30#define EQ_CTL_READY_MASK 0x80000000
31
32#define EQ_CTL_EVENT_TYPE_SHIFT 16
33#define EQ_CTL_EVENT_TYPE_MASK 0x03FF0000
34
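
The two EQ_CTL fields above are decoded straight from the ctl word of an event queue entry; a minimal sketch (the eq_entry pointer is assumed to come from the event queue handler):

	u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
	bool ready = ctl & EQ_CTL_READY_MASK;
	u32 event_type = (ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT;
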
35#define EVENT_QUEUE_MSIX_IDX 5
36
37enum pq_init_status {
38 PQ_INIT_STATUS_NA = 0,
39 PQ_INIT_STATUS_READY_FOR_CP,
40 PQ_INIT_STATUS_READY_FOR_HOST
41};
42
43/*
44 * ArmCP Primary Queue Packets
45 *
46 * During normal operation, KMD needs to send various messages to ArmCP,
 47 * usually either to SET some value into a H/W peripheral or to GET the current
 48 * value of some H/W peripheral. For example, SET the frequency of MME/TPC and
49 * GET the value of the thermal sensor.
50 *
51 * These messages can be initiated either by the User application or by KMD
52 * itself, e.g. power management code. In either case, the communication from
53 * KMD to ArmCP will *always* be in synchronous mode, meaning that KMD will
 54 * send a single message and poll until the message is acknowledged and the
55 * results are ready (if results are needed).
56 *
57 * This means that only a single message can be sent at a time and KMD must
58 * wait for its result before sending the next message. Having said that,
 59 * because these are control messages which are sent at a relatively low
60 * frequency, this limitation seems acceptable. It's important to note that
61 * in case of multiple devices, messages to different devices *can* be sent
62 * at the same time.
63 *
64 * The message, inputs/outputs (if relevant) and fence object will be located
65 * on the device DDR at an address that will be determined by KMD. During
 66 * device initialization phase, KMD will pass that address to ArmCP. Most of
67 * the message types will contain inputs/outputs inside the message itself.
68 * The common part of each message will contain the opcode of the message (its
69 * type) and a field representing a fence object.
70 *
71 * When KMD wishes to send a message to ArmCP, it will write the message
72 * contents to the device DDR, clear the fence object and then write the
73 * value 484 to the mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR register to issue
74 * the 484 interrupt-id to the ARM core.
75 *
76 * Upon receiving the 484 interrupt-id, ArmCP will read the message from the
77 * DDR. In case the message is a SET operation, ArmCP will first perform the
78 * operation and then write to the fence object on the device DDR. In case the
79 * message is a GET operation, ArmCP will first fill the results section on the
80 * device DDR and then write to the fence object. If an error occurred, ArmCP
81 * will fill the rc field with the right error code.
82 *
83 * In the meantime, KMD will poll on the fence object. Once KMD sees that the
84 * fence object is signaled, it will read the results from the device DDR
85 * (if relevant) and resume the code execution in KMD.
86 *
 87 * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8,
 88 * so that the value put by KMD matches the value read by ArmCP.
89 *
90 * Non-QMAN packets should be limited to values 1 through (2^8 - 1)
91 *
92 * Detailed description:
93 *
94 * ARMCP_PACKET_DISABLE_PCI_ACCESS -
95 * After receiving this packet the embedded CPU must NOT issue PCI
 96 * transactions (read/write) towards the Host CPU. This also includes
97 * sending MSI-X interrupts.
98 * This packet is usually sent before the device is moved to D3Hot state.
99 *
100 * ARMCP_PACKET_ENABLE_PCI_ACCESS -
101 * After receiving this packet the embedded CPU is allowed to issue PCI
102 * transactions towards the Host CPU, including sending MSI-X interrupts.
 103 * This packet is usually sent after the device is moved to D0 state.
104 *
105 * ARMCP_PACKET_TEMPERATURE_GET -
106 * Fetch the current temperature / Max / Max Hyst / Critical /
107 * Critical Hyst of a specified thermal sensor. The packet's
108 * arguments specify the desired sensor and the field to get.
109 *
110 * ARMCP_PACKET_VOLTAGE_GET -
111 * Fetch the voltage / Max / Min of a specified sensor. The packet's
112 * arguments specify the sensor and type.
113 *
114 * ARMCP_PACKET_CURRENT_GET -
115 * Fetch the current / Max / Min of a specified sensor. The packet's
116 * arguments specify the sensor and type.
117 *
118 * ARMCP_PACKET_FAN_SPEED_GET -
119 * Fetch the speed / Max / Min of a specified fan. The packet's
120 * arguments specify the sensor and type.
121 *
122 * ARMCP_PACKET_PWM_GET -
123 * Fetch the pwm value / mode of a specified pwm. The packet's
124 * arguments specify the sensor and type.
125 *
126 * ARMCP_PACKET_PWM_SET -
127 * Set the pwm value / mode of a specified pwm. The packet's
128 * arguments specify the sensor, type and value.
129 *
130 * ARMCP_PACKET_FREQUENCY_SET -
131 * Set the frequency of a specified PLL. The packet's arguments specify
132 * the PLL and the desired frequency. The actual frequency in the device
133 * might differ from the requested frequency.
134 *
135 * ARMCP_PACKET_FREQUENCY_GET -
136 * Fetch the frequency of a specified PLL. The packet's arguments specify
137 * the PLL.
138 *
139 * ARMCP_PACKET_LED_SET -
140 * Set the state of a specified led. The packet's arguments
141 * specify the led and the desired state.
142 *
143 * ARMCP_PACKET_I2C_WR -
144 * Write 32-bit value to I2C device. The packet's arguments specify the
145 * I2C bus, address and value.
146 *
147 * ARMCP_PACKET_I2C_RD -
148 * Read 32-bit value from I2C device. The packet's arguments specify the
149 * I2C bus and address.
150 *
151 * ARMCP_PACKET_INFO_GET -
152 * Fetch information from the device as specified in the packet's
153 * structure. KMD passes the max size it allows the ArmCP to write to
154 * the structure, to prevent data corruption in case of mismatched
155 * KMD/FW versions.
156 *
157 * ARMCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed
158 *
159 * ARMCP_PACKET_UNMASK_RAZWI_IRQ -
160 * Unmask the given IRQ. The IRQ number is specified in the value field.
161 * The packet is sent after receiving an interrupt and printing its
162 * relevant information.
163 *
164 * ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY -
 165 * Unmask the given IRQs. The IRQ numbers are specified in an array right
166 * after the armcp_packet structure, where its first element is the array
167 * length. The packet is sent after a soft reset was done in order to
168 * handle any interrupts that were sent during the reset process.
169 *
170 * ARMCP_PACKET_TEST -
171 * Test packet for ArmCP connectivity. The CPU will put the fence value
172 * in the result field.
173 *
174 * ARMCP_PACKET_FREQUENCY_CURR_GET -
175 * Fetch the current frequency of a specified PLL. The packet's arguments
176 * specify the PLL.
177 *
178 * ARMCP_PACKET_MAX_POWER_GET -
179 * Fetch the maximal power of the device.
180 *
181 * ARMCP_PACKET_MAX_POWER_SET -
182 * Set the maximal power of the device. The packet's arguments specify
183 * the power.
184 *
185 * ARMCP_PACKET_EEPROM_DATA_GET -
186 * Get EEPROM data from the ArmCP kernel. The buffer is specified in the
187 * addr field. The CPU will put the returned data size in the result
188 * field. In addition, KMD passes the max size it allows the ArmCP to
189 * write to the structure, to prevent data corruption in case of
190 * mismatched KMD/FW versions.
191 *
192 */
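
A minimal sketch of the synchronous handshake described above; the real flow lives in the ASIC's send_cpu_message() callback (not part of this header), and the doorbell helper, retry count and sleep interval here are assumptions for illustration:

/* Illustrative only - not the driver's actual implementation */
static int armcp_send_sync(struct armcp_packet __iomem *pkt_mem,
			   const struct armcp_packet *pkt)
{
	u32 fence = 0;
	int i;

	memcpy_toio(pkt_mem, pkt, sizeof(*pkt));	/* 1. message into device DDR */
	writel(0, &pkt_mem->fence);			/* 2. clear the fence object */
	ring_doorbell_484();		/* 3. hypothetical helper: issue interrupt-id 484 */

	/* 4. poll until ArmCP signals completion through the fence field */
	for (i = 0; i < 1000 && fence != ARMCP_PACKET_FENCE_VAL; i++) {
		usleep_range(100, 200);
		fence = readl(&pkt_mem->fence);
	}
	if (fence != ARMCP_PACKET_FENCE_VAL)
		return -ETIMEDOUT;

	/* 5. the return code comes back in the ctl word */
	return (readl(&pkt_mem->ctl) & ARMCP_PKT_CTL_RC_MASK) >>
			ARMCP_PKT_CTL_RC_SHIFT;
}
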
193
194enum armcp_packet_id {
195 ARMCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */
196 ARMCP_PACKET_ENABLE_PCI_ACCESS, /* internal */
197 ARMCP_PACKET_TEMPERATURE_GET, /* sysfs */
198 ARMCP_PACKET_VOLTAGE_GET, /* sysfs */
199 ARMCP_PACKET_CURRENT_GET, /* sysfs */
200 ARMCP_PACKET_FAN_SPEED_GET, /* sysfs */
201 ARMCP_PACKET_PWM_GET, /* sysfs */
202 ARMCP_PACKET_PWM_SET, /* sysfs */
203 ARMCP_PACKET_FREQUENCY_SET, /* sysfs */
204 ARMCP_PACKET_FREQUENCY_GET, /* sysfs */
205 ARMCP_PACKET_LED_SET, /* debugfs */
206 ARMCP_PACKET_I2C_WR, /* debugfs */
207 ARMCP_PACKET_I2C_RD, /* debugfs */
208 ARMCP_PACKET_INFO_GET, /* IOCTL */
209 ARMCP_PACKET_FLASH_PROGRAM_REMOVED,
210 ARMCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */
211 ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */
212 ARMCP_PACKET_TEST, /* internal */
213 ARMCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */
214 ARMCP_PACKET_MAX_POWER_GET, /* sysfs */
215 ARMCP_PACKET_MAX_POWER_SET, /* sysfs */
216 ARMCP_PACKET_EEPROM_DATA_GET, /* sysfs */
217};
218
219#define ARMCP_PACKET_FENCE_VAL 0xFE8CE7A5
220
221#define ARMCP_PKT_CTL_RC_SHIFT 12
222#define ARMCP_PKT_CTL_RC_MASK 0x0000F000
223
224#define ARMCP_PKT_CTL_OPCODE_SHIFT 16
225#define ARMCP_PKT_CTL_OPCODE_MASK 0x1FFF0000
226
227struct armcp_packet {
228 union {
229 __le64 value; /* For SET packets */
230 __le64 result; /* For GET packets */
231 __le64 addr; /* For PQ */
232 };
233
234 __le32 ctl;
235
236 __le32 fence; /* Signal to KMD that message is completed */
237
238 union {
239 struct {/* For temperature/current/voltage/fan/pwm get/set */
240 __le16 sensor_index;
241 __le16 type;
242 };
243
244 struct { /* For I2C read/write */
245 __u8 i2c_bus;
246 __u8 i2c_addr;
247 __u8 i2c_reg;
248 __u8 pad; /* unused */
249 };
250
251 /* For frequency get/set */
252 __le32 pll_index;
253
254 /* For led set */
255 __le32 led_index;
256
257 /* For get Armcp info/EEPROM data */
258 __le32 data_max_size;
259 };
260};
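
The hwmon helpers earlier in this patch fill exactly this layout: the opcode goes into the upper bits of ctl on the way out, and ArmCP returns its status in the rc bits of the same word. A short sketch using the definitions in this header (the helper names are made up for illustration):

/* Sketch: build a temperature GET request for a given sensor */
static void armcp_fill_temp_get(struct armcp_packet *pkt, int sensor)
{
	memset(pkt, 0, sizeof(*pkt));
	pkt->ctl = cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET <<
				ARMCP_PKT_CTL_OPCODE_SHIFT);
	pkt->sensor_index = cpu_to_le16(sensor);
	pkt->type = cpu_to_le16(armcp_temp_input);
}

/* Sketch: extract the return code ArmCP wrote back into ctl */
static u32 armcp_get_rc(const struct armcp_packet *pkt)
{
	u32 ctl = le32_to_cpu(pkt->ctl);

	return (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
}
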
261
262struct armcp_unmask_irq_arr_packet {
263 struct armcp_packet armcp_pkt;
264 __le32 length;
265 __le32 irqs[0];
266};
267
268enum armcp_packet_rc {
269 armcp_packet_success,
270 armcp_packet_invalid,
271 armcp_packet_fault
272};
273
274enum armcp_temp_type {
275 armcp_temp_input,
276 armcp_temp_max = 6,
277 armcp_temp_max_hyst,
278 armcp_temp_crit,
279 armcp_temp_crit_hyst
280};
281
282enum armcp_in_attributes {
283 armcp_in_input,
284 armcp_in_min,
285 armcp_in_max
286};
287
288enum armcp_curr_attributes {
289 armcp_curr_input,
290 armcp_curr_min,
291 armcp_curr_max
292};
293
294enum armcp_fan_attributes {
295 armcp_fan_input,
296 armcp_fan_min = 2,
297 armcp_fan_max
298};
299
300enum armcp_pwm_attributes {
301 armcp_pwm_input,
302 armcp_pwm_enable
303};
304
305/* Event Queue Packets */
306
307struct eq_generic_event {
308 __le64 data[7];
309};
310
311/*
312 * ArmCP info
313 */
314
315#define VERSION_MAX_LEN 128
316#define ARMCP_MAX_SENSORS 128
317
318struct armcp_sensor {
319 __le32 type;
320 __le32 flags;
321};
322
323struct armcp_info {
324 struct armcp_sensor sensors[ARMCP_MAX_SENSORS];
325 __u8 kernel_version[VERSION_MAX_LEN];
326 __le32 reserved[3];
327 __le32 cpld_version;
328 __le32 infineon_version;
329 __u8 fuse_version[VERSION_MAX_LEN];
330 __u8 thermal_version[VERSION_MAX_LEN];
331 __u8 armcp_version[VERSION_MAX_LEN];
332 __le64 dram_size;
333};
334
335#endif /* ARMCP_IF_H */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
new file mode 100644
index 000000000000..2cf5c46b6e8e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
@@ -0,0 +1,191 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_CPU_CA53_CFG_MASKS_H_
14#define ASIC_REG_CPU_CA53_CFG_MASKS_H_
15
16/*
17 *****************************************
18 * CPU_CA53_CFG (Prototype: CA53_CFG)
19 *****************************************
20 */
21
22/* CPU_CA53_CFG_ARM_CFG */
23#define CPU_CA53_CFG_ARM_CFG_AA64NAA32_SHIFT 0
24#define CPU_CA53_CFG_ARM_CFG_AA64NAA32_MASK 0x3
25#define CPU_CA53_CFG_ARM_CFG_END_SHIFT 4
26#define CPU_CA53_CFG_ARM_CFG_END_MASK 0x30
27#define CPU_CA53_CFG_ARM_CFG_TE_SHIFT 8
28#define CPU_CA53_CFG_ARM_CFG_TE_MASK 0x300
29#define CPU_CA53_CFG_ARM_CFG_VINITHI_SHIFT 12
30#define CPU_CA53_CFG_ARM_CFG_VINITHI_MASK 0x3000
31
32/* CPU_CA53_CFG_RST_ADDR_LSB */
33#define CPU_CA53_CFG_RST_ADDR_LSB_VECTOR_SHIFT 0
34#define CPU_CA53_CFG_RST_ADDR_LSB_VECTOR_MASK 0xFFFFFFFF
35
36/* CPU_CA53_CFG_RST_ADDR_MSB */
37#define CPU_CA53_CFG_RST_ADDR_MSB_VECTOR_SHIFT 0
38#define CPU_CA53_CFG_RST_ADDR_MSB_VECTOR_MASK 0xFF
39
40/* CPU_CA53_CFG_ARM_RST_CONTROL */
41#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT 0
42#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_MASK 0x3
43#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT 4
44#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_MASK 0x30
45#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT 8
46#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_MASK 0x100
47#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_SHIFT 12
48#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_MASK 0x1000
49#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT 16
50#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_MASK 0x10000
51#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_SHIFT 20
52#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_MASK 0x300000
53
54/* CPU_CA53_CFG_ARM_AFFINITY */
55#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_1_SHIFT 0
56#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_1_MASK 0xFF
57#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_2_SHIFT 8
58#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_2_MASK 0xFF00
59
60/* CPU_CA53_CFG_ARM_DISABLE */
61#define CPU_CA53_CFG_ARM_DISABLE_CP15S_SHIFT 0
62#define CPU_CA53_CFG_ARM_DISABLE_CP15S_MASK 0x3
63#define CPU_CA53_CFG_ARM_DISABLE_CRYPTO_SHIFT 4
64#define CPU_CA53_CFG_ARM_DISABLE_CRYPTO_MASK 0x30
65#define CPU_CA53_CFG_ARM_DISABLE_L2_RST_SHIFT 8
66#define CPU_CA53_CFG_ARM_DISABLE_L2_RST_MASK 0x100
67#define CPU_CA53_CFG_ARM_DISABLE_DBG_L1_RST_SHIFT 9
68#define CPU_CA53_CFG_ARM_DISABLE_DBG_L1_RST_MASK 0x200
69
70/* CPU_CA53_CFG_ARM_GIC_PERIPHBASE */
71#define CPU_CA53_CFG_ARM_GIC_PERIPHBASE_PERIPHBASE_SHIFT 0
72#define CPU_CA53_CFG_ARM_GIC_PERIPHBASE_PERIPHBASE_MASK 0x3FFFFF
73
74/* CPU_CA53_CFG_ARM_GIC_IRQ_CFG */
75#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NREI_SHIFT 0
76#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NREI_MASK 0x3
77#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NSEI_SHIFT 4
78#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NSEI_MASK 0x30
79#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NIRQ_SHIFT 8
80#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NIRQ_MASK 0x300
81#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NFIQ_SHIFT 12
82#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NFIQ_MASK 0x3000
83#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVFIQ_SHIFT 16
84#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVFIQ_MASK 0x30000
85#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVIRQ_SHIFT 20
86#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVIRQ_MASK 0x300000
87#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVSEI_SHIFT 24
88#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVSEI_MASK 0x3000000
89#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_GIC_EN_SHIFT 31
90#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_GIC_EN_MASK 0x80000000
91
92/* CPU_CA53_CFG_ARM_PWR_MNG */
93#define CPU_CA53_CFG_ARM_PWR_MNG_CLREXMONREQ_SHIFT 0
94#define CPU_CA53_CFG_ARM_PWR_MNG_CLREXMONREQ_MASK 0x1
95#define CPU_CA53_CFG_ARM_PWR_MNG_EVENTI_SHIFT 1
96#define CPU_CA53_CFG_ARM_PWR_MNG_EVENTI_MASK 0x2
97#define CPU_CA53_CFG_ARM_PWR_MNG_L2FLUSHREQ_SHIFT 2
98#define CPU_CA53_CFG_ARM_PWR_MNG_L2FLUSHREQ_MASK 0x4
99#define CPU_CA53_CFG_ARM_PWR_MNG_L2QREQN_SHIFT 3
100#define CPU_CA53_CFG_ARM_PWR_MNG_L2QREQN_MASK 0x8
101#define CPU_CA53_CFG_ARM_PWR_MNG_CPUQREQN_SHIFT 4
102#define CPU_CA53_CFG_ARM_PWR_MNG_CPUQREQN_MASK 0x30
103#define CPU_CA53_CFG_ARM_PWR_MNG_NEONQREQN_SHIFT 8
104#define CPU_CA53_CFG_ARM_PWR_MNG_NEONQREQN_MASK 0x300
105#define CPU_CA53_CFG_ARM_PWR_MNG_DBGPWRDUP_SHIFT 12
106#define CPU_CA53_CFG_ARM_PWR_MNG_DBGPWRDUP_MASK 0x3000
107
108/* CPU_CA53_CFG_ARB_DBG_ROM_ADDR */
109#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_SHIFT 0
110#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_MASK 0xFFFFFFF
111#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_VALID_SHIFT 31
112#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_VALID_MASK 0x80000000
113
114/* CPU_CA53_CFG_ARM_DBG_MODES */
115#define CPU_CA53_CFG_ARM_DBG_MODES_EDBGRQ_SHIFT 0
116#define CPU_CA53_CFG_ARM_DBG_MODES_EDBGRQ_MASK 0x3
117#define CPU_CA53_CFG_ARM_DBG_MODES_DBGEN_SHIFT 4
118#define CPU_CA53_CFG_ARM_DBG_MODES_DBGEN_MASK 0x30
119#define CPU_CA53_CFG_ARM_DBG_MODES_NIDEN_SHIFT 8
120#define CPU_CA53_CFG_ARM_DBG_MODES_NIDEN_MASK 0x300
121#define CPU_CA53_CFG_ARM_DBG_MODES_SPIDEN_SHIFT 12
122#define CPU_CA53_CFG_ARM_DBG_MODES_SPIDEN_MASK 0x3000
123#define CPU_CA53_CFG_ARM_DBG_MODES_SPNIDEN_SHIFT 16
124#define CPU_CA53_CFG_ARM_DBG_MODES_SPNIDEN_MASK 0x30000
125
126/* CPU_CA53_CFG_ARM_PWR_STAT_0 */
127#define CPU_CA53_CFG_ARM_PWR_STAT_0_CLREXMONACK_SHIFT 0
128#define CPU_CA53_CFG_ARM_PWR_STAT_0_CLREXMONACK_MASK 0x1
129#define CPU_CA53_CFG_ARM_PWR_STAT_0_EVENTO_SHIFT 1
130#define CPU_CA53_CFG_ARM_PWR_STAT_0_EVENTO_MASK 0x2
131#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFI_SHIFT 4
132#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFI_MASK 0x30
133#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFE_SHIFT 8
134#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFE_MASK 0x300
135#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFIL2_SHIFT 12
136#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFIL2_MASK 0x1000
137#define CPU_CA53_CFG_ARM_PWR_STAT_0_L2FLUSHDONE_SHIFT 13
138#define CPU_CA53_CFG_ARM_PWR_STAT_0_L2FLUSHDONE_MASK 0x2000
139#define CPU_CA53_CFG_ARM_PWR_STAT_0_SMPEN_SHIFT 16
140#define CPU_CA53_CFG_ARM_PWR_STAT_0_SMPEN_MASK 0x30000
141
142/* CPU_CA53_CFG_ARM_PWR_STAT_1 */
143#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACTIVE_SHIFT 0
144#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACTIVE_MASK 0x3
145#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQDENY_SHIFT 4
146#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQDENY_MASK 0x30
147#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACCEPTN_SHIFT 8
148#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACCEPTN_MASK 0x300
149#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACTIVE_SHIFT 12
150#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACTIVE_MASK 0x3000
151#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQDENY_SHIFT 16
152#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQDENY_MASK 0x30000
153#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACCEPTN_SHIFT 20
154#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACCEPTN_MASK 0x300000
155#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACTIVE_SHIFT 24
156#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACTIVE_MASK 0x1000000
157#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QDENY_SHIFT 25
158#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QDENY_MASK 0x2000000
159#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACCEPTN_SHIFT 26
160#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACCEPTN_MASK 0x4000000
161
162/* CPU_CA53_CFG_ARM_DBG_STATUS */
163#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGACK_SHIFT 0
164#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGACK_MASK 0x3
165#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMRX_SHIFT 4
166#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMRX_MASK 0x30
167#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMTX_SHIFT 8
168#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMTX_MASK 0x300
169#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGRSTREQ_SHIFT 12
170#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGRSTREQ_MASK 0x3000
171#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGNOPWRDWN_SHIFT 16
172#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGNOPWRDWN_MASK 0x30000
173#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGPWRUPREQ_SHIFT 20
174#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGPWRUPREQ_MASK 0x300000
175
176/* CPU_CA53_CFG_ARM_MEM_ATTR */
177#define CPU_CA53_CFG_ARM_MEM_ATTR_RDMEMATTR_SHIFT 0
178#define CPU_CA53_CFG_ARM_MEM_ATTR_RDMEMATTR_MASK 0xFF
179#define CPU_CA53_CFG_ARM_MEM_ATTR_WRMEMATTR_SHIFT 8
180#define CPU_CA53_CFG_ARM_MEM_ATTR_WRMEMATTR_MASK 0xFF00
181#define CPU_CA53_CFG_ARM_MEM_ATTR_RACKM_SHIFT 16
182#define CPU_CA53_CFG_ARM_MEM_ATTR_RACKM_MASK 0x10000
183#define CPU_CA53_CFG_ARM_MEM_ATTR_WACKM_SHIFT 20
184#define CPU_CA53_CFG_ARM_MEM_ATTR_WACKM_MASK 0x100000
185
186/* CPU_CA53_CFG_ARM_PMU */
187#define CPU_CA53_CFG_ARM_PMU_EVENT_SHIFT 0
188#define CPU_CA53_CFG_ARM_PMU_EVENT_MASK 0x3FFFFFFF
189
190#endif /* ASIC_REG_CPU_CA53_CFG_MASKS_H_ */
191
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
new file mode 100644
index 000000000000..840ccffa1081
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
@@ -0,0 +1,61 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_CPU_CA53_CFG_REGS_H_
14#define ASIC_REG_CPU_CA53_CFG_REGS_H_
15
16/*
17 *****************************************
18 * CPU_CA53_CFG (Prototype: CA53_CFG)
19 *****************************************
20 */
21
22#define mmCPU_CA53_CFG_ARM_CFG 0x441100
23
24#define mmCPU_CA53_CFG_RST_ADDR_LSB_0 0x441104
25
26#define mmCPU_CA53_CFG_RST_ADDR_LSB_1 0x441108
27
28#define mmCPU_CA53_CFG_RST_ADDR_MSB_0 0x441114
29
30#define mmCPU_CA53_CFG_RST_ADDR_MSB_1 0x441118
31
32#define mmCPU_CA53_CFG_ARM_RST_CONTROL 0x441124
33
34#define mmCPU_CA53_CFG_ARM_AFFINITY 0x441128
35
36#define mmCPU_CA53_CFG_ARM_DISABLE 0x44112C
37
38#define mmCPU_CA53_CFG_ARM_GIC_PERIPHBASE 0x441130
39
40#define mmCPU_CA53_CFG_ARM_GIC_IRQ_CFG 0x441134
41
42#define mmCPU_CA53_CFG_ARM_PWR_MNG 0x441138
43
44#define mmCPU_CA53_CFG_ARB_DBG_ROM_ADDR 0x44113C
45
46#define mmCPU_CA53_CFG_ARM_DBG_MODES 0x441140
47
48#define mmCPU_CA53_CFG_ARM_PWR_STAT_0 0x441200
49
50#define mmCPU_CA53_CFG_ARM_PWR_STAT_1 0x441204
51
52#define mmCPU_CA53_CFG_ARM_DBG_STATUS 0x441208
53
54#define mmCPU_CA53_CFG_ARM_MEM_ATTR 0x44120C
55
56#define mmCPU_CA53_CFG_ARM_PMU_0 0x441210
57
58#define mmCPU_CA53_CFG_ARM_PMU_1 0x441214
59
60#endif /* ASIC_REG_CPU_CA53_CFG_REGS_H_ */
61
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
new file mode 100644
index 000000000000..f23cb3e41c30
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
@@ -0,0 +1,49 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_CPU_IF_REGS_H_
14#define ASIC_REG_CPU_IF_REGS_H_
15
16/*
17 *****************************************
18 * CPU_IF (Prototype: CPU_IF)
19 *****************************************
20 */
21
22#define mmCPU_IF_PF_PQ_PI 0x442100
23
24#define mmCPU_IF_ARUSER_OVR 0x442104
25
26#define mmCPU_IF_ARUSER_OVR_EN 0x442108
27
28#define mmCPU_IF_AWUSER_OVR 0x44210C
29
30#define mmCPU_IF_AWUSER_OVR_EN 0x442110
31
32#define mmCPU_IF_AXCACHE_OVR 0x442114
33
34#define mmCPU_IF_LOCK_OVR 0x442118
35
36#define mmCPU_IF_PROT_OVR 0x44211C
37
38#define mmCPU_IF_MAX_OUTSTANDING 0x442120
39
40#define mmCPU_IF_EARLY_BRESP_EN 0x442124
41
42#define mmCPU_IF_FORCE_RSP_OK 0x442128
43
44#define mmCPU_IF_CPU_MSB_ADDR 0x44212C
45
46#define mmCPU_IF_AXI_SPLIT_INTR 0x442130
47
48#endif /* ASIC_REG_CPU_IF_REGS_H_ */
49
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
new file mode 100644
index 000000000000..8fc97f838ada
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
@@ -0,0 +1,105 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_CPU_PLL_REGS_H_
14#define ASIC_REG_CPU_PLL_REGS_H_
15
16/*
17 *****************************************
18 * CPU_PLL (Prototype: PLL)
19 *****************************************
20 */
21
22#define mmCPU_PLL_NR 0x4A2100
23
24#define mmCPU_PLL_NF 0x4A2104
25
26#define mmCPU_PLL_OD 0x4A2108
27
28#define mmCPU_PLL_NB 0x4A210C
29
30#define mmCPU_PLL_CFG 0x4A2110
31
32#define mmCPU_PLL_LOSE_MASK 0x4A2120
33
34#define mmCPU_PLL_LOCK_INTR 0x4A2128
35
36#define mmCPU_PLL_LOCK_BYPASS 0x4A212C
37
38#define mmCPU_PLL_DATA_CHNG 0x4A2130
39
40#define mmCPU_PLL_RST 0x4A2134
41
42#define mmCPU_PLL_SLIP_WD_CNTR 0x4A2150
43
44#define mmCPU_PLL_DIV_FACTOR_0 0x4A2200
45
46#define mmCPU_PLL_DIV_FACTOR_1 0x4A2204
47
48#define mmCPU_PLL_DIV_FACTOR_2 0x4A2208
49
50#define mmCPU_PLL_DIV_FACTOR_3 0x4A220C
51
52#define mmCPU_PLL_DIV_FACTOR_CMD_0 0x4A2220
53
54#define mmCPU_PLL_DIV_FACTOR_CMD_1 0x4A2224
55
56#define mmCPU_PLL_DIV_FACTOR_CMD_2 0x4A2228
57
58#define mmCPU_PLL_DIV_FACTOR_CMD_3 0x4A222C
59
60#define mmCPU_PLL_DIV_SEL_0 0x4A2280
61
62#define mmCPU_PLL_DIV_SEL_1 0x4A2284
63
64#define mmCPU_PLL_DIV_SEL_2 0x4A2288
65
66#define mmCPU_PLL_DIV_SEL_3 0x4A228C
67
68#define mmCPU_PLL_DIV_EN_0 0x4A22A0
69
70#define mmCPU_PLL_DIV_EN_1 0x4A22A4
71
72#define mmCPU_PLL_DIV_EN_2 0x4A22A8
73
74#define mmCPU_PLL_DIV_EN_3 0x4A22AC
75
76#define mmCPU_PLL_DIV_FACTOR_BUSY_0 0x4A22C0
77
78#define mmCPU_PLL_DIV_FACTOR_BUSY_1 0x4A22C4
79
80#define mmCPU_PLL_DIV_FACTOR_BUSY_2 0x4A22C8
81
82#define mmCPU_PLL_DIV_FACTOR_BUSY_3 0x4A22CC
83
84#define mmCPU_PLL_CLK_GATER 0x4A2300
85
86#define mmCPU_PLL_CLK_RLX_0 0x4A2310
87
88#define mmCPU_PLL_CLK_RLX_1 0x4A2314
89
90#define mmCPU_PLL_CLK_RLX_2 0x4A2318
91
92#define mmCPU_PLL_CLK_RLX_3 0x4A231C
93
94#define mmCPU_PLL_REF_CNTR_PERIOD 0x4A2400
95
96#define mmCPU_PLL_REF_LOW_THRESHOLD 0x4A2410
97
98#define mmCPU_PLL_REF_HIGH_THRESHOLD 0x4A2420
99
100#define mmCPU_PLL_PLL_NOT_STABLE 0x4A2430
101
102#define mmCPU_PLL_FREQ_CALC_EN 0x4A2440
103
104#endif /* ASIC_REG_CPU_PLL_REGS_H_ */
105
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
new file mode 100644
index 000000000000..61c8cd9ce58b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
@@ -0,0 +1,209 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_CH_0_REGS_H_
14#define ASIC_REG_DMA_CH_0_REGS_H_
15
16/*
17 *****************************************
18 * DMA_CH_0 (Prototype: DMA_CH)
19 *****************************************
20 */
21
22#define mmDMA_CH_0_CFG0 0x401000
23
24#define mmDMA_CH_0_CFG1 0x401004
25
26#define mmDMA_CH_0_ERRMSG_ADDR_LO 0x401008
27
28#define mmDMA_CH_0_ERRMSG_ADDR_HI 0x40100C
29
30#define mmDMA_CH_0_ERRMSG_WDATA 0x401010
31
32#define mmDMA_CH_0_RD_COMP_ADDR_LO 0x401014
33
34#define mmDMA_CH_0_RD_COMP_ADDR_HI 0x401018
35
36#define mmDMA_CH_0_RD_COMP_WDATA 0x40101C
37
38#define mmDMA_CH_0_WR_COMP_ADDR_LO 0x401020
39
40#define mmDMA_CH_0_WR_COMP_ADDR_HI 0x401024
41
42#define mmDMA_CH_0_WR_COMP_WDATA 0x401028
43
44#define mmDMA_CH_0_LDMA_SRC_ADDR_LO 0x40102C
45
46#define mmDMA_CH_0_LDMA_SRC_ADDR_HI 0x401030
47
48#define mmDMA_CH_0_LDMA_DST_ADDR_LO 0x401034
49
50#define mmDMA_CH_0_LDMA_DST_ADDR_HI 0x401038
51
52#define mmDMA_CH_0_LDMA_TSIZE 0x40103C
53
54#define mmDMA_CH_0_COMIT_TRANSFER 0x401040
55
56#define mmDMA_CH_0_STS0 0x401044
57
58#define mmDMA_CH_0_STS1 0x401048
59
60#define mmDMA_CH_0_STS2 0x40104C
61
62#define mmDMA_CH_0_STS3 0x401050
63
64#define mmDMA_CH_0_STS4 0x401054
65
66#define mmDMA_CH_0_SRC_ADDR_LO_STS 0x401058
67
68#define mmDMA_CH_0_SRC_ADDR_HI_STS 0x40105C
69
70#define mmDMA_CH_0_SRC_TSIZE_STS 0x401060
71
72#define mmDMA_CH_0_DST_ADDR_LO_STS 0x401064
73
74#define mmDMA_CH_0_DST_ADDR_HI_STS 0x401068
75
76#define mmDMA_CH_0_DST_TSIZE_STS 0x40106C
77
78#define mmDMA_CH_0_RD_RATE_LIM_EN 0x401070
79
80#define mmDMA_CH_0_RD_RATE_LIM_RST_TOKEN 0x401074
81
82#define mmDMA_CH_0_RD_RATE_LIM_SAT 0x401078
83
84#define mmDMA_CH_0_RD_RATE_LIM_TOUT 0x40107C
85
86#define mmDMA_CH_0_WR_RATE_LIM_EN 0x401080
87
88#define mmDMA_CH_0_WR_RATE_LIM_RST_TOKEN 0x401084
89
90#define mmDMA_CH_0_WR_RATE_LIM_SAT 0x401088
91
92#define mmDMA_CH_0_WR_RATE_LIM_TOUT 0x40108C
93
94#define mmDMA_CH_0_CFG2 0x401090
95
96#define mmDMA_CH_0_TDMA_CTL 0x401100
97
98#define mmDMA_CH_0_TDMA_SRC_BASE_ADDR_LO 0x401104
99
100#define mmDMA_CH_0_TDMA_SRC_BASE_ADDR_HI 0x401108
101
102#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_0 0x40110C
103
104#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_0 0x401110
105
106#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_0 0x401114
107
108#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_0 0x401118
109
110#define mmDMA_CH_0_TDMA_SRC_STRIDE_0 0x40111C
111
112#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_1 0x401120
113
114#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_1 0x401124
115
116#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_1 0x401128
117
118#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_1 0x40112C
119
120#define mmDMA_CH_0_TDMA_SRC_STRIDE_1 0x401130
121
122#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_2 0x401134
123
124#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_2 0x401138
125
126#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_2 0x40113C
127
128#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_2 0x401140
129
130#define mmDMA_CH_0_TDMA_SRC_STRIDE_2 0x401144
131
132#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_3 0x401148
133
134#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_3 0x40114C
135
136#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_3 0x401150
137
138#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_3 0x401154
139
140#define mmDMA_CH_0_TDMA_SRC_STRIDE_3 0x401158
141
142#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_4 0x40115C
143
144#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_4 0x401160
145
146#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_4 0x401164
147
148#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_4 0x401168
149
150#define mmDMA_CH_0_TDMA_SRC_STRIDE_4 0x40116C
151
152#define mmDMA_CH_0_TDMA_DST_BASE_ADDR_LO 0x401170
153
154#define mmDMA_CH_0_TDMA_DST_BASE_ADDR_HI 0x401174
155
156#define mmDMA_CH_0_TDMA_DST_ROI_BASE_0 0x401178
157
158#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_0 0x40117C
159
160#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_0 0x401180
161
162#define mmDMA_CH_0_TDMA_DST_START_OFFSET_0 0x401184
163
164#define mmDMA_CH_0_TDMA_DST_STRIDE_0 0x401188
165
166#define mmDMA_CH_0_TDMA_DST_ROI_BASE_1 0x40118C
167
168#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_1 0x401190
169
170#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_1 0x401194
171
172#define mmDMA_CH_0_TDMA_DST_START_OFFSET_1 0x401198
173
174#define mmDMA_CH_0_TDMA_DST_STRIDE_1 0x40119C
175
176#define mmDMA_CH_0_TDMA_DST_ROI_BASE_2 0x4011A0
177
178#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_2 0x4011A4
179
180#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_2 0x4011A8
181
182#define mmDMA_CH_0_TDMA_DST_START_OFFSET_2 0x4011AC
183
184#define mmDMA_CH_0_TDMA_DST_STRIDE_2 0x4011B0
185
186#define mmDMA_CH_0_TDMA_DST_ROI_BASE_3 0x4011B4
187
188#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_3 0x4011B8
189
190#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_3 0x4011BC
191
192#define mmDMA_CH_0_TDMA_DST_START_OFFSET_3 0x4011C0
193
194#define mmDMA_CH_0_TDMA_DST_STRIDE_3 0x4011C4
195
196#define mmDMA_CH_0_TDMA_DST_ROI_BASE_4 0x4011C8
197
198#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_4 0x4011CC
199
200#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_4 0x4011D0
201
202#define mmDMA_CH_0_TDMA_DST_START_OFFSET_4 0x4011D4
203
204#define mmDMA_CH_0_TDMA_DST_STRIDE_4 0x4011D8
205
206#define mmDMA_CH_0_MEM_INIT_BUSY 0x4011FC
207
208#endif /* ASIC_REG_DMA_CH_0_REGS_H_ */
209
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
new file mode 100644
index 000000000000..92960ef5e308
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
@@ -0,0 +1,209 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_CH_1_REGS_H_
14#define ASIC_REG_DMA_CH_1_REGS_H_
15
16/*
17 *****************************************
18 * DMA_CH_1 (Prototype: DMA_CH)
19 *****************************************
20 */
21
22#define mmDMA_CH_1_CFG0 0x409000
23
24#define mmDMA_CH_1_CFG1 0x409004
25
26#define mmDMA_CH_1_ERRMSG_ADDR_LO 0x409008
27
28#define mmDMA_CH_1_ERRMSG_ADDR_HI 0x40900C
29
30#define mmDMA_CH_1_ERRMSG_WDATA 0x409010
31
32#define mmDMA_CH_1_RD_COMP_ADDR_LO 0x409014
33
34#define mmDMA_CH_1_RD_COMP_ADDR_HI 0x409018
35
36#define mmDMA_CH_1_RD_COMP_WDATA 0x40901C
37
38#define mmDMA_CH_1_WR_COMP_ADDR_LO 0x409020
39
40#define mmDMA_CH_1_WR_COMP_ADDR_HI 0x409024
41
42#define mmDMA_CH_1_WR_COMP_WDATA 0x409028
43
44#define mmDMA_CH_1_LDMA_SRC_ADDR_LO 0x40902C
45
46#define mmDMA_CH_1_LDMA_SRC_ADDR_HI 0x409030
47
48#define mmDMA_CH_1_LDMA_DST_ADDR_LO 0x409034
49
50#define mmDMA_CH_1_LDMA_DST_ADDR_HI 0x409038
51
52#define mmDMA_CH_1_LDMA_TSIZE 0x40903C
53
54#define mmDMA_CH_1_COMIT_TRANSFER 0x409040
55
56#define mmDMA_CH_1_STS0 0x409044
57
58#define mmDMA_CH_1_STS1 0x409048
59
60#define mmDMA_CH_1_STS2 0x40904C
61
62#define mmDMA_CH_1_STS3 0x409050
63
64#define mmDMA_CH_1_STS4 0x409054
65
66#define mmDMA_CH_1_SRC_ADDR_LO_STS 0x409058
67
68#define mmDMA_CH_1_SRC_ADDR_HI_STS 0x40905C
69
70#define mmDMA_CH_1_SRC_TSIZE_STS 0x409060
71
72#define mmDMA_CH_1_DST_ADDR_LO_STS 0x409064
73
74#define mmDMA_CH_1_DST_ADDR_HI_STS 0x409068
75
76#define mmDMA_CH_1_DST_TSIZE_STS 0x40906C
77
78#define mmDMA_CH_1_RD_RATE_LIM_EN 0x409070
79
80#define mmDMA_CH_1_RD_RATE_LIM_RST_TOKEN 0x409074
81
82#define mmDMA_CH_1_RD_RATE_LIM_SAT 0x409078
83
84#define mmDMA_CH_1_RD_RATE_LIM_TOUT 0x40907C
85
86#define mmDMA_CH_1_WR_RATE_LIM_EN 0x409080
87
88#define mmDMA_CH_1_WR_RATE_LIM_RST_TOKEN 0x409084
89
90#define mmDMA_CH_1_WR_RATE_LIM_SAT 0x409088
91
92#define mmDMA_CH_1_WR_RATE_LIM_TOUT 0x40908C
93
94#define mmDMA_CH_1_CFG2 0x409090
95
96#define mmDMA_CH_1_TDMA_CTL 0x409100
97
98#define mmDMA_CH_1_TDMA_SRC_BASE_ADDR_LO 0x409104
99
100#define mmDMA_CH_1_TDMA_SRC_BASE_ADDR_HI 0x409108
101
102#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_0 0x40910C
103
104#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_0 0x409110
105
106#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_0 0x409114
107
108#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_0 0x409118
109
110#define mmDMA_CH_1_TDMA_SRC_STRIDE_0 0x40911C
111
112#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_1 0x409120
113
114#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_1 0x409124
115
116#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_1 0x409128
117
118#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_1 0x40912C
119
120#define mmDMA_CH_1_TDMA_SRC_STRIDE_1 0x409130
121
122#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_2 0x409134
123
124#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_2 0x409138
125
126#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_2 0x40913C
127
128#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_2 0x409140
129
130#define mmDMA_CH_1_TDMA_SRC_STRIDE_2 0x409144
131
132#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_3 0x409148
133
134#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_3 0x40914C
135
136#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_3 0x409150
137
138#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_3 0x409154
139
140#define mmDMA_CH_1_TDMA_SRC_STRIDE_3 0x409158
141
142#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_4 0x40915C
143
144#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_4 0x409160
145
146#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_4 0x409164
147
148#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_4 0x409168
149
150#define mmDMA_CH_1_TDMA_SRC_STRIDE_4 0x40916C
151
152#define mmDMA_CH_1_TDMA_DST_BASE_ADDR_LO 0x409170
153
154#define mmDMA_CH_1_TDMA_DST_BASE_ADDR_HI 0x409174
155
156#define mmDMA_CH_1_TDMA_DST_ROI_BASE_0 0x409178
157
158#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_0 0x40917C
159
160#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_0 0x409180
161
162#define mmDMA_CH_1_TDMA_DST_START_OFFSET_0 0x409184
163
164#define mmDMA_CH_1_TDMA_DST_STRIDE_0 0x409188
165
166#define mmDMA_CH_1_TDMA_DST_ROI_BASE_1 0x40918C
167
168#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_1 0x409190
169
170#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_1 0x409194
171
172#define mmDMA_CH_1_TDMA_DST_START_OFFSET_1 0x409198
173
174#define mmDMA_CH_1_TDMA_DST_STRIDE_1 0x40919C
175
176#define mmDMA_CH_1_TDMA_DST_ROI_BASE_2 0x4091A0
177
178#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_2 0x4091A4
179
180#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_2 0x4091A8
181
182#define mmDMA_CH_1_TDMA_DST_START_OFFSET_2 0x4091AC
183
184#define mmDMA_CH_1_TDMA_DST_STRIDE_2 0x4091B0
185
186#define mmDMA_CH_1_TDMA_DST_ROI_BASE_3 0x4091B4
187
188#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_3 0x4091B8
189
190#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_3 0x4091BC
191
192#define mmDMA_CH_1_TDMA_DST_START_OFFSET_3 0x4091C0
193
194#define mmDMA_CH_1_TDMA_DST_STRIDE_3 0x4091C4
195
196#define mmDMA_CH_1_TDMA_DST_ROI_BASE_4 0x4091C8
197
198#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_4 0x4091CC
199
200#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_4 0x4091D0
201
202#define mmDMA_CH_1_TDMA_DST_START_OFFSET_4 0x4091D4
203
204#define mmDMA_CH_1_TDMA_DST_STRIDE_4 0x4091D8
205
206#define mmDMA_CH_1_MEM_INIT_BUSY 0x4091FC
207
208#endif /* ASIC_REG_DMA_CH_1_REGS_H_ */
209
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
new file mode 100644
index 000000000000..4e37871a51bb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
@@ -0,0 +1,209 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_CH_2_REGS_H_
14#define ASIC_REG_DMA_CH_2_REGS_H_
15
16/*
17 *****************************************
18 * DMA_CH_2 (Prototype: DMA_CH)
19 *****************************************
20 */
21
22#define mmDMA_CH_2_CFG0 0x411000
23
24#define mmDMA_CH_2_CFG1 0x411004
25
26#define mmDMA_CH_2_ERRMSG_ADDR_LO 0x411008
27
28#define mmDMA_CH_2_ERRMSG_ADDR_HI 0x41100C
29
30#define mmDMA_CH_2_ERRMSG_WDATA 0x411010
31
32#define mmDMA_CH_2_RD_COMP_ADDR_LO 0x411014
33
34#define mmDMA_CH_2_RD_COMP_ADDR_HI 0x411018
35
36#define mmDMA_CH_2_RD_COMP_WDATA 0x41101C
37
38#define mmDMA_CH_2_WR_COMP_ADDR_LO 0x411020
39
40#define mmDMA_CH_2_WR_COMP_ADDR_HI 0x411024
41
42#define mmDMA_CH_2_WR_COMP_WDATA 0x411028
43
44#define mmDMA_CH_2_LDMA_SRC_ADDR_LO 0x41102C
45
46#define mmDMA_CH_2_LDMA_SRC_ADDR_HI 0x411030
47
48#define mmDMA_CH_2_LDMA_DST_ADDR_LO 0x411034
49
50#define mmDMA_CH_2_LDMA_DST_ADDR_HI 0x411038
51
52#define mmDMA_CH_2_LDMA_TSIZE 0x41103C
53
54#define mmDMA_CH_2_COMIT_TRANSFER 0x411040
55
56#define mmDMA_CH_2_STS0 0x411044
57
58#define mmDMA_CH_2_STS1 0x411048
59
60#define mmDMA_CH_2_STS2 0x41104C
61
62#define mmDMA_CH_2_STS3 0x411050
63
64#define mmDMA_CH_2_STS4 0x411054
65
66#define mmDMA_CH_2_SRC_ADDR_LO_STS 0x411058
67
68#define mmDMA_CH_2_SRC_ADDR_HI_STS 0x41105C
69
70#define mmDMA_CH_2_SRC_TSIZE_STS 0x411060
71
72#define mmDMA_CH_2_DST_ADDR_LO_STS 0x411064
73
74#define mmDMA_CH_2_DST_ADDR_HI_STS 0x411068
75
76#define mmDMA_CH_2_DST_TSIZE_STS 0x41106C
77
78#define mmDMA_CH_2_RD_RATE_LIM_EN 0x411070
79
80#define mmDMA_CH_2_RD_RATE_LIM_RST_TOKEN 0x411074
81
82#define mmDMA_CH_2_RD_RATE_LIM_SAT 0x411078
83
84#define mmDMA_CH_2_RD_RATE_LIM_TOUT 0x41107C
85
86#define mmDMA_CH_2_WR_RATE_LIM_EN 0x411080
87
88#define mmDMA_CH_2_WR_RATE_LIM_RST_TOKEN 0x411084
89
90#define mmDMA_CH_2_WR_RATE_LIM_SAT 0x411088
91
92#define mmDMA_CH_2_WR_RATE_LIM_TOUT 0x41108C
93
94#define mmDMA_CH_2_CFG2 0x411090
95
96#define mmDMA_CH_2_TDMA_CTL 0x411100
97
98#define mmDMA_CH_2_TDMA_SRC_BASE_ADDR_LO 0x411104
99
100#define mmDMA_CH_2_TDMA_SRC_BASE_ADDR_HI 0x411108
101
102#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_0 0x41110C
103
104#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_0 0x411110
105
106#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_0 0x411114
107
108#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_0 0x411118
109
110#define mmDMA_CH_2_TDMA_SRC_STRIDE_0 0x41111C
111
112#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_1 0x411120
113
114#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_1 0x411124
115
116#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_1 0x411128
117
118#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_1 0x41112C
119
120#define mmDMA_CH_2_TDMA_SRC_STRIDE_1 0x411130
121
122#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_2 0x411134
123
124#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_2 0x411138
125
126#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_2 0x41113C
127
128#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_2 0x411140
129
130#define mmDMA_CH_2_TDMA_SRC_STRIDE_2 0x411144
131
132#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_3 0x411148
133
134#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_3 0x41114C
135
136#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_3 0x411150
137
138#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_3 0x411154
139
140#define mmDMA_CH_2_TDMA_SRC_STRIDE_3 0x411158
141
142#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_4 0x41115C
143
144#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_4 0x411160
145
146#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_4 0x411164
147
148#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_4 0x411168
149
150#define mmDMA_CH_2_TDMA_SRC_STRIDE_4 0x41116C
151
152#define mmDMA_CH_2_TDMA_DST_BASE_ADDR_LO 0x411170
153
154#define mmDMA_CH_2_TDMA_DST_BASE_ADDR_HI 0x411174
155
156#define mmDMA_CH_2_TDMA_DST_ROI_BASE_0 0x411178
157
158#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_0 0x41117C
159
160#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_0 0x411180
161
162#define mmDMA_CH_2_TDMA_DST_START_OFFSET_0 0x411184
163
164#define mmDMA_CH_2_TDMA_DST_STRIDE_0 0x411188
165
166#define mmDMA_CH_2_TDMA_DST_ROI_BASE_1 0x41118C
167
168#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_1 0x411190
169
170#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_1 0x411194
171
172#define mmDMA_CH_2_TDMA_DST_START_OFFSET_1 0x411198
173
174#define mmDMA_CH_2_TDMA_DST_STRIDE_1 0x41119C
175
176#define mmDMA_CH_2_TDMA_DST_ROI_BASE_2 0x4111A0
177
178#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_2 0x4111A4
179
180#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_2 0x4111A8
181
182#define mmDMA_CH_2_TDMA_DST_START_OFFSET_2 0x4111AC
183
184#define mmDMA_CH_2_TDMA_DST_STRIDE_2 0x4111B0
185
186#define mmDMA_CH_2_TDMA_DST_ROI_BASE_3 0x4111B4
187
188#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_3 0x4111B8
189
190#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_3 0x4111BC
191
192#define mmDMA_CH_2_TDMA_DST_START_OFFSET_3 0x4111C0
193
194#define mmDMA_CH_2_TDMA_DST_STRIDE_3 0x4111C4
195
196#define mmDMA_CH_2_TDMA_DST_ROI_BASE_4 0x4111C8
197
198#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_4 0x4111CC
199
200#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_4 0x4111D0
201
202#define mmDMA_CH_2_TDMA_DST_START_OFFSET_4 0x4111D4
203
204#define mmDMA_CH_2_TDMA_DST_STRIDE_4 0x4111D8
205
206#define mmDMA_CH_2_MEM_INIT_BUSY 0x4111FC
207
208#endif /* ASIC_REG_DMA_CH_2_REGS_H_ */
209
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
new file mode 100644
index 000000000000..a2d6aeb32a18
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
@@ -0,0 +1,209 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_CH_3_REGS_H_
14#define ASIC_REG_DMA_CH_3_REGS_H_
15
16/*
17 *****************************************
18 * DMA_CH_3 (Prototype: DMA_CH)
19 *****************************************
20 */
21
22#define mmDMA_CH_3_CFG0 0x419000
23
24#define mmDMA_CH_3_CFG1 0x419004
25
26#define mmDMA_CH_3_ERRMSG_ADDR_LO 0x419008
27
28#define mmDMA_CH_3_ERRMSG_ADDR_HI 0x41900C
29
30#define mmDMA_CH_3_ERRMSG_WDATA 0x419010
31
32#define mmDMA_CH_3_RD_COMP_ADDR_LO 0x419014
33
34#define mmDMA_CH_3_RD_COMP_ADDR_HI 0x419018
35
36#define mmDMA_CH_3_RD_COMP_WDATA 0x41901C
37
38#define mmDMA_CH_3_WR_COMP_ADDR_LO 0x419020
39
40#define mmDMA_CH_3_WR_COMP_ADDR_HI 0x419024
41
42#define mmDMA_CH_3_WR_COMP_WDATA 0x419028
43
44#define mmDMA_CH_3_LDMA_SRC_ADDR_LO 0x41902C
45
46#define mmDMA_CH_3_LDMA_SRC_ADDR_HI 0x419030
47
48#define mmDMA_CH_3_LDMA_DST_ADDR_LO 0x419034
49
50#define mmDMA_CH_3_LDMA_DST_ADDR_HI 0x419038
51
52#define mmDMA_CH_3_LDMA_TSIZE 0x41903C
53
54#define mmDMA_CH_3_COMIT_TRANSFER 0x419040
55
56#define mmDMA_CH_3_STS0 0x419044
57
58#define mmDMA_CH_3_STS1 0x419048
59
60#define mmDMA_CH_3_STS2 0x41904C
61
62#define mmDMA_CH_3_STS3 0x419050
63
64#define mmDMA_CH_3_STS4 0x419054
65
66#define mmDMA_CH_3_SRC_ADDR_LO_STS 0x419058
67
68#define mmDMA_CH_3_SRC_ADDR_HI_STS 0x41905C
69
70#define mmDMA_CH_3_SRC_TSIZE_STS 0x419060
71
72#define mmDMA_CH_3_DST_ADDR_LO_STS 0x419064
73
74#define mmDMA_CH_3_DST_ADDR_HI_STS 0x419068
75
76#define mmDMA_CH_3_DST_TSIZE_STS 0x41906C
77
78#define mmDMA_CH_3_RD_RATE_LIM_EN 0x419070
79
80#define mmDMA_CH_3_RD_RATE_LIM_RST_TOKEN 0x419074
81
82#define mmDMA_CH_3_RD_RATE_LIM_SAT 0x419078
83
84#define mmDMA_CH_3_RD_RATE_LIM_TOUT 0x41907C
85
86#define mmDMA_CH_3_WR_RATE_LIM_EN 0x419080
87
88#define mmDMA_CH_3_WR_RATE_LIM_RST_TOKEN 0x419084
89
90#define mmDMA_CH_3_WR_RATE_LIM_SAT 0x419088
91
92#define mmDMA_CH_3_WR_RATE_LIM_TOUT 0x41908C
93
94#define mmDMA_CH_3_CFG2 0x419090
95
96#define mmDMA_CH_3_TDMA_CTL 0x419100
97
98#define mmDMA_CH_3_TDMA_SRC_BASE_ADDR_LO 0x419104
99
100#define mmDMA_CH_3_TDMA_SRC_BASE_ADDR_HI 0x419108
101
102#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_0 0x41910C
103
104#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_0 0x419110
105
106#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_0 0x419114
107
108#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_0 0x419118
109
110#define mmDMA_CH_3_TDMA_SRC_STRIDE_0 0x41911C
111
112#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_1 0x419120
113
114#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_1 0x419124
115
116#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_1 0x419128
117
118#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_1 0x41912C
119
120#define mmDMA_CH_3_TDMA_SRC_STRIDE_1 0x419130
121
122#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_2 0x419134
123
124#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_2 0x419138
125
126#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_2 0x41913C
127
128#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_2 0x419140
129
130#define mmDMA_CH_3_TDMA_SRC_STRIDE_2 0x419144
131
132#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_3 0x419148
133
134#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_3 0x41914C
135
136#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_3 0x419150
137
138#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_3 0x419154
139
140#define mmDMA_CH_3_TDMA_SRC_STRIDE_3 0x419158
141
142#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_4 0x41915C
143
144#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_4 0x419160
145
146#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_4 0x419164
147
148#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_4 0x419168
149
150#define mmDMA_CH_3_TDMA_SRC_STRIDE_4 0x41916C
151
152#define mmDMA_CH_3_TDMA_DST_BASE_ADDR_LO 0x419170
153
154#define mmDMA_CH_3_TDMA_DST_BASE_ADDR_HI 0x419174
155
156#define mmDMA_CH_3_TDMA_DST_ROI_BASE_0 0x419178
157
158#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_0 0x41917C
159
160#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_0 0x419180
161
162#define mmDMA_CH_3_TDMA_DST_START_OFFSET_0 0x419184
163
164#define mmDMA_CH_3_TDMA_DST_STRIDE_0 0x419188
165
166#define mmDMA_CH_3_TDMA_DST_ROI_BASE_1 0x41918C
167
168#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_1 0x419190
169
170#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_1 0x419194
171
172#define mmDMA_CH_3_TDMA_DST_START_OFFSET_1 0x419198
173
174#define mmDMA_CH_3_TDMA_DST_STRIDE_1 0x41919C
175
176#define mmDMA_CH_3_TDMA_DST_ROI_BASE_2 0x4191A0
177
178#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_2 0x4191A4
179
180#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_2 0x4191A8
181
182#define mmDMA_CH_3_TDMA_DST_START_OFFSET_2 0x4191AC
183
184#define mmDMA_CH_3_TDMA_DST_STRIDE_2 0x4191B0
185
186#define mmDMA_CH_3_TDMA_DST_ROI_BASE_3 0x4191B4
187
188#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_3 0x4191B8
189
190#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_3 0x4191BC
191
192#define mmDMA_CH_3_TDMA_DST_START_OFFSET_3 0x4191C0
193
194#define mmDMA_CH_3_TDMA_DST_STRIDE_3 0x4191C4
195
196#define mmDMA_CH_3_TDMA_DST_ROI_BASE_4 0x4191C8
197
198#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_4 0x4191CC
199
200#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_4 0x4191D0
201
202#define mmDMA_CH_3_TDMA_DST_START_OFFSET_4 0x4191D4
203
204#define mmDMA_CH_3_TDMA_DST_STRIDE_4 0x4191D8
205
206#define mmDMA_CH_3_MEM_INIT_BUSY 0x4191FC
207
208#endif /* ASIC_REG_DMA_CH_3_REGS_H_ */
209
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
new file mode 100644
index 000000000000..400d6fd3acf5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
@@ -0,0 +1,209 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_CH_4_REGS_H_
14#define ASIC_REG_DMA_CH_4_REGS_H_
15
16/*
17 *****************************************
18 * DMA_CH_4 (Prototype: DMA_CH)
19 *****************************************
20 */
21
22#define mmDMA_CH_4_CFG0 0x421000
23
24#define mmDMA_CH_4_CFG1 0x421004
25
26#define mmDMA_CH_4_ERRMSG_ADDR_LO 0x421008
27
28#define mmDMA_CH_4_ERRMSG_ADDR_HI 0x42100C
29
30#define mmDMA_CH_4_ERRMSG_WDATA 0x421010
31
32#define mmDMA_CH_4_RD_COMP_ADDR_LO 0x421014
33
34#define mmDMA_CH_4_RD_COMP_ADDR_HI 0x421018
35
36#define mmDMA_CH_4_RD_COMP_WDATA 0x42101C
37
38#define mmDMA_CH_4_WR_COMP_ADDR_LO 0x421020
39
40#define mmDMA_CH_4_WR_COMP_ADDR_HI 0x421024
41
42#define mmDMA_CH_4_WR_COMP_WDATA 0x421028
43
44#define mmDMA_CH_4_LDMA_SRC_ADDR_LO 0x42102C
45
46#define mmDMA_CH_4_LDMA_SRC_ADDR_HI 0x421030
47
48#define mmDMA_CH_4_LDMA_DST_ADDR_LO 0x421034
49
50#define mmDMA_CH_4_LDMA_DST_ADDR_HI 0x421038
51
52#define mmDMA_CH_4_LDMA_TSIZE 0x42103C
53
54#define mmDMA_CH_4_COMIT_TRANSFER 0x421040
55
56#define mmDMA_CH_4_STS0 0x421044
57
58#define mmDMA_CH_4_STS1 0x421048
59
60#define mmDMA_CH_4_STS2 0x42104C
61
62#define mmDMA_CH_4_STS3 0x421050
63
64#define mmDMA_CH_4_STS4 0x421054
65
66#define mmDMA_CH_4_SRC_ADDR_LO_STS 0x421058
67
68#define mmDMA_CH_4_SRC_ADDR_HI_STS 0x42105C
69
70#define mmDMA_CH_4_SRC_TSIZE_STS 0x421060
71
72#define mmDMA_CH_4_DST_ADDR_LO_STS 0x421064
73
74#define mmDMA_CH_4_DST_ADDR_HI_STS 0x421068
75
76#define mmDMA_CH_4_DST_TSIZE_STS 0x42106C
77
78#define mmDMA_CH_4_RD_RATE_LIM_EN 0x421070
79
80#define mmDMA_CH_4_RD_RATE_LIM_RST_TOKEN 0x421074
81
82#define mmDMA_CH_4_RD_RATE_LIM_SAT 0x421078
83
84#define mmDMA_CH_4_RD_RATE_LIM_TOUT 0x42107C
85
86#define mmDMA_CH_4_WR_RATE_LIM_EN 0x421080
87
88#define mmDMA_CH_4_WR_RATE_LIM_RST_TOKEN 0x421084
89
90#define mmDMA_CH_4_WR_RATE_LIM_SAT 0x421088
91
92#define mmDMA_CH_4_WR_RATE_LIM_TOUT 0x42108C
93
94#define mmDMA_CH_4_CFG2 0x421090
95
96#define mmDMA_CH_4_TDMA_CTL 0x421100
97
98#define mmDMA_CH_4_TDMA_SRC_BASE_ADDR_LO 0x421104
99
100#define mmDMA_CH_4_TDMA_SRC_BASE_ADDR_HI 0x421108
101
102#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_0 0x42110C
103
104#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_0 0x421110
105
106#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_0 0x421114
107
108#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_0 0x421118
109
110#define mmDMA_CH_4_TDMA_SRC_STRIDE_0 0x42111C
111
112#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_1 0x421120
113
114#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_1 0x421124
115
116#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_1 0x421128
117
118#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_1 0x42112C
119
120#define mmDMA_CH_4_TDMA_SRC_STRIDE_1 0x421130
121
122#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_2 0x421134
123
124#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_2 0x421138
125
126#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_2 0x42113C
127
128#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_2 0x421140
129
130#define mmDMA_CH_4_TDMA_SRC_STRIDE_2 0x421144
131
132#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_3 0x421148
133
134#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_3 0x42114C
135
136#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_3 0x421150
137
138#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_3 0x421154
139
140#define mmDMA_CH_4_TDMA_SRC_STRIDE_3 0x421158
141
142#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_4 0x42115C
143
144#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_4 0x421160
145
146#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_4 0x421164
147
148#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_4 0x421168
149
150#define mmDMA_CH_4_TDMA_SRC_STRIDE_4 0x42116C
151
152#define mmDMA_CH_4_TDMA_DST_BASE_ADDR_LO 0x421170
153
154#define mmDMA_CH_4_TDMA_DST_BASE_ADDR_HI 0x421174
155
156#define mmDMA_CH_4_TDMA_DST_ROI_BASE_0 0x421178
157
158#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_0 0x42117C
159
160#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_0 0x421180
161
162#define mmDMA_CH_4_TDMA_DST_START_OFFSET_0 0x421184
163
164#define mmDMA_CH_4_TDMA_DST_STRIDE_0 0x421188
165
166#define mmDMA_CH_4_TDMA_DST_ROI_BASE_1 0x42118C
167
168#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_1 0x421190
169
170#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_1 0x421194
171
172#define mmDMA_CH_4_TDMA_DST_START_OFFSET_1 0x421198
173
174#define mmDMA_CH_4_TDMA_DST_STRIDE_1 0x42119C
175
176#define mmDMA_CH_4_TDMA_DST_ROI_BASE_2 0x4211A0
177
178#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_2 0x4211A4
179
180#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_2 0x4211A8
181
182#define mmDMA_CH_4_TDMA_DST_START_OFFSET_2 0x4211AC
183
184#define mmDMA_CH_4_TDMA_DST_STRIDE_2 0x4211B0
185
186#define mmDMA_CH_4_TDMA_DST_ROI_BASE_3 0x4211B4
187
188#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_3 0x4211B8
189
190#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_3 0x4211BC
191
192#define mmDMA_CH_4_TDMA_DST_START_OFFSET_3 0x4211C0
193
194#define mmDMA_CH_4_TDMA_DST_STRIDE_3 0x4211C4
195
196#define mmDMA_CH_4_TDMA_DST_ROI_BASE_4 0x4211C8
197
198#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_4 0x4211CC
199
200#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_4 0x4211D0
201
202#define mmDMA_CH_4_TDMA_DST_START_OFFSET_4 0x4211D4
203
204#define mmDMA_CH_4_TDMA_DST_STRIDE_4 0x4211D8
205
206#define mmDMA_CH_4_MEM_INIT_BUSY 0x4211FC
207
208#endif /* ASIC_REG_DMA_CH_4_REGS_H_ */
209
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
new file mode 100644
index 000000000000..8d965443c51e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
@@ -0,0 +1,105 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_MACRO_MASKS_H_
14#define ASIC_REG_DMA_MACRO_MASKS_H_
15
16/*
17 *****************************************
18 * DMA_MACRO (Prototype: DMA_MACRO)
19 *****************************************
20 */
21
22/* DMA_MACRO_LBW_RANGE_HIT_BLOCK */
23#define DMA_MACRO_LBW_RANGE_HIT_BLOCK_R_SHIFT 0
24#define DMA_MACRO_LBW_RANGE_HIT_BLOCK_R_MASK 0xFFFF
25
26/* DMA_MACRO_LBW_RANGE_MASK */
27#define DMA_MACRO_LBW_RANGE_MASK_R_SHIFT 0
28#define DMA_MACRO_LBW_RANGE_MASK_R_MASK 0x3FFFFFF
29
30/* DMA_MACRO_LBW_RANGE_BASE */
31#define DMA_MACRO_LBW_RANGE_BASE_R_SHIFT 0
32#define DMA_MACRO_LBW_RANGE_BASE_R_MASK 0x3FFFFFF
33
34/* DMA_MACRO_HBW_RANGE_HIT_BLOCK */
35#define DMA_MACRO_HBW_RANGE_HIT_BLOCK_R_SHIFT 0
36#define DMA_MACRO_HBW_RANGE_HIT_BLOCK_R_MASK 0xFF
37
38/* DMA_MACRO_HBW_RANGE_MASK_49_32 */
39#define DMA_MACRO_HBW_RANGE_MASK_49_32_R_SHIFT 0
40#define DMA_MACRO_HBW_RANGE_MASK_49_32_R_MASK 0x3FFFF
41
42/* DMA_MACRO_HBW_RANGE_MASK_31_0 */
43#define DMA_MACRO_HBW_RANGE_MASK_31_0_R_SHIFT 0
44#define DMA_MACRO_HBW_RANGE_MASK_31_0_R_MASK 0xFFFFFFFF
45
46/* DMA_MACRO_HBW_RANGE_BASE_49_32 */
47#define DMA_MACRO_HBW_RANGE_BASE_49_32_R_SHIFT 0
48#define DMA_MACRO_HBW_RANGE_BASE_49_32_R_MASK 0x3FFFF
49
50/* DMA_MACRO_HBW_RANGE_BASE_31_0 */
51#define DMA_MACRO_HBW_RANGE_BASE_31_0_R_SHIFT 0
52#define DMA_MACRO_HBW_RANGE_BASE_31_0_R_MASK 0xFFFFFFFF
53
54/* DMA_MACRO_WRITE_EN */
55#define DMA_MACRO_WRITE_EN_R_SHIFT 0
56#define DMA_MACRO_WRITE_EN_R_MASK 0x1
57
58/* DMA_MACRO_WRITE_CREDIT */
59#define DMA_MACRO_WRITE_CREDIT_R_SHIFT 0
60#define DMA_MACRO_WRITE_CREDIT_R_MASK 0x3FF
61
62/* DMA_MACRO_READ_EN */
63#define DMA_MACRO_READ_EN_R_SHIFT 0
64#define DMA_MACRO_READ_EN_R_MASK 0x1
65
66/* DMA_MACRO_READ_CREDIT */
67#define DMA_MACRO_READ_CREDIT_R_SHIFT 0
68#define DMA_MACRO_READ_CREDIT_R_MASK 0x3FF
69
70/* DMA_MACRO_SRAM_BUSY */
71
72/* DMA_MACRO_RAZWI_LBW_WT_VLD */
73#define DMA_MACRO_RAZWI_LBW_WT_VLD_R_SHIFT 0
74#define DMA_MACRO_RAZWI_LBW_WT_VLD_R_MASK 0x1
75
76/* DMA_MACRO_RAZWI_LBW_WT_ID */
77#define DMA_MACRO_RAZWI_LBW_WT_ID_R_SHIFT 0
78#define DMA_MACRO_RAZWI_LBW_WT_ID_R_MASK 0x7FFF
79
80/* DMA_MACRO_RAZWI_LBW_RD_VLD */
81#define DMA_MACRO_RAZWI_LBW_RD_VLD_R_SHIFT 0
82#define DMA_MACRO_RAZWI_LBW_RD_VLD_R_MASK 0x1
83
84/* DMA_MACRO_RAZWI_LBW_RD_ID */
85#define DMA_MACRO_RAZWI_LBW_RD_ID_R_SHIFT 0
86#define DMA_MACRO_RAZWI_LBW_RD_ID_R_MASK 0x7FFF
87
88/* DMA_MACRO_RAZWI_HBW_WT_VLD */
89#define DMA_MACRO_RAZWI_HBW_WT_VLD_R_SHIFT 0
90#define DMA_MACRO_RAZWI_HBW_WT_VLD_R_MASK 0x1
91
92/* DMA_MACRO_RAZWI_HBW_WT_ID */
93#define DMA_MACRO_RAZWI_HBW_WT_ID_R_SHIFT 0
94#define DMA_MACRO_RAZWI_HBW_WT_ID_R_MASK 0x1FFFFFFF
95
96/* DMA_MACRO_RAZWI_HBW_RD_VLD */
97#define DMA_MACRO_RAZWI_HBW_RD_VLD_R_SHIFT 0
98#define DMA_MACRO_RAZWI_HBW_RD_VLD_R_MASK 0x1
99
100/* DMA_MACRO_RAZWI_HBW_RD_ID */
101#define DMA_MACRO_RAZWI_HBW_RD_ID_R_SHIFT 0
102#define DMA_MACRO_RAZWI_HBW_RD_ID_R_MASK 0x1FFFFFFF
103
104#endif /* ASIC_REG_DMA_MACRO_MASKS_H_ */
105
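As an aside, the SHIFT/MASK pairs in the masks header above are consumed by masking a raw register word and shifting the result down (and the inverse when composing a value). Below is a minimal userspace sketch of that pattern; it is not part of this patch, get_field()/set_field() are hypothetical helpers, and the WRITE_CREDIT constants are simply copied from the header above.

#include <stdint.h>
#include <stdio.h>

/* Values copied from dma_macro_masks.h above. */
#define DMA_MACRO_WRITE_CREDIT_R_SHIFT 0
#define DMA_MACRO_WRITE_CREDIT_R_MASK  0x3FF

/* Hypothetical helpers: extract/insert a field described by a SHIFT/MASK pair. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
        return (reg & mask) >> shift;
}

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
                          uint32_t val)
{
        return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint32_t reg = 0;

        /* Program a write-credit value of 0x80, then read it back. */
        reg = set_field(reg, DMA_MACRO_WRITE_CREDIT_R_MASK,
                        DMA_MACRO_WRITE_CREDIT_R_SHIFT, 0x80);
        printf("write credit = 0x%x\n",
               (unsigned int)get_field(reg, DMA_MACRO_WRITE_CREDIT_R_MASK,
                                       DMA_MACRO_WRITE_CREDIT_R_SHIFT));
        return 0;
}
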
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
new file mode 100644
index 000000000000..8bfcb001189d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
@@ -0,0 +1,181 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_MACRO_REGS_H_
14#define ASIC_REG_DMA_MACRO_REGS_H_
15
16/*
17 *****************************************
18 * DMA_MACRO (Prototype: DMA_MACRO)
19 *****************************************
20 */
21
22#define mmDMA_MACRO_LBW_RANGE_HIT_BLOCK 0x4B0000
23
24#define mmDMA_MACRO_LBW_RANGE_MASK_0 0x4B0004
25
26#define mmDMA_MACRO_LBW_RANGE_MASK_1 0x4B0008
27
28#define mmDMA_MACRO_LBW_RANGE_MASK_2 0x4B000C
29
30#define mmDMA_MACRO_LBW_RANGE_MASK_3 0x4B0010
31
32#define mmDMA_MACRO_LBW_RANGE_MASK_4 0x4B0014
33
34#define mmDMA_MACRO_LBW_RANGE_MASK_5 0x4B0018
35
36#define mmDMA_MACRO_LBW_RANGE_MASK_6 0x4B001C
37
38#define mmDMA_MACRO_LBW_RANGE_MASK_7 0x4B0020
39
40#define mmDMA_MACRO_LBW_RANGE_MASK_8 0x4B0024
41
42#define mmDMA_MACRO_LBW_RANGE_MASK_9 0x4B0028
43
44#define mmDMA_MACRO_LBW_RANGE_MASK_10 0x4B002C
45
46#define mmDMA_MACRO_LBW_RANGE_MASK_11 0x4B0030
47
48#define mmDMA_MACRO_LBW_RANGE_MASK_12 0x4B0034
49
50#define mmDMA_MACRO_LBW_RANGE_MASK_13 0x4B0038
51
52#define mmDMA_MACRO_LBW_RANGE_MASK_14 0x4B003C
53
54#define mmDMA_MACRO_LBW_RANGE_MASK_15 0x4B0040
55
56#define mmDMA_MACRO_LBW_RANGE_BASE_0 0x4B0044
57
58#define mmDMA_MACRO_LBW_RANGE_BASE_1 0x4B0048
59
60#define mmDMA_MACRO_LBW_RANGE_BASE_2 0x4B004C
61
62#define mmDMA_MACRO_LBW_RANGE_BASE_3 0x4B0050
63
64#define mmDMA_MACRO_LBW_RANGE_BASE_4 0x4B0054
65
66#define mmDMA_MACRO_LBW_RANGE_BASE_5 0x4B0058
67
68#define mmDMA_MACRO_LBW_RANGE_BASE_6 0x4B005C
69
70#define mmDMA_MACRO_LBW_RANGE_BASE_7 0x4B0060
71
72#define mmDMA_MACRO_LBW_RANGE_BASE_8 0x4B0064
73
74#define mmDMA_MACRO_LBW_RANGE_BASE_9 0x4B0068
75
76#define mmDMA_MACRO_LBW_RANGE_BASE_10 0x4B006C
77
78#define mmDMA_MACRO_LBW_RANGE_BASE_11 0x4B0070
79
80#define mmDMA_MACRO_LBW_RANGE_BASE_12 0x4B0074
81
82#define mmDMA_MACRO_LBW_RANGE_BASE_13 0x4B0078
83
84#define mmDMA_MACRO_LBW_RANGE_BASE_14 0x4B007C
85
86#define mmDMA_MACRO_LBW_RANGE_BASE_15 0x4B0080
87
88#define mmDMA_MACRO_HBW_RANGE_HIT_BLOCK 0x4B0084
89
90#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_0 0x4B00A8
91
92#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_1 0x4B00AC
93
94#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_2 0x4B00B0
95
96#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_3 0x4B00B4
97
98#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_4 0x4B00B8
99
100#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_5 0x4B00BC
101
102#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_6 0x4B00C0
103
104#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_7 0x4B00C4
105
106#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_0 0x4B00C8
107
108#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_1 0x4B00CC
109
110#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_2 0x4B00D0
111
112#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_3 0x4B00D4
113
114#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_4 0x4B00D8
115
116#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_5 0x4B00DC
117
118#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_6 0x4B00E0
119
120#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_7 0x4B00E4
121
122#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_0 0x4B00E8
123
124#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_1 0x4B00EC
125
126#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_2 0x4B00F0
127
128#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_3 0x4B00F4
129
130#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_4 0x4B00F8
131
132#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_5 0x4B00FC
133
134#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_6 0x4B0100
135
136#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_7 0x4B0104
137
138#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_0 0x4B0108
139
140#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_1 0x4B010C
141
142#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_2 0x4B0110
143
144#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_3 0x4B0114
145
146#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_4 0x4B0118
147
148#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_5 0x4B011C
149
150#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_6 0x4B0120
151
152#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_7 0x4B0124
153
154#define mmDMA_MACRO_WRITE_EN 0x4B0128
155
156#define mmDMA_MACRO_WRITE_CREDIT 0x4B012C
157
158#define mmDMA_MACRO_READ_EN 0x4B0130
159
160#define mmDMA_MACRO_READ_CREDIT 0x4B0134
161
162#define mmDMA_MACRO_SRAM_BUSY 0x4B0138
163
164#define mmDMA_MACRO_RAZWI_LBW_WT_VLD 0x4B013C
165
166#define mmDMA_MACRO_RAZWI_LBW_WT_ID 0x4B0140
167
168#define mmDMA_MACRO_RAZWI_LBW_RD_VLD 0x4B0144
169
170#define mmDMA_MACRO_RAZWI_LBW_RD_ID 0x4B0148
171
172#define mmDMA_MACRO_RAZWI_HBW_WT_VLD 0x4B014C
173
174#define mmDMA_MACRO_RAZWI_HBW_WT_ID 0x4B0150
175
176#define mmDMA_MACRO_RAZWI_HBW_RD_VLD 0x4B0154
177
178#define mmDMA_MACRO_RAZWI_HBW_RD_ID 0x4B0158
179
180#endif /* ASIC_REG_DMA_MACRO_REGS_H_ */
181
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
new file mode 100644
index 000000000000..9f33f351a3c1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
@@ -0,0 +1,209 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_NRTR_MASKS_H_
14#define ASIC_REG_DMA_NRTR_MASKS_H_
15
16/*
17 *****************************************
18 * DMA_NRTR (Prototype: IF_NRTR)
19 *****************************************
20 */
21
22/* DMA_NRTR_HBW_MAX_CRED */
23#define DMA_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0
24#define DMA_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F
25#define DMA_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8
26#define DMA_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00
27#define DMA_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16
28#define DMA_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000
29#define DMA_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24
30#define DMA_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000
31
32/* DMA_NRTR_LBW_MAX_CRED */
33#define DMA_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0
34#define DMA_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F
35#define DMA_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8
36#define DMA_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00
37#define DMA_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16
38#define DMA_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000
39#define DMA_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24
40#define DMA_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000
41
42/* DMA_NRTR_DBG_E_ARB */
43#define DMA_NRTR_DBG_E_ARB_W_SHIFT 0
44#define DMA_NRTR_DBG_E_ARB_W_MASK 0x7
45#define DMA_NRTR_DBG_E_ARB_S_SHIFT 8
46#define DMA_NRTR_DBG_E_ARB_S_MASK 0x700
47#define DMA_NRTR_DBG_E_ARB_N_SHIFT 16
48#define DMA_NRTR_DBG_E_ARB_N_MASK 0x70000
49#define DMA_NRTR_DBG_E_ARB_L_SHIFT 24
50#define DMA_NRTR_DBG_E_ARB_L_MASK 0x7000000
51
52/* DMA_NRTR_DBG_W_ARB */
53#define DMA_NRTR_DBG_W_ARB_E_SHIFT 0
54#define DMA_NRTR_DBG_W_ARB_E_MASK 0x7
55#define DMA_NRTR_DBG_W_ARB_S_SHIFT 8
56#define DMA_NRTR_DBG_W_ARB_S_MASK 0x700
57#define DMA_NRTR_DBG_W_ARB_N_SHIFT 16
58#define DMA_NRTR_DBG_W_ARB_N_MASK 0x70000
59#define DMA_NRTR_DBG_W_ARB_L_SHIFT 24
60#define DMA_NRTR_DBG_W_ARB_L_MASK 0x7000000
61
62/* DMA_NRTR_DBG_N_ARB */
63#define DMA_NRTR_DBG_N_ARB_W_SHIFT 0
64#define DMA_NRTR_DBG_N_ARB_W_MASK 0x7
65#define DMA_NRTR_DBG_N_ARB_E_SHIFT 8
66#define DMA_NRTR_DBG_N_ARB_E_MASK 0x700
67#define DMA_NRTR_DBG_N_ARB_S_SHIFT 16
68#define DMA_NRTR_DBG_N_ARB_S_MASK 0x70000
69#define DMA_NRTR_DBG_N_ARB_L_SHIFT 24
70#define DMA_NRTR_DBG_N_ARB_L_MASK 0x7000000
71
72/* DMA_NRTR_DBG_S_ARB */
73#define DMA_NRTR_DBG_S_ARB_W_SHIFT 0
74#define DMA_NRTR_DBG_S_ARB_W_MASK 0x7
75#define DMA_NRTR_DBG_S_ARB_E_SHIFT 8
76#define DMA_NRTR_DBG_S_ARB_E_MASK 0x700
77#define DMA_NRTR_DBG_S_ARB_N_SHIFT 16
78#define DMA_NRTR_DBG_S_ARB_N_MASK 0x70000
79#define DMA_NRTR_DBG_S_ARB_L_SHIFT 24
80#define DMA_NRTR_DBG_S_ARB_L_MASK 0x7000000
81
82/* DMA_NRTR_DBG_L_ARB */
83#define DMA_NRTR_DBG_L_ARB_W_SHIFT 0
84#define DMA_NRTR_DBG_L_ARB_W_MASK 0x7
85#define DMA_NRTR_DBG_L_ARB_E_SHIFT 8
86#define DMA_NRTR_DBG_L_ARB_E_MASK 0x700
87#define DMA_NRTR_DBG_L_ARB_S_SHIFT 16
88#define DMA_NRTR_DBG_L_ARB_S_MASK 0x70000
89#define DMA_NRTR_DBG_L_ARB_N_SHIFT 24
90#define DMA_NRTR_DBG_L_ARB_N_MASK 0x7000000
91
92/* DMA_NRTR_DBG_E_ARB_MAX */
93#define DMA_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0
94#define DMA_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F
95
96/* DMA_NRTR_DBG_W_ARB_MAX */
97#define DMA_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0
98#define DMA_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F
99
100/* DMA_NRTR_DBG_N_ARB_MAX */
101#define DMA_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0
102#define DMA_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F
103
104/* DMA_NRTR_DBG_S_ARB_MAX */
105#define DMA_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0
106#define DMA_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F
107
108/* DMA_NRTR_DBG_L_ARB_MAX */
109#define DMA_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0
110#define DMA_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F
111
112/* DMA_NRTR_SPLIT_COEF */
113#define DMA_NRTR_SPLIT_COEF_VAL_SHIFT 0
114#define DMA_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF
115
116/* DMA_NRTR_SPLIT_CFG */
117#define DMA_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0
118#define DMA_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1
119#define DMA_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1
120#define DMA_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2
121#define DMA_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2
122#define DMA_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC
123#define DMA_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4
124#define DMA_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10
125#define DMA_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5
126#define DMA_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20
127#define DMA_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6
128#define DMA_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0
129
130/* DMA_NRTR_SPLIT_RD_SAT */
131#define DMA_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0
132#define DMA_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF
133
134/* DMA_NRTR_SPLIT_RD_RST_TOKEN */
135#define DMA_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0
136#define DMA_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF
137
138/* DMA_NRTR_SPLIT_RD_TIMEOUT */
139#define DMA_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0
140#define DMA_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF
141
142/* DMA_NRTR_SPLIT_WR_SAT */
143#define DMA_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0
144#define DMA_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF
145
146/* DMA_NRTR_WPLIT_WR_TST_TOLEN */
147#define DMA_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0
148#define DMA_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF
149
150/* DMA_NRTR_SPLIT_WR_TIMEOUT */
151#define DMA_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0
152#define DMA_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF
153
154/* DMA_NRTR_HBW_RANGE_HIT */
155#define DMA_NRTR_HBW_RANGE_HIT_IND_SHIFT 0
156#define DMA_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF
157
158/* DMA_NRTR_HBW_RANGE_MASK_L */
159#define DMA_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0
160#define DMA_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF
161
162/* DMA_NRTR_HBW_RANGE_MASK_H */
163#define DMA_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0
164#define DMA_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF
165
166/* DMA_NRTR_HBW_RANGE_BASE_L */
167#define DMA_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0
168#define DMA_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF
169
170/* DMA_NRTR_HBW_RANGE_BASE_H */
171#define DMA_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0
172#define DMA_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF
173
174/* DMA_NRTR_LBW_RANGE_HIT */
175#define DMA_NRTR_LBW_RANGE_HIT_IND_SHIFT 0
176#define DMA_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF
177
178/* DMA_NRTR_LBW_RANGE_MASK */
179#define DMA_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0
180#define DMA_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF
181
182/* DMA_NRTR_LBW_RANGE_BASE */
183#define DMA_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0
184#define DMA_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF
185
186/* DMA_NRTR_RGLTR */
187#define DMA_NRTR_RGLTR_WR_EN_SHIFT 0
188#define DMA_NRTR_RGLTR_WR_EN_MASK 0x1
189#define DMA_NRTR_RGLTR_RD_EN_SHIFT 4
190#define DMA_NRTR_RGLTR_RD_EN_MASK 0x10
191
192/* DMA_NRTR_RGLTR_WR_RESULT */
193#define DMA_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0
194#define DMA_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF
195
196/* DMA_NRTR_RGLTR_RD_RESULT */
197#define DMA_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0
198#define DMA_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF
199
200/* DMA_NRTR_SCRAMB_EN */
201#define DMA_NRTR_SCRAMB_EN_VAL_SHIFT 0
202#define DMA_NRTR_SCRAMB_EN_VAL_MASK 0x1
203
204/* DMA_NRTR_NON_LIN_SCRAMB */
205#define DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0
206#define DMA_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
207
208#endif /* ASIC_REG_DMA_NRTR_MASKS_H_ */
209
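The HBW_MAX_CRED register described above packs four 6-bit credit counters (write request/response, read request/response) into one 32-bit word. The following is an illustrative userspace sketch of decoding such a word; it is not part of this patch, decode_hbw_max_cred() is a hypothetical helper, and the constants are copied from the header above.

#include <stdint.h>
#include <stdio.h>

/* Values copied from dma_nrtr_masks.h above. */
#define DMA_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0
#define DMA_NRTR_HBW_MAX_CRED_WR_RQ_MASK  0x3F
#define DMA_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8
#define DMA_NRTR_HBW_MAX_CRED_WR_RS_MASK  0x3F00
#define DMA_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16
#define DMA_NRTR_HBW_MAX_CRED_RD_RQ_MASK  0x3F0000
#define DMA_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24
#define DMA_NRTR_HBW_MAX_CRED_RD_RS_MASK  0x3F000000

/* Hypothetical helper: print the four credit fields packed into the word. */
static void decode_hbw_max_cred(uint32_t reg)
{
        printf("wr_rq=%u wr_rs=%u rd_rq=%u rd_rs=%u\n",
               (unsigned int)((reg & DMA_NRTR_HBW_MAX_CRED_WR_RQ_MASK) >>
                              DMA_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT),
               (unsigned int)((reg & DMA_NRTR_HBW_MAX_CRED_WR_RS_MASK) >>
                              DMA_NRTR_HBW_MAX_CRED_WR_RS_SHIFT),
               (unsigned int)((reg & DMA_NRTR_HBW_MAX_CRED_RD_RQ_MASK) >>
                              DMA_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT),
               (unsigned int)((reg & DMA_NRTR_HBW_MAX_CRED_RD_RS_MASK) >>
                              DMA_NRTR_HBW_MAX_CRED_RD_RS_SHIFT));
}

int main(void)
{
        decode_hbw_max_cred(0x20202020);        /* example raw value: 32 credits per field */
        return 0;
}
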
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
new file mode 100644
index 000000000000..d8293745a02b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
@@ -0,0 +1,227 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_NRTR_REGS_H_
14#define ASIC_REG_DMA_NRTR_REGS_H_
15
16/*
17 *****************************************
18 * DMA_NRTR (Prototype: IF_NRTR)
19 *****************************************
20 */
21
22#define mmDMA_NRTR_HBW_MAX_CRED 0x1C0100
23
24#define mmDMA_NRTR_LBW_MAX_CRED 0x1C0120
25
26#define mmDMA_NRTR_DBG_E_ARB 0x1C0300
27
28#define mmDMA_NRTR_DBG_W_ARB 0x1C0304
29
30#define mmDMA_NRTR_DBG_N_ARB 0x1C0308
31
32#define mmDMA_NRTR_DBG_S_ARB 0x1C030C
33
34#define mmDMA_NRTR_DBG_L_ARB 0x1C0310
35
36#define mmDMA_NRTR_DBG_E_ARB_MAX 0x1C0320
37
38#define mmDMA_NRTR_DBG_W_ARB_MAX 0x1C0324
39
40#define mmDMA_NRTR_DBG_N_ARB_MAX 0x1C0328
41
42#define mmDMA_NRTR_DBG_S_ARB_MAX 0x1C032C
43
44#define mmDMA_NRTR_DBG_L_ARB_MAX 0x1C0330
45
46#define mmDMA_NRTR_SPLIT_COEF_0 0x1C0400
47
48#define mmDMA_NRTR_SPLIT_COEF_1 0x1C0404
49
50#define mmDMA_NRTR_SPLIT_COEF_2 0x1C0408
51
52#define mmDMA_NRTR_SPLIT_COEF_3 0x1C040C
53
54#define mmDMA_NRTR_SPLIT_COEF_4 0x1C0410
55
56#define mmDMA_NRTR_SPLIT_COEF_5 0x1C0414
57
58#define mmDMA_NRTR_SPLIT_COEF_6 0x1C0418
59
60#define mmDMA_NRTR_SPLIT_COEF_7 0x1C041C
61
62#define mmDMA_NRTR_SPLIT_COEF_8 0x1C0420
63
64#define mmDMA_NRTR_SPLIT_COEF_9 0x1C0424
65
66#define mmDMA_NRTR_SPLIT_CFG 0x1C0440
67
68#define mmDMA_NRTR_SPLIT_RD_SAT 0x1C0444
69
70#define mmDMA_NRTR_SPLIT_RD_RST_TOKEN 0x1C0448
71
72#define mmDMA_NRTR_SPLIT_RD_TIMEOUT_0 0x1C044C
73
74#define mmDMA_NRTR_SPLIT_RD_TIMEOUT_1 0x1C0450
75
76#define mmDMA_NRTR_SPLIT_WR_SAT 0x1C0454
77
78#define mmDMA_NRTR_WPLIT_WR_TST_TOLEN 0x1C0458
79
80#define mmDMA_NRTR_SPLIT_WR_TIMEOUT_0 0x1C045C
81
82#define mmDMA_NRTR_SPLIT_WR_TIMEOUT_1 0x1C0460
83
84#define mmDMA_NRTR_HBW_RANGE_HIT 0x1C0470
85
86#define mmDMA_NRTR_HBW_RANGE_MASK_L_0 0x1C0480
87
88#define mmDMA_NRTR_HBW_RANGE_MASK_L_1 0x1C0484
89
90#define mmDMA_NRTR_HBW_RANGE_MASK_L_2 0x1C0488
91
92#define mmDMA_NRTR_HBW_RANGE_MASK_L_3 0x1C048C
93
94#define mmDMA_NRTR_HBW_RANGE_MASK_L_4 0x1C0490
95
96#define mmDMA_NRTR_HBW_RANGE_MASK_L_5 0x1C0494
97
98#define mmDMA_NRTR_HBW_RANGE_MASK_L_6 0x1C0498
99
100#define mmDMA_NRTR_HBW_RANGE_MASK_L_7 0x1C049C
101
102#define mmDMA_NRTR_HBW_RANGE_MASK_H_0 0x1C04A0
103
104#define mmDMA_NRTR_HBW_RANGE_MASK_H_1 0x1C04A4
105
106#define mmDMA_NRTR_HBW_RANGE_MASK_H_2 0x1C04A8
107
108#define mmDMA_NRTR_HBW_RANGE_MASK_H_3 0x1C04AC
109
110#define mmDMA_NRTR_HBW_RANGE_MASK_H_4 0x1C04B0
111
112#define mmDMA_NRTR_HBW_RANGE_MASK_H_5 0x1C04B4
113
114#define mmDMA_NRTR_HBW_RANGE_MASK_H_6 0x1C04B8
115
116#define mmDMA_NRTR_HBW_RANGE_MASK_H_7 0x1C04BC
117
118#define mmDMA_NRTR_HBW_RANGE_BASE_L_0 0x1C04C0
119
120#define mmDMA_NRTR_HBW_RANGE_BASE_L_1 0x1C04C4
121
122#define mmDMA_NRTR_HBW_RANGE_BASE_L_2 0x1C04C8
123
124#define mmDMA_NRTR_HBW_RANGE_BASE_L_3 0x1C04CC
125
126#define mmDMA_NRTR_HBW_RANGE_BASE_L_4 0x1C04D0
127
128#define mmDMA_NRTR_HBW_RANGE_BASE_L_5 0x1C04D4
129
130#define mmDMA_NRTR_HBW_RANGE_BASE_L_6 0x1C04D8
131
132#define mmDMA_NRTR_HBW_RANGE_BASE_L_7 0x1C04DC
133
134#define mmDMA_NRTR_HBW_RANGE_BASE_H_0 0x1C04E0
135
136#define mmDMA_NRTR_HBW_RANGE_BASE_H_1 0x1C04E4
137
138#define mmDMA_NRTR_HBW_RANGE_BASE_H_2 0x1C04E8
139
140#define mmDMA_NRTR_HBW_RANGE_BASE_H_3 0x1C04EC
141
142#define mmDMA_NRTR_HBW_RANGE_BASE_H_4 0x1C04F0
143
144#define mmDMA_NRTR_HBW_RANGE_BASE_H_5 0x1C04F4
145
146#define mmDMA_NRTR_HBW_RANGE_BASE_H_6 0x1C04F8
147
148#define mmDMA_NRTR_HBW_RANGE_BASE_H_7 0x1C04FC
149
150#define mmDMA_NRTR_LBW_RANGE_HIT 0x1C0500
151
152#define mmDMA_NRTR_LBW_RANGE_MASK_0 0x1C0510
153
154#define mmDMA_NRTR_LBW_RANGE_MASK_1 0x1C0514
155
156#define mmDMA_NRTR_LBW_RANGE_MASK_2 0x1C0518
157
158#define mmDMA_NRTR_LBW_RANGE_MASK_3 0x1C051C
159
160#define mmDMA_NRTR_LBW_RANGE_MASK_4 0x1C0520
161
162#define mmDMA_NRTR_LBW_RANGE_MASK_5 0x1C0524
163
164#define mmDMA_NRTR_LBW_RANGE_MASK_6 0x1C0528
165
166#define mmDMA_NRTR_LBW_RANGE_MASK_7 0x1C052C
167
168#define mmDMA_NRTR_LBW_RANGE_MASK_8 0x1C0530
169
170#define mmDMA_NRTR_LBW_RANGE_MASK_9 0x1C0534
171
172#define mmDMA_NRTR_LBW_RANGE_MASK_10 0x1C0538
173
174#define mmDMA_NRTR_LBW_RANGE_MASK_11 0x1C053C
175
176#define mmDMA_NRTR_LBW_RANGE_MASK_12 0x1C0540
177
178#define mmDMA_NRTR_LBW_RANGE_MASK_13 0x1C0544
179
180#define mmDMA_NRTR_LBW_RANGE_MASK_14 0x1C0548
181
182#define mmDMA_NRTR_LBW_RANGE_MASK_15 0x1C054C
183
184#define mmDMA_NRTR_LBW_RANGE_BASE_0 0x1C0550
185
186#define mmDMA_NRTR_LBW_RANGE_BASE_1 0x1C0554
187
188#define mmDMA_NRTR_LBW_RANGE_BASE_2 0x1C0558
189
190#define mmDMA_NRTR_LBW_RANGE_BASE_3 0x1C055C
191
192#define mmDMA_NRTR_LBW_RANGE_BASE_4 0x1C0560
193
194#define mmDMA_NRTR_LBW_RANGE_BASE_5 0x1C0564
195
196#define mmDMA_NRTR_LBW_RANGE_BASE_6 0x1C0568
197
198#define mmDMA_NRTR_LBW_RANGE_BASE_7 0x1C056C
199
200#define mmDMA_NRTR_LBW_RANGE_BASE_8 0x1C0570
201
202#define mmDMA_NRTR_LBW_RANGE_BASE_9 0x1C0574
203
204#define mmDMA_NRTR_LBW_RANGE_BASE_10 0x1C0578
205
206#define mmDMA_NRTR_LBW_RANGE_BASE_11 0x1C057C
207
208#define mmDMA_NRTR_LBW_RANGE_BASE_12 0x1C0580
209
210#define mmDMA_NRTR_LBW_RANGE_BASE_13 0x1C0584
211
212#define mmDMA_NRTR_LBW_RANGE_BASE_14 0x1C0588
213
214#define mmDMA_NRTR_LBW_RANGE_BASE_15 0x1C058C
215
216#define mmDMA_NRTR_RGLTR 0x1C0590
217
218#define mmDMA_NRTR_RGLTR_WR_RESULT 0x1C0594
219
220#define mmDMA_NRTR_RGLTR_RD_RESULT 0x1C0598
221
222#define mmDMA_NRTR_SCRAMB_EN 0x1C0600
223
224#define mmDMA_NRTR_NON_LIN_SCRAMB 0x1C0604
225
226#endif /* ASIC_REG_DMA_NRTR_REGS_H_ */
227
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
new file mode 100644
index 000000000000..10619dbb9b17
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
@@ -0,0 +1,465 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_QM_0_MASKS_H_
14#define ASIC_REG_DMA_QM_0_MASKS_H_
15
16/*
17 *****************************************
18 * DMA_QM_0 (Prototype: QMAN)
19 *****************************************
20 */
21
22/* DMA_QM_0_GLBL_CFG0 */
23#define DMA_QM_0_GLBL_CFG0_PQF_EN_SHIFT 0
24#define DMA_QM_0_GLBL_CFG0_PQF_EN_MASK 0x1
25#define DMA_QM_0_GLBL_CFG0_CQF_EN_SHIFT 1
26#define DMA_QM_0_GLBL_CFG0_CQF_EN_MASK 0x2
27#define DMA_QM_0_GLBL_CFG0_CP_EN_SHIFT 2
28#define DMA_QM_0_GLBL_CFG0_CP_EN_MASK 0x4
29#define DMA_QM_0_GLBL_CFG0_DMA_EN_SHIFT 3
30#define DMA_QM_0_GLBL_CFG0_DMA_EN_MASK 0x8
31
32/* DMA_QM_0_GLBL_CFG1 */
33#define DMA_QM_0_GLBL_CFG1_PQF_STOP_SHIFT 0
34#define DMA_QM_0_GLBL_CFG1_PQF_STOP_MASK 0x1
35#define DMA_QM_0_GLBL_CFG1_CQF_STOP_SHIFT 1
36#define DMA_QM_0_GLBL_CFG1_CQF_STOP_MASK 0x2
37#define DMA_QM_0_GLBL_CFG1_CP_STOP_SHIFT 2
38#define DMA_QM_0_GLBL_CFG1_CP_STOP_MASK 0x4
39#define DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT 3
40#define DMA_QM_0_GLBL_CFG1_DMA_STOP_MASK 0x8
41#define DMA_QM_0_GLBL_CFG1_PQF_FLUSH_SHIFT 8
42#define DMA_QM_0_GLBL_CFG1_PQF_FLUSH_MASK 0x100
43#define DMA_QM_0_GLBL_CFG1_CQF_FLUSH_SHIFT 9
44#define DMA_QM_0_GLBL_CFG1_CQF_FLUSH_MASK 0x200
45#define DMA_QM_0_GLBL_CFG1_CP_FLUSH_SHIFT 10
46#define DMA_QM_0_GLBL_CFG1_CP_FLUSH_MASK 0x400
47#define DMA_QM_0_GLBL_CFG1_DMA_FLUSH_SHIFT 11
48#define DMA_QM_0_GLBL_CFG1_DMA_FLUSH_MASK 0x800
49
50/* DMA_QM_0_GLBL_PROT */
51#define DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT 0
52#define DMA_QM_0_GLBL_PROT_PQF_PROT_MASK 0x1
53#define DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT 1
54#define DMA_QM_0_GLBL_PROT_CQF_PROT_MASK 0x2
55#define DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT 2
56#define DMA_QM_0_GLBL_PROT_CP_PROT_MASK 0x4
57#define DMA_QM_0_GLBL_PROT_DMA_PROT_SHIFT 3
58#define DMA_QM_0_GLBL_PROT_DMA_PROT_MASK 0x8
59#define DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
60#define DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
61#define DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
62#define DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
63#define DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT 6
64#define DMA_QM_0_GLBL_PROT_CP_ERR_PROT_MASK 0x40
65#define DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
66#define DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
67
68/* DMA_QM_0_GLBL_ERR_CFG */
69#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
70#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
71#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
72#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
73#define DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
74#define DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
75#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
76#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
77#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
78#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
79#define DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
80#define DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
81#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
82#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
83#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
84#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
85#define DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
86#define DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
87#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
88#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
89#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
90#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
91#define DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
92#define DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
93
94/* DMA_QM_0_GLBL_ERR_ADDR_LO */
95#define DMA_QM_0_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
96#define DMA_QM_0_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
97
98/* DMA_QM_0_GLBL_ERR_ADDR_HI */
99#define DMA_QM_0_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
100#define DMA_QM_0_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
101
102/* DMA_QM_0_GLBL_ERR_WDATA */
103#define DMA_QM_0_GLBL_ERR_WDATA_VAL_SHIFT 0
104#define DMA_QM_0_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
105
106/* DMA_QM_0_GLBL_SECURE_PROPS */
107#define DMA_QM_0_GLBL_SECURE_PROPS_ASID_SHIFT 0
108#define DMA_QM_0_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
109#define DMA_QM_0_GLBL_SECURE_PROPS_MMBP_SHIFT 10
110#define DMA_QM_0_GLBL_SECURE_PROPS_MMBP_MASK 0x400
111
112/* DMA_QM_0_GLBL_NON_SECURE_PROPS */
113#define DMA_QM_0_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
114#define DMA_QM_0_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
115#define DMA_QM_0_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
116#define DMA_QM_0_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
117
118/* DMA_QM_0_GLBL_STS0 */
119#define DMA_QM_0_GLBL_STS0_PQF_IDLE_SHIFT 0
120#define DMA_QM_0_GLBL_STS0_PQF_IDLE_MASK 0x1
121#define DMA_QM_0_GLBL_STS0_CQF_IDLE_SHIFT 1
122#define DMA_QM_0_GLBL_STS0_CQF_IDLE_MASK 0x2
123#define DMA_QM_0_GLBL_STS0_CP_IDLE_SHIFT 2
124#define DMA_QM_0_GLBL_STS0_CP_IDLE_MASK 0x4
125#define DMA_QM_0_GLBL_STS0_DMA_IDLE_SHIFT 3
126#define DMA_QM_0_GLBL_STS0_DMA_IDLE_MASK 0x8
127#define DMA_QM_0_GLBL_STS0_PQF_IS_STOP_SHIFT 4
128#define DMA_QM_0_GLBL_STS0_PQF_IS_STOP_MASK 0x10
129#define DMA_QM_0_GLBL_STS0_CQF_IS_STOP_SHIFT 5
130#define DMA_QM_0_GLBL_STS0_CQF_IS_STOP_MASK 0x20
131#define DMA_QM_0_GLBL_STS0_CP_IS_STOP_SHIFT 6
132#define DMA_QM_0_GLBL_STS0_CP_IS_STOP_MASK 0x40
133#define DMA_QM_0_GLBL_STS0_DMA_IS_STOP_SHIFT 7
134#define DMA_QM_0_GLBL_STS0_DMA_IS_STOP_MASK 0x80
135
136/* DMA_QM_0_GLBL_STS1 */
137#define DMA_QM_0_GLBL_STS1_PQF_RD_ERR_SHIFT 0
138#define DMA_QM_0_GLBL_STS1_PQF_RD_ERR_MASK 0x1
139#define DMA_QM_0_GLBL_STS1_CQF_RD_ERR_SHIFT 1
140#define DMA_QM_0_GLBL_STS1_CQF_RD_ERR_MASK 0x2
141#define DMA_QM_0_GLBL_STS1_CP_RD_ERR_SHIFT 2
142#define DMA_QM_0_GLBL_STS1_CP_RD_ERR_MASK 0x4
143#define DMA_QM_0_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
144#define DMA_QM_0_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
145#define DMA_QM_0_GLBL_STS1_CP_STOP_OP_SHIFT 4
146#define DMA_QM_0_GLBL_STS1_CP_STOP_OP_MASK 0x10
147#define DMA_QM_0_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
148#define DMA_QM_0_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
149#define DMA_QM_0_GLBL_STS1_DMA_RD_ERR_SHIFT 8
150#define DMA_QM_0_GLBL_STS1_DMA_RD_ERR_MASK 0x100
151#define DMA_QM_0_GLBL_STS1_DMA_WR_ERR_SHIFT 9
152#define DMA_QM_0_GLBL_STS1_DMA_WR_ERR_MASK 0x200
153#define DMA_QM_0_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
154#define DMA_QM_0_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
155#define DMA_QM_0_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
156#define DMA_QM_0_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
157
158/* DMA_QM_0_PQ_BASE_LO */
159#define DMA_QM_0_PQ_BASE_LO_VAL_SHIFT 0
160#define DMA_QM_0_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
161
162/* DMA_QM_0_PQ_BASE_HI */
163#define DMA_QM_0_PQ_BASE_HI_VAL_SHIFT 0
164#define DMA_QM_0_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
165
166/* DMA_QM_0_PQ_SIZE */
167#define DMA_QM_0_PQ_SIZE_VAL_SHIFT 0
168#define DMA_QM_0_PQ_SIZE_VAL_MASK 0xFFFFFFFF
169
170/* DMA_QM_0_PQ_PI */
171#define DMA_QM_0_PQ_PI_VAL_SHIFT 0
172#define DMA_QM_0_PQ_PI_VAL_MASK 0xFFFFFFFF
173
174/* DMA_QM_0_PQ_CI */
175#define DMA_QM_0_PQ_CI_VAL_SHIFT 0
176#define DMA_QM_0_PQ_CI_VAL_MASK 0xFFFFFFFF
177
178/* DMA_QM_0_PQ_CFG0 */
179#define DMA_QM_0_PQ_CFG0_RESERVED_SHIFT 0
180#define DMA_QM_0_PQ_CFG0_RESERVED_MASK 0x1
181
182/* DMA_QM_0_PQ_CFG1 */
183#define DMA_QM_0_PQ_CFG1_CREDIT_LIM_SHIFT 0
184#define DMA_QM_0_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
185#define DMA_QM_0_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
186#define DMA_QM_0_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
187
188/* DMA_QM_0_PQ_ARUSER */
189#define DMA_QM_0_PQ_ARUSER_NOSNOOP_SHIFT 0
190#define DMA_QM_0_PQ_ARUSER_NOSNOOP_MASK 0x1
191#define DMA_QM_0_PQ_ARUSER_WORD_SHIFT 1
192#define DMA_QM_0_PQ_ARUSER_WORD_MASK 0x2
193
194/* DMA_QM_0_PQ_PUSH0 */
195#define DMA_QM_0_PQ_PUSH0_PTR_LO_SHIFT 0
196#define DMA_QM_0_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF
197
198/* DMA_QM_0_PQ_PUSH1 */
199#define DMA_QM_0_PQ_PUSH1_PTR_HI_SHIFT 0
200#define DMA_QM_0_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF
201
202/* DMA_QM_0_PQ_PUSH2 */
203#define DMA_QM_0_PQ_PUSH2_TSIZE_SHIFT 0
204#define DMA_QM_0_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF
205
206/* DMA_QM_0_PQ_PUSH3 */
207#define DMA_QM_0_PQ_PUSH3_RPT_SHIFT 0
208#define DMA_QM_0_PQ_PUSH3_RPT_MASK 0xFFFF
209#define DMA_QM_0_PQ_PUSH3_CTL_SHIFT 16
210#define DMA_QM_0_PQ_PUSH3_CTL_MASK 0xFFFF0000
211
212/* DMA_QM_0_PQ_STS0 */
213#define DMA_QM_0_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
214#define DMA_QM_0_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
215#define DMA_QM_0_PQ_STS0_PQ_FREE_CNT_SHIFT 16
216#define DMA_QM_0_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
217
218/* DMA_QM_0_PQ_STS1 */
219#define DMA_QM_0_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
220#define DMA_QM_0_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
221#define DMA_QM_0_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
222#define DMA_QM_0_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
223#define DMA_QM_0_PQ_STS1_PQ_BUSY_SHIFT 31
224#define DMA_QM_0_PQ_STS1_PQ_BUSY_MASK 0x80000000
225
226/* DMA_QM_0_PQ_RD_RATE_LIM_EN */
227#define DMA_QM_0_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0
228#define DMA_QM_0_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1
229
230/* DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN */
231#define DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
232#define DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
233
234/* DMA_QM_0_PQ_RD_RATE_LIM_SAT */
235#define DMA_QM_0_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
236#define DMA_QM_0_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
237
238/* DMA_QM_0_PQ_RD_RATE_LIM_TOUT */
239#define DMA_QM_0_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
240#define DMA_QM_0_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
241
242/* DMA_QM_0_CQ_CFG0 */
243#define DMA_QM_0_CQ_CFG0_RESERVED_SHIFT 0
244#define DMA_QM_0_CQ_CFG0_RESERVED_MASK 0x1
245
246/* DMA_QM_0_CQ_CFG1 */
247#define DMA_QM_0_CQ_CFG1_CREDIT_LIM_SHIFT 0
248#define DMA_QM_0_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
249#define DMA_QM_0_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
250#define DMA_QM_0_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
251
252/* DMA_QM_0_CQ_ARUSER */
253#define DMA_QM_0_CQ_ARUSER_NOSNOOP_SHIFT 0
254#define DMA_QM_0_CQ_ARUSER_NOSNOOP_MASK 0x1
255#define DMA_QM_0_CQ_ARUSER_WORD_SHIFT 1
256#define DMA_QM_0_CQ_ARUSER_WORD_MASK 0x2
257
258/* DMA_QM_0_CQ_PTR_LO */
259#define DMA_QM_0_CQ_PTR_LO_VAL_SHIFT 0
260#define DMA_QM_0_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
261
262/* DMA_QM_0_CQ_PTR_HI */
263#define DMA_QM_0_CQ_PTR_HI_VAL_SHIFT 0
264#define DMA_QM_0_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
265
266/* DMA_QM_0_CQ_TSIZE */
267#define DMA_QM_0_CQ_TSIZE_VAL_SHIFT 0
268#define DMA_QM_0_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
269
270/* DMA_QM_0_CQ_CTL */
271#define DMA_QM_0_CQ_CTL_RPT_SHIFT 0
272#define DMA_QM_0_CQ_CTL_RPT_MASK 0xFFFF
273#define DMA_QM_0_CQ_CTL_CTL_SHIFT 16
274#define DMA_QM_0_CQ_CTL_CTL_MASK 0xFFFF0000
275
276/* DMA_QM_0_CQ_PTR_LO_STS */
277#define DMA_QM_0_CQ_PTR_LO_STS_VAL_SHIFT 0
278#define DMA_QM_0_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
279
280/* DMA_QM_0_CQ_PTR_HI_STS */
281#define DMA_QM_0_CQ_PTR_HI_STS_VAL_SHIFT 0
282#define DMA_QM_0_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
283
284/* DMA_QM_0_CQ_TSIZE_STS */
285#define DMA_QM_0_CQ_TSIZE_STS_VAL_SHIFT 0
286#define DMA_QM_0_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
287
288/* DMA_QM_0_CQ_CTL_STS */
289#define DMA_QM_0_CQ_CTL_STS_RPT_SHIFT 0
290#define DMA_QM_0_CQ_CTL_STS_RPT_MASK 0xFFFF
291#define DMA_QM_0_CQ_CTL_STS_CTL_SHIFT 16
292#define DMA_QM_0_CQ_CTL_STS_CTL_MASK 0xFFFF0000
293
294/* DMA_QM_0_CQ_STS0 */
295#define DMA_QM_0_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
296#define DMA_QM_0_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
297#define DMA_QM_0_CQ_STS0_CQ_FREE_CNT_SHIFT 16
298#define DMA_QM_0_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
299
300/* DMA_QM_0_CQ_STS1 */
301#define DMA_QM_0_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
302#define DMA_QM_0_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
303#define DMA_QM_0_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
304#define DMA_QM_0_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
305#define DMA_QM_0_CQ_STS1_CQ_BUSY_SHIFT 31
306#define DMA_QM_0_CQ_STS1_CQ_BUSY_MASK 0x80000000
307
308/* DMA_QM_0_CQ_RD_RATE_LIM_EN */
309#define DMA_QM_0_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
310#define DMA_QM_0_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
311
312/* DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN */
313#define DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
314#define DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
315
316/* DMA_QM_0_CQ_RD_RATE_LIM_SAT */
317#define DMA_QM_0_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
318#define DMA_QM_0_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
319
320/* DMA_QM_0_CQ_RD_RATE_LIM_TOUT */
321#define DMA_QM_0_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
322#define DMA_QM_0_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
323
324/* DMA_QM_0_CQ_IFIFO_CNT */
325#define DMA_QM_0_CQ_IFIFO_CNT_VAL_SHIFT 0
326#define DMA_QM_0_CQ_IFIFO_CNT_VAL_MASK 0x3
327
328/* DMA_QM_0_CP_MSG_BASE0_ADDR_LO */
329#define DMA_QM_0_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
330#define DMA_QM_0_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
331
332/* DMA_QM_0_CP_MSG_BASE0_ADDR_HI */
333#define DMA_QM_0_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
334#define DMA_QM_0_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
335
336/* DMA_QM_0_CP_MSG_BASE1_ADDR_LO */
337#define DMA_QM_0_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
338#define DMA_QM_0_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
339
340/* DMA_QM_0_CP_MSG_BASE1_ADDR_HI */
341#define DMA_QM_0_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
342#define DMA_QM_0_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
343
344/* DMA_QM_0_CP_MSG_BASE2_ADDR_LO */
345#define DMA_QM_0_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
346#define DMA_QM_0_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
347
348/* DMA_QM_0_CP_MSG_BASE2_ADDR_HI */
349#define DMA_QM_0_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
350#define DMA_QM_0_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
351
352/* DMA_QM_0_CP_MSG_BASE3_ADDR_LO */
353#define DMA_QM_0_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
354#define DMA_QM_0_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
355
356/* DMA_QM_0_CP_MSG_BASE3_ADDR_HI */
357#define DMA_QM_0_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
358#define DMA_QM_0_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
359
360/* DMA_QM_0_CP_LDMA_TSIZE_OFFSET */
361#define DMA_QM_0_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
362#define DMA_QM_0_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
363
364/* DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET */
365#define DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
366#define DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
367
368/* DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET */
369#define DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
370#define DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
371
372/* DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET */
373#define DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
374#define DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
375
376/* DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET */
377#define DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
378#define DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
379
380/* DMA_QM_0_CP_LDMA_COMMIT_OFFSET */
381#define DMA_QM_0_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
382#define DMA_QM_0_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
383
384/* DMA_QM_0_CP_FENCE0_RDATA */
385#define DMA_QM_0_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
386#define DMA_QM_0_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
387
388/* DMA_QM_0_CP_FENCE1_RDATA */
389#define DMA_QM_0_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
390#define DMA_QM_0_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
391
392/* DMA_QM_0_CP_FENCE2_RDATA */
393#define DMA_QM_0_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
394#define DMA_QM_0_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
395
396/* DMA_QM_0_CP_FENCE3_RDATA */
397#define DMA_QM_0_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
398#define DMA_QM_0_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
399
400/* DMA_QM_0_CP_FENCE0_CNT */
401#define DMA_QM_0_CP_FENCE0_CNT_VAL_SHIFT 0
402#define DMA_QM_0_CP_FENCE0_CNT_VAL_MASK 0xFF
403
404/* DMA_QM_0_CP_FENCE1_CNT */
405#define DMA_QM_0_CP_FENCE1_CNT_VAL_SHIFT 0
406#define DMA_QM_0_CP_FENCE1_CNT_VAL_MASK 0xFF
407
408/* DMA_QM_0_CP_FENCE2_CNT */
409#define DMA_QM_0_CP_FENCE2_CNT_VAL_SHIFT 0
410#define DMA_QM_0_CP_FENCE2_CNT_VAL_MASK 0xFF
411
412/* DMA_QM_0_CP_FENCE3_CNT */
413#define DMA_QM_0_CP_FENCE3_CNT_VAL_SHIFT 0
414#define DMA_QM_0_CP_FENCE3_CNT_VAL_MASK 0xFF
415
416/* DMA_QM_0_CP_STS */
417#define DMA_QM_0_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
418#define DMA_QM_0_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
419#define DMA_QM_0_CP_STS_ERDY_SHIFT 16
420#define DMA_QM_0_CP_STS_ERDY_MASK 0x10000
421#define DMA_QM_0_CP_STS_RRDY_SHIFT 17
422#define DMA_QM_0_CP_STS_RRDY_MASK 0x20000
423#define DMA_QM_0_CP_STS_MRDY_SHIFT 18
424#define DMA_QM_0_CP_STS_MRDY_MASK 0x40000
425#define DMA_QM_0_CP_STS_SW_STOP_SHIFT 19
426#define DMA_QM_0_CP_STS_SW_STOP_MASK 0x80000
427#define DMA_QM_0_CP_STS_FENCE_ID_SHIFT 20
428#define DMA_QM_0_CP_STS_FENCE_ID_MASK 0x300000
429#define DMA_QM_0_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
430#define DMA_QM_0_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
431
432/* DMA_QM_0_CP_CURRENT_INST_LO */
433#define DMA_QM_0_CP_CURRENT_INST_LO_VAL_SHIFT 0
434#define DMA_QM_0_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
435
436/* DMA_QM_0_CP_CURRENT_INST_HI */
437#define DMA_QM_0_CP_CURRENT_INST_HI_VAL_SHIFT 0
438#define DMA_QM_0_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
439
440/* DMA_QM_0_CP_BARRIER_CFG */
441#define DMA_QM_0_CP_BARRIER_CFG_EBGUARD_SHIFT 0
442#define DMA_QM_0_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
443
444/* DMA_QM_0_CP_DBG_0 */
445#define DMA_QM_0_CP_DBG_0_VAL_SHIFT 0
446#define DMA_QM_0_CP_DBG_0_VAL_MASK 0xFF
447
448/* DMA_QM_0_PQ_BUF_ADDR */
449#define DMA_QM_0_PQ_BUF_ADDR_VAL_SHIFT 0
450#define DMA_QM_0_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
451
452/* DMA_QM_0_PQ_BUF_RDATA */
453#define DMA_QM_0_PQ_BUF_RDATA_VAL_SHIFT 0
454#define DMA_QM_0_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
455
456/* DMA_QM_0_CQ_BUF_ADDR */
457#define DMA_QM_0_CQ_BUF_ADDR_VAL_SHIFT 0
458#define DMA_QM_0_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
459
460/* DMA_QM_0_CQ_BUF_RDATA */
461#define DMA_QM_0_CQ_BUF_RDATA_VAL_SHIFT 0
462#define DMA_QM_0_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
463
464#endif /* ASIC_REG_DMA_QM_0_MASKS_H_ */
465
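The GLBL_CFG0 bits defined at the top of the masks header above gate the four QMAN sub-engines (PQF, CQF, CP, DMA). The snippet below only illustrates how those single-bit masks compose into one enable word; it is not the driver's actual initialization code (the driver writes such values through its own register I/O helpers), and the constants are copied from the header above.

#include <stdint.h>
#include <stdio.h>

/* Values copied from dma_qm_0_masks.h above. */
#define DMA_QM_0_GLBL_CFG0_PQF_EN_MASK 0x1
#define DMA_QM_0_GLBL_CFG0_CQF_EN_MASK 0x2
#define DMA_QM_0_GLBL_CFG0_CP_EN_MASK  0x4
#define DMA_QM_0_GLBL_CFG0_DMA_EN_MASK 0x8

int main(void)
{
        /* Compose a value that enables all four QMAN sub-engines. */
        uint32_t cfg0 = DMA_QM_0_GLBL_CFG0_PQF_EN_MASK |
                        DMA_QM_0_GLBL_CFG0_CQF_EN_MASK |
                        DMA_QM_0_GLBL_CFG0_CP_EN_MASK |
                        DMA_QM_0_GLBL_CFG0_DMA_EN_MASK;

        printf("GLBL_CFG0 enable value = 0x%x\n", (unsigned int)cfg0); /* 0xf */
        return 0;
}
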
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
new file mode 100644
index 000000000000..c693bc5dcb22
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
@@ -0,0 +1,179 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_QM_0_REGS_H_
14#define ASIC_REG_DMA_QM_0_REGS_H_
15
16/*
17 *****************************************
18 * DMA_QM_0 (Prototype: QMAN)
19 *****************************************
20 */
21
22#define mmDMA_QM_0_GLBL_CFG0 0x400000
23
24#define mmDMA_QM_0_GLBL_CFG1 0x400004
25
26#define mmDMA_QM_0_GLBL_PROT 0x400008
27
28#define mmDMA_QM_0_GLBL_ERR_CFG 0x40000C
29
30#define mmDMA_QM_0_GLBL_ERR_ADDR_LO 0x400010
31
32#define mmDMA_QM_0_GLBL_ERR_ADDR_HI 0x400014
33
34#define mmDMA_QM_0_GLBL_ERR_WDATA 0x400018
35
36#define mmDMA_QM_0_GLBL_SECURE_PROPS 0x40001C
37
38#define mmDMA_QM_0_GLBL_NON_SECURE_PROPS 0x400020
39
40#define mmDMA_QM_0_GLBL_STS0 0x400024
41
42#define mmDMA_QM_0_GLBL_STS1 0x400028
43
44#define mmDMA_QM_0_PQ_BASE_LO 0x400060
45
46#define mmDMA_QM_0_PQ_BASE_HI 0x400064
47
48#define mmDMA_QM_0_PQ_SIZE 0x400068
49
50#define mmDMA_QM_0_PQ_PI 0x40006C
51
52#define mmDMA_QM_0_PQ_CI 0x400070
53
54#define mmDMA_QM_0_PQ_CFG0 0x400074
55
56#define mmDMA_QM_0_PQ_CFG1 0x400078
57
58#define mmDMA_QM_0_PQ_ARUSER 0x40007C
59
60#define mmDMA_QM_0_PQ_PUSH0 0x400080
61
62#define mmDMA_QM_0_PQ_PUSH1 0x400084
63
64#define mmDMA_QM_0_PQ_PUSH2 0x400088
65
66#define mmDMA_QM_0_PQ_PUSH3 0x40008C
67
68#define mmDMA_QM_0_PQ_STS0 0x400090
69
70#define mmDMA_QM_0_PQ_STS1 0x400094
71
72#define mmDMA_QM_0_PQ_RD_RATE_LIM_EN 0x4000A0
73
74#define mmDMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN 0x4000A4
75
76#define mmDMA_QM_0_PQ_RD_RATE_LIM_SAT 0x4000A8
77
78#define mmDMA_QM_0_PQ_RD_RATE_LIM_TOUT 0x4000AC
79
80#define mmDMA_QM_0_CQ_CFG0 0x4000B0
81
82#define mmDMA_QM_0_CQ_CFG1 0x4000B4
83
84#define mmDMA_QM_0_CQ_ARUSER 0x4000B8
85
86#define mmDMA_QM_0_CQ_PTR_LO 0x4000C0
87
88#define mmDMA_QM_0_CQ_PTR_HI 0x4000C4
89
90#define mmDMA_QM_0_CQ_TSIZE 0x4000C8
91
92#define mmDMA_QM_0_CQ_CTL 0x4000CC
93
94#define mmDMA_QM_0_CQ_PTR_LO_STS 0x4000D4
95
96#define mmDMA_QM_0_CQ_PTR_HI_STS 0x4000D8
97
98#define mmDMA_QM_0_CQ_TSIZE_STS 0x4000DC
99
100#define mmDMA_QM_0_CQ_CTL_STS 0x4000E0
101
102#define mmDMA_QM_0_CQ_STS0 0x4000E4
103
104#define mmDMA_QM_0_CQ_STS1 0x4000E8
105
106#define mmDMA_QM_0_CQ_RD_RATE_LIM_EN 0x4000F0
107
108#define mmDMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN 0x4000F4
109
110#define mmDMA_QM_0_CQ_RD_RATE_LIM_SAT 0x4000F8
111
112#define mmDMA_QM_0_CQ_RD_RATE_LIM_TOUT 0x4000FC
113
114#define mmDMA_QM_0_CQ_IFIFO_CNT 0x400108
115
116#define mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO 0x400120
117
118#define mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI 0x400124
119
120#define mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO 0x400128
121
122#define mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI 0x40012C
123
124#define mmDMA_QM_0_CP_MSG_BASE2_ADDR_LO 0x400130
125
126#define mmDMA_QM_0_CP_MSG_BASE2_ADDR_HI 0x400134
127
128#define mmDMA_QM_0_CP_MSG_BASE3_ADDR_LO 0x400138
129
130#define mmDMA_QM_0_CP_MSG_BASE3_ADDR_HI 0x40013C
131
132#define mmDMA_QM_0_CP_LDMA_TSIZE_OFFSET 0x400140
133
134#define mmDMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET 0x400144
135
136#define mmDMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET 0x400148
137
138#define mmDMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET 0x40014C
139
140#define mmDMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET 0x400150
141
142#define mmDMA_QM_0_CP_LDMA_COMMIT_OFFSET 0x400154
143
144#define mmDMA_QM_0_CP_FENCE0_RDATA 0x400158
145
146#define mmDMA_QM_0_CP_FENCE1_RDATA 0x40015C
147
148#define mmDMA_QM_0_CP_FENCE2_RDATA 0x400160
149
150#define mmDMA_QM_0_CP_FENCE3_RDATA 0x400164
151
152#define mmDMA_QM_0_CP_FENCE0_CNT 0x400168
153
154#define mmDMA_QM_0_CP_FENCE1_CNT 0x40016C
155
156#define mmDMA_QM_0_CP_FENCE2_CNT 0x400170
157
158#define mmDMA_QM_0_CP_FENCE3_CNT 0x400174
159
160#define mmDMA_QM_0_CP_STS 0x400178
161
162#define mmDMA_QM_0_CP_CURRENT_INST_LO 0x40017C
163
164#define mmDMA_QM_0_CP_CURRENT_INST_HI 0x400180
165
166#define mmDMA_QM_0_CP_BARRIER_CFG 0x400184
167
168#define mmDMA_QM_0_CP_DBG_0 0x400188
169
170#define mmDMA_QM_0_PQ_BUF_ADDR 0x400300
171
172#define mmDMA_QM_0_PQ_BUF_RDATA 0x400304
173
174#define mmDMA_QM_0_CQ_BUF_ADDR 0x400308
175
176#define mmDMA_QM_0_CQ_BUF_RDATA 0x40030C
177
178#endif /* ASIC_REG_DMA_QM_0_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
new file mode 100644
index 000000000000..da928390f89c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
@@ -0,0 +1,179 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_QM_1_REGS_H_
14#define ASIC_REG_DMA_QM_1_REGS_H_
15
16/*
17 *****************************************
18 * DMA_QM_1 (Prototype: QMAN)
19 *****************************************
20 */
21
22#define mmDMA_QM_1_GLBL_CFG0 0x408000
23
24#define mmDMA_QM_1_GLBL_CFG1 0x408004
25
26#define mmDMA_QM_1_GLBL_PROT 0x408008
27
28#define mmDMA_QM_1_GLBL_ERR_CFG 0x40800C
29
30#define mmDMA_QM_1_GLBL_ERR_ADDR_LO 0x408010
31
32#define mmDMA_QM_1_GLBL_ERR_ADDR_HI 0x408014
33
34#define mmDMA_QM_1_GLBL_ERR_WDATA 0x408018
35
36#define mmDMA_QM_1_GLBL_SECURE_PROPS 0x40801C
37
38#define mmDMA_QM_1_GLBL_NON_SECURE_PROPS 0x408020
39
40#define mmDMA_QM_1_GLBL_STS0 0x408024
41
42#define mmDMA_QM_1_GLBL_STS1 0x408028
43
44#define mmDMA_QM_1_PQ_BASE_LO 0x408060
45
46#define mmDMA_QM_1_PQ_BASE_HI 0x408064
47
48#define mmDMA_QM_1_PQ_SIZE 0x408068
49
50#define mmDMA_QM_1_PQ_PI 0x40806C
51
52#define mmDMA_QM_1_PQ_CI 0x408070
53
54#define mmDMA_QM_1_PQ_CFG0 0x408074
55
56#define mmDMA_QM_1_PQ_CFG1 0x408078
57
58#define mmDMA_QM_1_PQ_ARUSER 0x40807C
59
60#define mmDMA_QM_1_PQ_PUSH0 0x408080
61
62#define mmDMA_QM_1_PQ_PUSH1 0x408084
63
64#define mmDMA_QM_1_PQ_PUSH2 0x408088
65
66#define mmDMA_QM_1_PQ_PUSH3 0x40808C
67
68#define mmDMA_QM_1_PQ_STS0 0x408090
69
70#define mmDMA_QM_1_PQ_STS1 0x408094
71
72#define mmDMA_QM_1_PQ_RD_RATE_LIM_EN 0x4080A0
73
74#define mmDMA_QM_1_PQ_RD_RATE_LIM_RST_TOKEN 0x4080A4
75
76#define mmDMA_QM_1_PQ_RD_RATE_LIM_SAT 0x4080A8
77
78#define mmDMA_QM_1_PQ_RD_RATE_LIM_TOUT 0x4080AC
79
80#define mmDMA_QM_1_CQ_CFG0 0x4080B0
81
82#define mmDMA_QM_1_CQ_CFG1 0x4080B4
83
84#define mmDMA_QM_1_CQ_ARUSER 0x4080B8
85
86#define mmDMA_QM_1_CQ_PTR_LO 0x4080C0
87
88#define mmDMA_QM_1_CQ_PTR_HI 0x4080C4
89
90#define mmDMA_QM_1_CQ_TSIZE 0x4080C8
91
92#define mmDMA_QM_1_CQ_CTL 0x4080CC
93
94#define mmDMA_QM_1_CQ_PTR_LO_STS 0x4080D4
95
96#define mmDMA_QM_1_CQ_PTR_HI_STS 0x4080D8
97
98#define mmDMA_QM_1_CQ_TSIZE_STS 0x4080DC
99
100#define mmDMA_QM_1_CQ_CTL_STS 0x4080E0
101
102#define mmDMA_QM_1_CQ_STS0 0x4080E4
103
104#define mmDMA_QM_1_CQ_STS1 0x4080E8
105
106#define mmDMA_QM_1_CQ_RD_RATE_LIM_EN 0x4080F0
107
108#define mmDMA_QM_1_CQ_RD_RATE_LIM_RST_TOKEN 0x4080F4
109
110#define mmDMA_QM_1_CQ_RD_RATE_LIM_SAT 0x4080F8
111
112#define mmDMA_QM_1_CQ_RD_RATE_LIM_TOUT 0x4080FC
113
114#define mmDMA_QM_1_CQ_IFIFO_CNT 0x408108
115
116#define mmDMA_QM_1_CP_MSG_BASE0_ADDR_LO 0x408120
117
118#define mmDMA_QM_1_CP_MSG_BASE0_ADDR_HI 0x408124
119
120#define mmDMA_QM_1_CP_MSG_BASE1_ADDR_LO 0x408128
121
122#define mmDMA_QM_1_CP_MSG_BASE1_ADDR_HI 0x40812C
123
124#define mmDMA_QM_1_CP_MSG_BASE2_ADDR_LO 0x408130
125
126#define mmDMA_QM_1_CP_MSG_BASE2_ADDR_HI 0x408134
127
128#define mmDMA_QM_1_CP_MSG_BASE3_ADDR_LO 0x408138
129
130#define mmDMA_QM_1_CP_MSG_BASE3_ADDR_HI 0x40813C
131
132#define mmDMA_QM_1_CP_LDMA_TSIZE_OFFSET 0x408140
133
134#define mmDMA_QM_1_CP_LDMA_SRC_BASE_LO_OFFSET 0x408144
135
136#define mmDMA_QM_1_CP_LDMA_SRC_BASE_HI_OFFSET 0x408148
137
138#define mmDMA_QM_1_CP_LDMA_DST_BASE_LO_OFFSET 0x40814C
139
140#define mmDMA_QM_1_CP_LDMA_DST_BASE_HI_OFFSET 0x408150
141
142#define mmDMA_QM_1_CP_LDMA_COMMIT_OFFSET 0x408154
143
144#define mmDMA_QM_1_CP_FENCE0_RDATA 0x408158
145
146#define mmDMA_QM_1_CP_FENCE1_RDATA 0x40815C
147
148#define mmDMA_QM_1_CP_FENCE2_RDATA 0x408160
149
150#define mmDMA_QM_1_CP_FENCE3_RDATA 0x408164
151
152#define mmDMA_QM_1_CP_FENCE0_CNT 0x408168
153
154#define mmDMA_QM_1_CP_FENCE1_CNT 0x40816C
155
156#define mmDMA_QM_1_CP_FENCE2_CNT 0x408170
157
158#define mmDMA_QM_1_CP_FENCE3_CNT 0x408174
159
160#define mmDMA_QM_1_CP_STS 0x408178
161
162#define mmDMA_QM_1_CP_CURRENT_INST_LO 0x40817C
163
164#define mmDMA_QM_1_CP_CURRENT_INST_HI 0x408180
165
166#define mmDMA_QM_1_CP_BARRIER_CFG 0x408184
167
168#define mmDMA_QM_1_CP_DBG_0 0x408188
169
170#define mmDMA_QM_1_PQ_BUF_ADDR 0x408300
171
172#define mmDMA_QM_1_PQ_BUF_RDATA 0x408304
173
174#define mmDMA_QM_1_CQ_BUF_ADDR 0x408308
175
176#define mmDMA_QM_1_CQ_BUF_RDATA 0x40830C
177
178#endif /* ASIC_REG_DMA_QM_1_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
new file mode 100644
index 000000000000..b4f06e9b71d6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
@@ -0,0 +1,179 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_QM_2_REGS_H_
14#define ASIC_REG_DMA_QM_2_REGS_H_
15
16/*
17 *****************************************
18 * DMA_QM_2 (Prototype: QMAN)
19 *****************************************
20 */
21
22#define mmDMA_QM_2_GLBL_CFG0 0x410000
23
24#define mmDMA_QM_2_GLBL_CFG1 0x410004
25
26#define mmDMA_QM_2_GLBL_PROT 0x410008
27
28#define mmDMA_QM_2_GLBL_ERR_CFG 0x41000C
29
30#define mmDMA_QM_2_GLBL_ERR_ADDR_LO 0x410010
31
32#define mmDMA_QM_2_GLBL_ERR_ADDR_HI 0x410014
33
34#define mmDMA_QM_2_GLBL_ERR_WDATA 0x410018
35
36#define mmDMA_QM_2_GLBL_SECURE_PROPS 0x41001C
37
38#define mmDMA_QM_2_GLBL_NON_SECURE_PROPS 0x410020
39
40#define mmDMA_QM_2_GLBL_STS0 0x410024
41
42#define mmDMA_QM_2_GLBL_STS1 0x410028
43
44#define mmDMA_QM_2_PQ_BASE_LO 0x410060
45
46#define mmDMA_QM_2_PQ_BASE_HI 0x410064
47
48#define mmDMA_QM_2_PQ_SIZE 0x410068
49
50#define mmDMA_QM_2_PQ_PI 0x41006C
51
52#define mmDMA_QM_2_PQ_CI 0x410070
53
54#define mmDMA_QM_2_PQ_CFG0 0x410074
55
56#define mmDMA_QM_2_PQ_CFG1 0x410078
57
58#define mmDMA_QM_2_PQ_ARUSER 0x41007C
59
60#define mmDMA_QM_2_PQ_PUSH0 0x410080
61
62#define mmDMA_QM_2_PQ_PUSH1 0x410084
63
64#define mmDMA_QM_2_PQ_PUSH2 0x410088
65
66#define mmDMA_QM_2_PQ_PUSH3 0x41008C
67
68#define mmDMA_QM_2_PQ_STS0 0x410090
69
70#define mmDMA_QM_2_PQ_STS1 0x410094
71
72#define mmDMA_QM_2_PQ_RD_RATE_LIM_EN 0x4100A0
73
74#define mmDMA_QM_2_PQ_RD_RATE_LIM_RST_TOKEN 0x4100A4
75
76#define mmDMA_QM_2_PQ_RD_RATE_LIM_SAT 0x4100A8
77
78#define mmDMA_QM_2_PQ_RD_RATE_LIM_TOUT 0x4100AC
79
80#define mmDMA_QM_2_CQ_CFG0 0x4100B0
81
82#define mmDMA_QM_2_CQ_CFG1 0x4100B4
83
84#define mmDMA_QM_2_CQ_ARUSER 0x4100B8
85
86#define mmDMA_QM_2_CQ_PTR_LO 0x4100C0
87
88#define mmDMA_QM_2_CQ_PTR_HI 0x4100C4
89
90#define mmDMA_QM_2_CQ_TSIZE 0x4100C8
91
92#define mmDMA_QM_2_CQ_CTL 0x4100CC
93
94#define mmDMA_QM_2_CQ_PTR_LO_STS 0x4100D4
95
96#define mmDMA_QM_2_CQ_PTR_HI_STS 0x4100D8
97
98#define mmDMA_QM_2_CQ_TSIZE_STS 0x4100DC
99
100#define mmDMA_QM_2_CQ_CTL_STS 0x4100E0
101
102#define mmDMA_QM_2_CQ_STS0 0x4100E4
103
104#define mmDMA_QM_2_CQ_STS1 0x4100E8
105
106#define mmDMA_QM_2_CQ_RD_RATE_LIM_EN 0x4100F0
107
108#define mmDMA_QM_2_CQ_RD_RATE_LIM_RST_TOKEN 0x4100F4
109
110#define mmDMA_QM_2_CQ_RD_RATE_LIM_SAT 0x4100F8
111
112#define mmDMA_QM_2_CQ_RD_RATE_LIM_TOUT 0x4100FC
113
114#define mmDMA_QM_2_CQ_IFIFO_CNT 0x410108
115
116#define mmDMA_QM_2_CP_MSG_BASE0_ADDR_LO 0x410120
117
118#define mmDMA_QM_2_CP_MSG_BASE0_ADDR_HI 0x410124
119
120#define mmDMA_QM_2_CP_MSG_BASE1_ADDR_LO 0x410128
121
122#define mmDMA_QM_2_CP_MSG_BASE1_ADDR_HI 0x41012C
123
124#define mmDMA_QM_2_CP_MSG_BASE2_ADDR_LO 0x410130
125
126#define mmDMA_QM_2_CP_MSG_BASE2_ADDR_HI 0x410134
127
128#define mmDMA_QM_2_CP_MSG_BASE3_ADDR_LO 0x410138
129
130#define mmDMA_QM_2_CP_MSG_BASE3_ADDR_HI 0x41013C
131
132#define mmDMA_QM_2_CP_LDMA_TSIZE_OFFSET 0x410140
133
134#define mmDMA_QM_2_CP_LDMA_SRC_BASE_LO_OFFSET 0x410144
135
136#define mmDMA_QM_2_CP_LDMA_SRC_BASE_HI_OFFSET 0x410148
137
138#define mmDMA_QM_2_CP_LDMA_DST_BASE_LO_OFFSET 0x41014C
139
140#define mmDMA_QM_2_CP_LDMA_DST_BASE_HI_OFFSET 0x410150
141
142#define mmDMA_QM_2_CP_LDMA_COMMIT_OFFSET 0x410154
143
144#define mmDMA_QM_2_CP_FENCE0_RDATA 0x410158
145
146#define mmDMA_QM_2_CP_FENCE1_RDATA 0x41015C
147
148#define mmDMA_QM_2_CP_FENCE2_RDATA 0x410160
149
150#define mmDMA_QM_2_CP_FENCE3_RDATA 0x410164
151
152#define mmDMA_QM_2_CP_FENCE0_CNT 0x410168
153
154#define mmDMA_QM_2_CP_FENCE1_CNT 0x41016C
155
156#define mmDMA_QM_2_CP_FENCE2_CNT 0x410170
157
158#define mmDMA_QM_2_CP_FENCE3_CNT 0x410174
159
160#define mmDMA_QM_2_CP_STS 0x410178
161
162#define mmDMA_QM_2_CP_CURRENT_INST_LO 0x41017C
163
164#define mmDMA_QM_2_CP_CURRENT_INST_HI 0x410180
165
166#define mmDMA_QM_2_CP_BARRIER_CFG 0x410184
167
168#define mmDMA_QM_2_CP_DBG_0 0x410188
169
170#define mmDMA_QM_2_PQ_BUF_ADDR 0x410300
171
172#define mmDMA_QM_2_PQ_BUF_RDATA 0x410304
173
174#define mmDMA_QM_2_CQ_BUF_ADDR 0x410308
175
176#define mmDMA_QM_2_CQ_BUF_RDATA 0x41030C
177
178#endif /* ASIC_REG_DMA_QM_2_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
new file mode 100644
index 000000000000..53e3cd78a06b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
@@ -0,0 +1,179 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_QM_3_REGS_H_
14#define ASIC_REG_DMA_QM_3_REGS_H_
15
16/*
17 *****************************************
18 * DMA_QM_3 (Prototype: QMAN)
19 *****************************************
20 */
21
22#define mmDMA_QM_3_GLBL_CFG0 0x418000
23
24#define mmDMA_QM_3_GLBL_CFG1 0x418004
25
26#define mmDMA_QM_3_GLBL_PROT 0x418008
27
28#define mmDMA_QM_3_GLBL_ERR_CFG 0x41800C
29
30#define mmDMA_QM_3_GLBL_ERR_ADDR_LO 0x418010
31
32#define mmDMA_QM_3_GLBL_ERR_ADDR_HI 0x418014
33
34#define mmDMA_QM_3_GLBL_ERR_WDATA 0x418018
35
36#define mmDMA_QM_3_GLBL_SECURE_PROPS 0x41801C
37
38#define mmDMA_QM_3_GLBL_NON_SECURE_PROPS 0x418020
39
40#define mmDMA_QM_3_GLBL_STS0 0x418024
41
42#define mmDMA_QM_3_GLBL_STS1 0x418028
43
44#define mmDMA_QM_3_PQ_BASE_LO 0x418060
45
46#define mmDMA_QM_3_PQ_BASE_HI 0x418064
47
48#define mmDMA_QM_3_PQ_SIZE 0x418068
49
50#define mmDMA_QM_3_PQ_PI 0x41806C
51
52#define mmDMA_QM_3_PQ_CI 0x418070
53
54#define mmDMA_QM_3_PQ_CFG0 0x418074
55
56#define mmDMA_QM_3_PQ_CFG1 0x418078
57
58#define mmDMA_QM_3_PQ_ARUSER 0x41807C
59
60#define mmDMA_QM_3_PQ_PUSH0 0x418080
61
62#define mmDMA_QM_3_PQ_PUSH1 0x418084
63
64#define mmDMA_QM_3_PQ_PUSH2 0x418088
65
66#define mmDMA_QM_3_PQ_PUSH3 0x41808C
67
68#define mmDMA_QM_3_PQ_STS0 0x418090
69
70#define mmDMA_QM_3_PQ_STS1 0x418094
71
72#define mmDMA_QM_3_PQ_RD_RATE_LIM_EN 0x4180A0
73
74#define mmDMA_QM_3_PQ_RD_RATE_LIM_RST_TOKEN 0x4180A4
75
76#define mmDMA_QM_3_PQ_RD_RATE_LIM_SAT 0x4180A8
77
78#define mmDMA_QM_3_PQ_RD_RATE_LIM_TOUT 0x4180AC
79
80#define mmDMA_QM_3_CQ_CFG0 0x4180B0
81
82#define mmDMA_QM_3_CQ_CFG1 0x4180B4
83
84#define mmDMA_QM_3_CQ_ARUSER 0x4180B8
85
86#define mmDMA_QM_3_CQ_PTR_LO 0x4180C0
87
88#define mmDMA_QM_3_CQ_PTR_HI 0x4180C4
89
90#define mmDMA_QM_3_CQ_TSIZE 0x4180C8
91
92#define mmDMA_QM_3_CQ_CTL 0x4180CC
93
94#define mmDMA_QM_3_CQ_PTR_LO_STS 0x4180D4
95
96#define mmDMA_QM_3_CQ_PTR_HI_STS 0x4180D8
97
98#define mmDMA_QM_3_CQ_TSIZE_STS 0x4180DC
99
100#define mmDMA_QM_3_CQ_CTL_STS 0x4180E0
101
102#define mmDMA_QM_3_CQ_STS0 0x4180E4
103
104#define mmDMA_QM_3_CQ_STS1 0x4180E8
105
106#define mmDMA_QM_3_CQ_RD_RATE_LIM_EN 0x4180F0
107
108#define mmDMA_QM_3_CQ_RD_RATE_LIM_RST_TOKEN 0x4180F4
109
110#define mmDMA_QM_3_CQ_RD_RATE_LIM_SAT 0x4180F8
111
112#define mmDMA_QM_3_CQ_RD_RATE_LIM_TOUT 0x4180FC
113
114#define mmDMA_QM_3_CQ_IFIFO_CNT 0x418108
115
116#define mmDMA_QM_3_CP_MSG_BASE0_ADDR_LO 0x418120
117
118#define mmDMA_QM_3_CP_MSG_BASE0_ADDR_HI 0x418124
119
120#define mmDMA_QM_3_CP_MSG_BASE1_ADDR_LO 0x418128
121
122#define mmDMA_QM_3_CP_MSG_BASE1_ADDR_HI 0x41812C
123
124#define mmDMA_QM_3_CP_MSG_BASE2_ADDR_LO 0x418130
125
126#define mmDMA_QM_3_CP_MSG_BASE2_ADDR_HI 0x418134
127
128#define mmDMA_QM_3_CP_MSG_BASE3_ADDR_LO 0x418138
129
130#define mmDMA_QM_3_CP_MSG_BASE3_ADDR_HI 0x41813C
131
132#define mmDMA_QM_3_CP_LDMA_TSIZE_OFFSET 0x418140
133
134#define mmDMA_QM_3_CP_LDMA_SRC_BASE_LO_OFFSET 0x418144
135
136#define mmDMA_QM_3_CP_LDMA_SRC_BASE_HI_OFFSET 0x418148
137
138#define mmDMA_QM_3_CP_LDMA_DST_BASE_LO_OFFSET 0x41814C
139
140#define mmDMA_QM_3_CP_LDMA_DST_BASE_HI_OFFSET 0x418150
141
142#define mmDMA_QM_3_CP_LDMA_COMMIT_OFFSET 0x418154
143
144#define mmDMA_QM_3_CP_FENCE0_RDATA 0x418158
145
146#define mmDMA_QM_3_CP_FENCE1_RDATA 0x41815C
147
148#define mmDMA_QM_3_CP_FENCE2_RDATA 0x418160
149
150#define mmDMA_QM_3_CP_FENCE3_RDATA 0x418164
151
152#define mmDMA_QM_3_CP_FENCE0_CNT 0x418168
153
154#define mmDMA_QM_3_CP_FENCE1_CNT 0x41816C
155
156#define mmDMA_QM_3_CP_FENCE2_CNT 0x418170
157
158#define mmDMA_QM_3_CP_FENCE3_CNT 0x418174
159
160#define mmDMA_QM_3_CP_STS 0x418178
161
162#define mmDMA_QM_3_CP_CURRENT_INST_LO 0x41817C
163
164#define mmDMA_QM_3_CP_CURRENT_INST_HI 0x418180
165
166#define mmDMA_QM_3_CP_BARRIER_CFG 0x418184
167
168#define mmDMA_QM_3_CP_DBG_0 0x418188
169
170#define mmDMA_QM_3_PQ_BUF_ADDR 0x418300
171
172#define mmDMA_QM_3_PQ_BUF_RDATA 0x418304
173
174#define mmDMA_QM_3_CQ_BUF_ADDR 0x418308
175
176#define mmDMA_QM_3_CQ_BUF_RDATA 0x41830C
177
178#endif /* ASIC_REG_DMA_QM_3_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
new file mode 100644
index 000000000000..e0eb5f260201
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
@@ -0,0 +1,179 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_DMA_QM_4_REGS_H_
14#define ASIC_REG_DMA_QM_4_REGS_H_
15
16/*
17 *****************************************
18 * DMA_QM_4 (Prototype: QMAN)
19 *****************************************
20 */
21
22#define mmDMA_QM_4_GLBL_CFG0 0x420000
23
24#define mmDMA_QM_4_GLBL_CFG1 0x420004
25
26#define mmDMA_QM_4_GLBL_PROT 0x420008
27
28#define mmDMA_QM_4_GLBL_ERR_CFG 0x42000C
29
30#define mmDMA_QM_4_GLBL_ERR_ADDR_LO 0x420010
31
32#define mmDMA_QM_4_GLBL_ERR_ADDR_HI 0x420014
33
34#define mmDMA_QM_4_GLBL_ERR_WDATA 0x420018
35
36#define mmDMA_QM_4_GLBL_SECURE_PROPS 0x42001C
37
38#define mmDMA_QM_4_GLBL_NON_SECURE_PROPS 0x420020
39
40#define mmDMA_QM_4_GLBL_STS0 0x420024
41
42#define mmDMA_QM_4_GLBL_STS1 0x420028
43
44#define mmDMA_QM_4_PQ_BASE_LO 0x420060
45
46#define mmDMA_QM_4_PQ_BASE_HI 0x420064
47
48#define mmDMA_QM_4_PQ_SIZE 0x420068
49
50#define mmDMA_QM_4_PQ_PI 0x42006C
51
52#define mmDMA_QM_4_PQ_CI 0x420070
53
54#define mmDMA_QM_4_PQ_CFG0 0x420074
55
56#define mmDMA_QM_4_PQ_CFG1 0x420078
57
58#define mmDMA_QM_4_PQ_ARUSER 0x42007C
59
60#define mmDMA_QM_4_PQ_PUSH0 0x420080
61
62#define mmDMA_QM_4_PQ_PUSH1 0x420084
63
64#define mmDMA_QM_4_PQ_PUSH2 0x420088
65
66#define mmDMA_QM_4_PQ_PUSH3 0x42008C
67
68#define mmDMA_QM_4_PQ_STS0 0x420090
69
70#define mmDMA_QM_4_PQ_STS1 0x420094
71
72#define mmDMA_QM_4_PQ_RD_RATE_LIM_EN 0x4200A0
73
74#define mmDMA_QM_4_PQ_RD_RATE_LIM_RST_TOKEN 0x4200A4
75
76#define mmDMA_QM_4_PQ_RD_RATE_LIM_SAT 0x4200A8
77
78#define mmDMA_QM_4_PQ_RD_RATE_LIM_TOUT 0x4200AC
79
80#define mmDMA_QM_4_CQ_CFG0 0x4200B0
81
82#define mmDMA_QM_4_CQ_CFG1 0x4200B4
83
84#define mmDMA_QM_4_CQ_ARUSER 0x4200B8
85
86#define mmDMA_QM_4_CQ_PTR_LO 0x4200C0
87
88#define mmDMA_QM_4_CQ_PTR_HI 0x4200C4
89
90#define mmDMA_QM_4_CQ_TSIZE 0x4200C8
91
92#define mmDMA_QM_4_CQ_CTL 0x4200CC
93
94#define mmDMA_QM_4_CQ_PTR_LO_STS 0x4200D4
95
96#define mmDMA_QM_4_CQ_PTR_HI_STS 0x4200D8
97
98#define mmDMA_QM_4_CQ_TSIZE_STS 0x4200DC
99
100#define mmDMA_QM_4_CQ_CTL_STS 0x4200E0
101
102#define mmDMA_QM_4_CQ_STS0 0x4200E4
103
104#define mmDMA_QM_4_CQ_STS1 0x4200E8
105
106#define mmDMA_QM_4_CQ_RD_RATE_LIM_EN 0x4200F0
107
108#define mmDMA_QM_4_CQ_RD_RATE_LIM_RST_TOKEN 0x4200F4
109
110#define mmDMA_QM_4_CQ_RD_RATE_LIM_SAT 0x4200F8
111
112#define mmDMA_QM_4_CQ_RD_RATE_LIM_TOUT 0x4200FC
113
114#define mmDMA_QM_4_CQ_IFIFO_CNT 0x420108
115
116#define mmDMA_QM_4_CP_MSG_BASE0_ADDR_LO 0x420120
117
118#define mmDMA_QM_4_CP_MSG_BASE0_ADDR_HI 0x420124
119
120#define mmDMA_QM_4_CP_MSG_BASE1_ADDR_LO 0x420128
121
122#define mmDMA_QM_4_CP_MSG_BASE1_ADDR_HI 0x42012C
123
124#define mmDMA_QM_4_CP_MSG_BASE2_ADDR_LO 0x420130
125
126#define mmDMA_QM_4_CP_MSG_BASE2_ADDR_HI 0x420134
127
128#define mmDMA_QM_4_CP_MSG_BASE3_ADDR_LO 0x420138
129
130#define mmDMA_QM_4_CP_MSG_BASE3_ADDR_HI 0x42013C
131
132#define mmDMA_QM_4_CP_LDMA_TSIZE_OFFSET 0x420140
133
134#define mmDMA_QM_4_CP_LDMA_SRC_BASE_LO_OFFSET 0x420144
135
136#define mmDMA_QM_4_CP_LDMA_SRC_BASE_HI_OFFSET 0x420148
137
138#define mmDMA_QM_4_CP_LDMA_DST_BASE_LO_OFFSET 0x42014C
139
140#define mmDMA_QM_4_CP_LDMA_DST_BASE_HI_OFFSET 0x420150
141
142#define mmDMA_QM_4_CP_LDMA_COMMIT_OFFSET 0x420154
143
144#define mmDMA_QM_4_CP_FENCE0_RDATA 0x420158
145
146#define mmDMA_QM_4_CP_FENCE1_RDATA 0x42015C
147
148#define mmDMA_QM_4_CP_FENCE2_RDATA 0x420160
149
150#define mmDMA_QM_4_CP_FENCE3_RDATA 0x420164
151
152#define mmDMA_QM_4_CP_FENCE0_CNT 0x420168
153
154#define mmDMA_QM_4_CP_FENCE1_CNT 0x42016C
155
156#define mmDMA_QM_4_CP_FENCE2_CNT 0x420170
157
158#define mmDMA_QM_4_CP_FENCE3_CNT 0x420174
159
160#define mmDMA_QM_4_CP_STS 0x420178
161
162#define mmDMA_QM_4_CP_CURRENT_INST_LO 0x42017C
163
164#define mmDMA_QM_4_CP_CURRENT_INST_HI 0x420180
165
166#define mmDMA_QM_4_CP_BARRIER_CFG 0x420184
167
168#define mmDMA_QM_4_CP_DBG_0 0x420188
169
170#define mmDMA_QM_4_PQ_BUF_ADDR 0x420300
171
172#define mmDMA_QM_4_PQ_BUF_RDATA 0x420304
173
174#define mmDMA_QM_4_CQ_BUF_ADDR 0x420308
175
176#define mmDMA_QM_4_CQ_BUF_RDATA 0x42030C
177
178#endif /* ASIC_REG_DMA_QM_4_REGS_H_ */
179
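For orientation only, and not part of the patch itself: 64-bit values such as a queue's base address appear to be programmed through LO/HI register pairs like mmDMA_QM_4_PQ_BASE_LO/mmDMA_QM_4_PQ_BASE_HI above. A minimal sketch of that split follows; reg_write() is a hypothetical stand-in for whatever MMIO accessor the driver actually uses, and the address passed in is an arbitrary example.

#include <stdint.h>
#include <stdio.h>

#define mmDMA_QM_4_PQ_BASE_LO 0x420060 /* from the header above */
#define mmDMA_QM_4_PQ_BASE_HI 0x420064

/* Hypothetical MMIO write helper; here it only logs the access. */
static void reg_write(uint32_t reg, uint32_t val)
{
	printf("write 0x%08x -> reg 0x%06x\n", (unsigned int)val, (unsigned int)reg);
}

/* Split a 64-bit queue base address across the LO/HI register pair. */
static void set_pq_base(uint64_t base)
{
	reg_write(mmDMA_QM_4_PQ_BASE_LO, (uint32_t)(base & 0xFFFFFFFFu));
	reg_write(mmDMA_QM_4_PQ_BASE_HI, (uint32_t)(base >> 32));
}

int main(void)
{
	set_pq_base(0x00000000DEAD1000ull); /* arbitrary example address */
	return 0;
}
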
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h
new file mode 100644
index 000000000000..85b15010cd7a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h
@@ -0,0 +1,1372 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef GOYA_BLOCKS_H_
14#define GOYA_BLOCKS_H_
15
16#define mmPCI_NRTR_BASE 0x7FFC000000ull
17#define PCI_NRTR_MAX_OFFSET 0x608
18#define PCI_NRTR_SECTION 0x4000
19#define mmPCI_RD_REGULATOR_BASE 0x7FFC004000ull
20#define PCI_RD_REGULATOR_MAX_OFFSET 0x74
21#define PCI_RD_REGULATOR_SECTION 0x1000
22#define mmPCI_WR_REGULATOR_BASE 0x7FFC005000ull
23#define PCI_WR_REGULATOR_MAX_OFFSET 0x74
24#define PCI_WR_REGULATOR_SECTION 0x3B000
25#define mmMME1_RTR_BASE 0x7FFC040000ull
26#define MME1_RTR_MAX_OFFSET 0x608
27#define MME1_RTR_SECTION 0x4000
28#define mmMME1_RD_REGULATOR_BASE 0x7FFC044000ull
29#define MME1_RD_REGULATOR_MAX_OFFSET 0x74
30#define MME1_RD_REGULATOR_SECTION 0x1000
31#define mmMME1_WR_REGULATOR_BASE 0x7FFC045000ull
32#define MME1_WR_REGULATOR_MAX_OFFSET 0x74
33#define MME1_WR_REGULATOR_SECTION 0x3B000
34#define mmMME2_RTR_BASE 0x7FFC080000ull
35#define MME2_RTR_MAX_OFFSET 0x608
36#define MME2_RTR_SECTION 0x4000
37#define mmMME2_RD_REGULATOR_BASE 0x7FFC084000ull
38#define MME2_RD_REGULATOR_MAX_OFFSET 0x74
39#define MME2_RD_REGULATOR_SECTION 0x1000
40#define mmMME2_WR_REGULATOR_BASE 0x7FFC085000ull
41#define MME2_WR_REGULATOR_MAX_OFFSET 0x74
42#define MME2_WR_REGULATOR_SECTION 0x3B000
43#define mmMME3_RTR_BASE 0x7FFC0C0000ull
44#define MME3_RTR_MAX_OFFSET 0x608
45#define MME3_RTR_SECTION 0x4000
46#define mmMME3_RD_REGULATOR_BASE 0x7FFC0C4000ull
47#define MME3_RD_REGULATOR_MAX_OFFSET 0x74
48#define MME3_RD_REGULATOR_SECTION 0x1000
49#define mmMME3_WR_REGULATOR_BASE 0x7FFC0C5000ull
50#define MME3_WR_REGULATOR_MAX_OFFSET 0x74
51#define MME3_WR_REGULATOR_SECTION 0xB000
52#define mmMME_BASE 0x7FFC0D0000ull
53#define MME_MAX_OFFSET 0xBB0
54#define MME_SECTION 0x8000
55#define mmMME_QM_BASE 0x7FFC0D8000ull
56#define MME_QM_MAX_OFFSET 0x310
57#define MME_QM_SECTION 0x1000
58#define mmMME_CMDQ_BASE 0x7FFC0D9000ull
59#define MME_CMDQ_MAX_OFFSET 0x310
60#define MME_CMDQ_SECTION 0x1000
61#define mmACC_MS_ECC_MEM_0_BASE 0x7FFC0DA000ull
62#define ACC_MS_ECC_MEM_0_MAX_OFFSET 0x0
63#define ACC_MS_ECC_MEM_0_SECTION 0x1000
64#define mmACC_MS_ECC_MEM_1_BASE 0x7FFC0DB000ull
65#define ACC_MS_ECC_MEM_1_MAX_OFFSET 0x0
66#define ACC_MS_ECC_MEM_1_SECTION 0x1000
67#define mmACC_MS_ECC_MEM_2_BASE 0x7FFC0DC000ull
68#define ACC_MS_ECC_MEM_2_MAX_OFFSET 0x0
69#define ACC_MS_ECC_MEM_2_SECTION 0x1000
70#define mmACC_MS_ECC_MEM_3_BASE 0x7FFC0DD000ull
71#define ACC_MS_ECC_MEM_3_MAX_OFFSET 0x0
72#define ACC_MS_ECC_MEM_3_SECTION 0x1000
73#define mmSBA_ECC_MEM_BASE 0x7FFC0DE000ull
74#define SBA_ECC_MEM_MAX_OFFSET 0x0
75#define SBA_ECC_MEM_SECTION 0x1000
76#define mmSBB_ECC_MEM_BASE 0x7FFC0DF000ull
77#define SBB_ECC_MEM_MAX_OFFSET 0x0
78#define SBB_ECC_MEM_SECTION 0x21000
79#define mmMME4_RTR_BASE 0x7FFC100000ull
80#define MME4_RTR_MAX_OFFSET 0x608
81#define MME4_RTR_SECTION 0x4000
82#define mmMME4_RD_REGULATOR_BASE 0x7FFC104000ull
83#define MME4_RD_REGULATOR_MAX_OFFSET 0x74
84#define MME4_RD_REGULATOR_SECTION 0x1000
85#define mmMME4_WR_REGULATOR_BASE 0x7FFC105000ull
86#define MME4_WR_REGULATOR_MAX_OFFSET 0x74
87#define MME4_WR_REGULATOR_SECTION 0xB000
88#define mmSYNC_MNGR_BASE 0x7FFC110000ull
89#define SYNC_MNGR_MAX_OFFSET 0x4400
90#define SYNC_MNGR_SECTION 0x30000
91#define mmMME5_RTR_BASE 0x7FFC140000ull
92#define MME5_RTR_MAX_OFFSET 0x608
93#define MME5_RTR_SECTION 0x4000
94#define mmMME5_RD_REGULATOR_BASE 0x7FFC144000ull
95#define MME5_RD_REGULATOR_MAX_OFFSET 0x74
96#define MME5_RD_REGULATOR_SECTION 0x1000
97#define mmMME5_WR_REGULATOR_BASE 0x7FFC145000ull
98#define MME5_WR_REGULATOR_MAX_OFFSET 0x74
99#define MME5_WR_REGULATOR_SECTION 0x3B000
100#define mmMME6_RTR_BASE 0x7FFC180000ull
101#define MME6_RTR_MAX_OFFSET 0x608
102#define MME6_RTR_SECTION 0x4000
103#define mmMME6_RD_REGULATOR_BASE 0x7FFC184000ull
104#define MME6_RD_REGULATOR_MAX_OFFSET 0x74
105#define MME6_RD_REGULATOR_SECTION 0x1000
106#define mmMME6_WR_REGULATOR_BASE 0x7FFC185000ull
107#define MME6_WR_REGULATOR_MAX_OFFSET 0x74
108#define MME6_WR_REGULATOR_SECTION 0x3B000
109#define mmDMA_NRTR_BASE 0x7FFC1C0000ull
110#define DMA_NRTR_MAX_OFFSET 0x608
111#define DMA_NRTR_SECTION 0x4000
112#define mmDMA_RD_REGULATOR_BASE 0x7FFC1C4000ull
113#define DMA_RD_REGULATOR_MAX_OFFSET 0x74
114#define DMA_RD_REGULATOR_SECTION 0x1000
115#define mmDMA_WR_REGULATOR_BASE 0x7FFC1C5000ull
116#define DMA_WR_REGULATOR_MAX_OFFSET 0x74
117#define DMA_WR_REGULATOR_SECTION 0x3B000
118#define mmSRAM_Y0_X0_BANK_BASE 0x7FFC200000ull
119#define SRAM_Y0_X0_BANK_MAX_OFFSET 0x4
120#define SRAM_Y0_X0_BANK_SECTION 0x1000
121#define mmSRAM_Y0_X0_RTR_BASE 0x7FFC201000ull
122#define SRAM_Y0_X0_RTR_MAX_OFFSET 0x334
123#define SRAM_Y0_X0_RTR_SECTION 0x3000
124#define mmSRAM_Y0_X1_BANK_BASE 0x7FFC204000ull
125#define SRAM_Y0_X1_BANK_MAX_OFFSET 0x4
126#define SRAM_Y0_X1_BANK_SECTION 0x1000
127#define mmSRAM_Y0_X1_RTR_BASE 0x7FFC205000ull
128#define SRAM_Y0_X1_RTR_MAX_OFFSET 0x334
129#define SRAM_Y0_X1_RTR_SECTION 0x3000
130#define mmSRAM_Y0_X2_BANK_BASE 0x7FFC208000ull
131#define SRAM_Y0_X2_BANK_MAX_OFFSET 0x4
132#define SRAM_Y0_X2_BANK_SECTION 0x1000
133#define mmSRAM_Y0_X2_RTR_BASE 0x7FFC209000ull
134#define SRAM_Y0_X2_RTR_MAX_OFFSET 0x334
135#define SRAM_Y0_X2_RTR_SECTION 0x3000
136#define mmSRAM_Y0_X3_BANK_BASE 0x7FFC20C000ull
137#define SRAM_Y0_X3_BANK_MAX_OFFSET 0x4
138#define SRAM_Y0_X3_BANK_SECTION 0x1000
139#define mmSRAM_Y0_X3_RTR_BASE 0x7FFC20D000ull
140#define SRAM_Y0_X3_RTR_MAX_OFFSET 0x334
141#define SRAM_Y0_X3_RTR_SECTION 0x3000
142#define mmSRAM_Y0_X4_BANK_BASE 0x7FFC210000ull
143#define SRAM_Y0_X4_BANK_MAX_OFFSET 0x4
144#define SRAM_Y0_X4_BANK_SECTION 0x1000
145#define mmSRAM_Y0_X4_RTR_BASE 0x7FFC211000ull
146#define SRAM_Y0_X4_RTR_MAX_OFFSET 0x334
147#define SRAM_Y0_X4_RTR_SECTION 0xF000
148#define mmSRAM_Y1_X0_BANK_BASE 0x7FFC220000ull
149#define SRAM_Y1_X0_BANK_MAX_OFFSET 0x4
150#define SRAM_Y1_X0_BANK_SECTION 0x1000
151#define mmSRAM_Y1_X0_RTR_BASE 0x7FFC221000ull
152#define SRAM_Y1_X0_RTR_MAX_OFFSET 0x334
153#define SRAM_Y1_X0_RTR_SECTION 0x3000
154#define mmSRAM_Y1_X1_BANK_BASE 0x7FFC224000ull
155#define SRAM_Y1_X1_BANK_MAX_OFFSET 0x4
156#define SRAM_Y1_X1_BANK_SECTION 0x1000
157#define mmSRAM_Y1_X1_RTR_BASE 0x7FFC225000ull
158#define SRAM_Y1_X1_RTR_MAX_OFFSET 0x334
159#define SRAM_Y1_X1_RTR_SECTION 0x3000
160#define mmSRAM_Y1_X2_BANK_BASE 0x7FFC228000ull
161#define SRAM_Y1_X2_BANK_MAX_OFFSET 0x4
162#define SRAM_Y1_X2_BANK_SECTION 0x1000
163#define mmSRAM_Y1_X2_RTR_BASE 0x7FFC229000ull
164#define SRAM_Y1_X2_RTR_MAX_OFFSET 0x334
165#define SRAM_Y1_X2_RTR_SECTION 0x3000
166#define mmSRAM_Y1_X3_BANK_BASE 0x7FFC22C000ull
167#define SRAM_Y1_X3_BANK_MAX_OFFSET 0x4
168#define SRAM_Y1_X3_BANK_SECTION 0x1000
169#define mmSRAM_Y1_X3_RTR_BASE 0x7FFC22D000ull
170#define SRAM_Y1_X3_RTR_MAX_OFFSET 0x334
171#define SRAM_Y1_X3_RTR_SECTION 0x3000
172#define mmSRAM_Y1_X4_BANK_BASE 0x7FFC230000ull
173#define SRAM_Y1_X4_BANK_MAX_OFFSET 0x4
174#define SRAM_Y1_X4_BANK_SECTION 0x1000
175#define mmSRAM_Y1_X4_RTR_BASE 0x7FFC231000ull
176#define SRAM_Y1_X4_RTR_MAX_OFFSET 0x334
177#define SRAM_Y1_X4_RTR_SECTION 0xF000
178#define mmSRAM_Y2_X0_BANK_BASE 0x7FFC240000ull
179#define SRAM_Y2_X0_BANK_MAX_OFFSET 0x4
180#define SRAM_Y2_X0_BANK_SECTION 0x1000
181#define mmSRAM_Y2_X0_RTR_BASE 0x7FFC241000ull
182#define SRAM_Y2_X0_RTR_MAX_OFFSET 0x334
183#define SRAM_Y2_X0_RTR_SECTION 0x3000
184#define mmSRAM_Y2_X1_BANK_BASE 0x7FFC244000ull
185#define SRAM_Y2_X1_BANK_MAX_OFFSET 0x4
186#define SRAM_Y2_X1_BANK_SECTION 0x1000
187#define mmSRAM_Y2_X1_RTR_BASE 0x7FFC245000ull
188#define SRAM_Y2_X1_RTR_MAX_OFFSET 0x334
189#define SRAM_Y2_X1_RTR_SECTION 0x3000
190#define mmSRAM_Y2_X2_BANK_BASE 0x7FFC248000ull
191#define SRAM_Y2_X2_BANK_MAX_OFFSET 0x4
192#define SRAM_Y2_X2_BANK_SECTION 0x1000
193#define mmSRAM_Y2_X2_RTR_BASE 0x7FFC249000ull
194#define SRAM_Y2_X2_RTR_MAX_OFFSET 0x334
195#define SRAM_Y2_X2_RTR_SECTION 0x3000
196#define mmSRAM_Y2_X3_BANK_BASE 0x7FFC24C000ull
197#define SRAM_Y2_X3_BANK_MAX_OFFSET 0x4
198#define SRAM_Y2_X3_BANK_SECTION 0x1000
199#define mmSRAM_Y2_X3_RTR_BASE 0x7FFC24D000ull
200#define SRAM_Y2_X3_RTR_MAX_OFFSET 0x334
201#define SRAM_Y2_X3_RTR_SECTION 0x3000
202#define mmSRAM_Y2_X4_BANK_BASE 0x7FFC250000ull
203#define SRAM_Y2_X4_BANK_MAX_OFFSET 0x4
204#define SRAM_Y2_X4_BANK_SECTION 0x1000
205#define mmSRAM_Y2_X4_RTR_BASE 0x7FFC251000ull
206#define SRAM_Y2_X4_RTR_MAX_OFFSET 0x334
207#define SRAM_Y2_X4_RTR_SECTION 0xF000
208#define mmSRAM_Y3_X0_BANK_BASE 0x7FFC260000ull
209#define SRAM_Y3_X0_BANK_MAX_OFFSET 0x4
210#define SRAM_Y3_X0_BANK_SECTION 0x1000
211#define mmSRAM_Y3_X0_RTR_BASE 0x7FFC261000ull
212#define SRAM_Y3_X0_RTR_MAX_OFFSET 0x334
213#define SRAM_Y3_X0_RTR_SECTION 0x3000
214#define mmSRAM_Y3_X1_BANK_BASE 0x7FFC264000ull
215#define SRAM_Y3_X1_BANK_MAX_OFFSET 0x4
216#define SRAM_Y3_X1_BANK_SECTION 0x1000
217#define mmSRAM_Y3_X1_RTR_BASE 0x7FFC265000ull
218#define SRAM_Y3_X1_RTR_MAX_OFFSET 0x334
219#define SRAM_Y3_X1_RTR_SECTION 0x3000
220#define mmSRAM_Y3_X2_BANK_BASE 0x7FFC268000ull
221#define SRAM_Y3_X2_BANK_MAX_OFFSET 0x4
222#define SRAM_Y3_X2_BANK_SECTION 0x1000
223#define mmSRAM_Y3_X2_RTR_BASE 0x7FFC269000ull
224#define SRAM_Y3_X2_RTR_MAX_OFFSET 0x334
225#define SRAM_Y3_X2_RTR_SECTION 0x3000
226#define mmSRAM_Y3_X3_BANK_BASE 0x7FFC26C000ull
227#define SRAM_Y3_X3_BANK_MAX_OFFSET 0x4
228#define SRAM_Y3_X3_BANK_SECTION 0x1000
229#define mmSRAM_Y3_X3_RTR_BASE 0x7FFC26D000ull
230#define SRAM_Y3_X3_RTR_MAX_OFFSET 0x334
231#define SRAM_Y3_X3_RTR_SECTION 0x3000
232#define mmSRAM_Y3_X4_BANK_BASE 0x7FFC270000ull
233#define SRAM_Y3_X4_BANK_MAX_OFFSET 0x4
234#define SRAM_Y3_X4_BANK_SECTION 0x1000
235#define mmSRAM_Y3_X4_RTR_BASE 0x7FFC271000ull
236#define SRAM_Y3_X4_RTR_MAX_OFFSET 0x334
237#define SRAM_Y3_X4_RTR_SECTION 0xF000
238#define mmSRAM_Y4_X0_BANK_BASE 0x7FFC280000ull
239#define SRAM_Y4_X0_BANK_MAX_OFFSET 0x4
240#define SRAM_Y4_X0_BANK_SECTION 0x1000
241#define mmSRAM_Y4_X0_RTR_BASE 0x7FFC281000ull
242#define SRAM_Y4_X0_RTR_MAX_OFFSET 0x334
243#define SRAM_Y4_X0_RTR_SECTION 0x3000
244#define mmSRAM_Y4_X1_BANK_BASE 0x7FFC284000ull
245#define SRAM_Y4_X1_BANK_MAX_OFFSET 0x4
246#define SRAM_Y4_X1_BANK_SECTION 0x1000
247#define mmSRAM_Y4_X1_RTR_BASE 0x7FFC285000ull
248#define SRAM_Y4_X1_RTR_MAX_OFFSET 0x334
249#define SRAM_Y4_X1_RTR_SECTION 0x3000
250#define mmSRAM_Y4_X2_BANK_BASE 0x7FFC288000ull
251#define SRAM_Y4_X2_BANK_MAX_OFFSET 0x4
252#define SRAM_Y4_X2_BANK_SECTION 0x1000
253#define mmSRAM_Y4_X2_RTR_BASE 0x7FFC289000ull
254#define SRAM_Y4_X2_RTR_MAX_OFFSET 0x334
255#define SRAM_Y4_X2_RTR_SECTION 0x3000
256#define mmSRAM_Y4_X3_BANK_BASE 0x7FFC28C000ull
257#define SRAM_Y4_X3_BANK_MAX_OFFSET 0x4
258#define SRAM_Y4_X3_BANK_SECTION 0x1000
259#define mmSRAM_Y4_X3_RTR_BASE 0x7FFC28D000ull
260#define SRAM_Y4_X3_RTR_MAX_OFFSET 0x334
261#define SRAM_Y4_X3_RTR_SECTION 0x3000
262#define mmSRAM_Y4_X4_BANK_BASE 0x7FFC290000ull
263#define SRAM_Y4_X4_BANK_MAX_OFFSET 0x4
264#define SRAM_Y4_X4_BANK_SECTION 0x1000
265#define mmSRAM_Y4_X4_RTR_BASE 0x7FFC291000ull
266#define SRAM_Y4_X4_RTR_MAX_OFFSET 0x334
267#define SRAM_Y4_X4_RTR_SECTION 0xF000
268#define mmSRAM_Y5_X0_BANK_BASE 0x7FFC2A0000ull
269#define SRAM_Y5_X0_BANK_MAX_OFFSET 0x4
270#define SRAM_Y5_X0_BANK_SECTION 0x1000
271#define mmSRAM_Y5_X0_RTR_BASE 0x7FFC2A1000ull
272#define SRAM_Y5_X0_RTR_MAX_OFFSET 0x334
273#define SRAM_Y5_X0_RTR_SECTION 0x3000
274#define mmSRAM_Y5_X1_BANK_BASE 0x7FFC2A4000ull
275#define SRAM_Y5_X1_BANK_MAX_OFFSET 0x4
276#define SRAM_Y5_X1_BANK_SECTION 0x1000
277#define mmSRAM_Y5_X1_RTR_BASE 0x7FFC2A5000ull
278#define SRAM_Y5_X1_RTR_MAX_OFFSET 0x334
279#define SRAM_Y5_X1_RTR_SECTION 0x3000
280#define mmSRAM_Y5_X2_BANK_BASE 0x7FFC2A8000ull
281#define SRAM_Y5_X2_BANK_MAX_OFFSET 0x4
282#define SRAM_Y5_X2_BANK_SECTION 0x1000
283#define mmSRAM_Y5_X2_RTR_BASE 0x7FFC2A9000ull
284#define SRAM_Y5_X2_RTR_MAX_OFFSET 0x334
285#define SRAM_Y5_X2_RTR_SECTION 0x3000
286#define mmSRAM_Y5_X3_BANK_BASE 0x7FFC2AC000ull
287#define SRAM_Y5_X3_BANK_MAX_OFFSET 0x4
288#define SRAM_Y5_X3_BANK_SECTION 0x1000
289#define mmSRAM_Y5_X3_RTR_BASE 0x7FFC2AD000ull
290#define SRAM_Y5_X3_RTR_MAX_OFFSET 0x334
291#define SRAM_Y5_X3_RTR_SECTION 0x3000
292#define mmSRAM_Y5_X4_BANK_BASE 0x7FFC2B0000ull
293#define SRAM_Y5_X4_BANK_MAX_OFFSET 0x4
294#define SRAM_Y5_X4_BANK_SECTION 0x1000
295#define mmSRAM_Y5_X4_RTR_BASE 0x7FFC2B1000ull
296#define SRAM_Y5_X4_RTR_MAX_OFFSET 0x334
297#define SRAM_Y5_X4_RTR_SECTION 0x14F000
298#define mmDMA_QM_0_BASE 0x7FFC400000ull
299#define DMA_QM_0_MAX_OFFSET 0x310
300#define DMA_QM_0_SECTION 0x1000
301#define mmDMA_CH_0_BASE 0x7FFC401000ull
302#define DMA_CH_0_MAX_OFFSET 0x200
303#define DMA_CH_0_SECTION 0x7000
304#define mmDMA_QM_1_BASE 0x7FFC408000ull
305#define DMA_QM_1_MAX_OFFSET 0x310
306#define DMA_QM_1_SECTION 0x1000
307#define mmDMA_CH_1_BASE 0x7FFC409000ull
308#define DMA_CH_1_MAX_OFFSET 0x200
309#define DMA_CH_1_SECTION 0x7000
310#define mmDMA_QM_2_BASE 0x7FFC410000ull
311#define DMA_QM_2_MAX_OFFSET 0x310
312#define DMA_QM_2_SECTION 0x1000
313#define mmDMA_CH_2_BASE 0x7FFC411000ull
314#define DMA_CH_2_MAX_OFFSET 0x200
315#define DMA_CH_2_SECTION 0x7000
316#define mmDMA_QM_3_BASE 0x7FFC418000ull
317#define DMA_QM_3_MAX_OFFSET 0x310
318#define DMA_QM_3_SECTION 0x1000
319#define mmDMA_CH_3_BASE 0x7FFC419000ull
320#define DMA_CH_3_MAX_OFFSET 0x200
321#define DMA_CH_3_SECTION 0x7000
322#define mmDMA_QM_4_BASE 0x7FFC420000ull
323#define DMA_QM_4_MAX_OFFSET 0x310
324#define DMA_QM_4_SECTION 0x1000
325#define mmDMA_CH_4_BASE 0x7FFC421000ull
326#define DMA_CH_4_MAX_OFFSET 0x200
327#define DMA_CH_4_SECTION 0x20000
328#define mmCPU_CA53_CFG_BASE 0x7FFC441000ull
329#define CPU_CA53_CFG_MAX_OFFSET 0x218
330#define CPU_CA53_CFG_SECTION 0x1000
331#define mmCPU_IF_BASE 0x7FFC442000ull
332#define CPU_IF_MAX_OFFSET 0x134
333#define CPU_IF_SECTION 0x2000
334#define mmCPU_TIMESTAMP_BASE 0x7FFC444000ull
335#define CPU_TIMESTAMP_MAX_OFFSET 0x1000
336#define CPU_TIMESTAMP_SECTION 0x3C000
337#define mmMMU_BASE 0x7FFC480000ull
338#define MMU_MAX_OFFSET 0x44
339#define MMU_SECTION 0x10000
340#define mmSTLB_BASE 0x7FFC490000ull
341#define STLB_MAX_OFFSET 0x50
342#define STLB_SECTION 0x10000
343#define mmNORTH_THERMAL_SENSOR_BASE 0x7FFC4A0000ull
344#define NORTH_THERMAL_SENSOR_MAX_OFFSET 0xE64
345#define NORTH_THERMAL_SENSOR_SECTION 0x1000
346#define mmMC_PLL_BASE 0x7FFC4A1000ull
347#define MC_PLL_MAX_OFFSET 0x444
348#define MC_PLL_SECTION 0x1000
349#define mmCPU_PLL_BASE 0x7FFC4A2000ull
350#define CPU_PLL_MAX_OFFSET 0x444
351#define CPU_PLL_SECTION 0x1000
352#define mmIC_PLL_BASE 0x7FFC4A3000ull
353#define IC_PLL_MAX_OFFSET 0x444
354#define IC_PLL_SECTION 0x1000
355#define mmDMA_PROCESS_MON_BASE 0x7FFC4A4000ull
356#define DMA_PROCESS_MON_MAX_OFFSET 0x4
357#define DMA_PROCESS_MON_SECTION 0xC000
358#define mmDMA_MACRO_BASE 0x7FFC4B0000ull
359#define DMA_MACRO_MAX_OFFSET 0x15C
360#define DMA_MACRO_SECTION 0x150000
361#define mmDDR_PHY_CH0_BASE 0x7FFC600000ull
362#define DDR_PHY_CH0_MAX_OFFSET 0x0
363#define DDR_PHY_CH0_SECTION 0x40000
364#define mmDDR_MC_CH0_BASE 0x7FFC640000ull
365#define DDR_MC_CH0_MAX_OFFSET 0xF34
366#define DDR_MC_CH0_SECTION 0x8000
367#define mmDDR_MISC_CH0_BASE 0x7FFC648000ull
368#define DDR_MISC_CH0_MAX_OFFSET 0x204
369#define DDR_MISC_CH0_SECTION 0xB8000
370#define mmDDR_PHY_CH1_BASE 0x7FFC700000ull
371#define DDR_PHY_CH1_MAX_OFFSET 0x0
372#define DDR_PHY_CH1_SECTION 0x40000
373#define mmDDR_MC_CH1_BASE 0x7FFC740000ull
374#define DDR_MC_CH1_MAX_OFFSET 0xF34
375#define DDR_MC_CH1_SECTION 0x8000
376#define mmDDR_MISC_CH1_BASE 0x7FFC748000ull
377#define DDR_MISC_CH1_MAX_OFFSET 0x204
378#define DDR_MISC_CH1_SECTION 0xB8000
379#define mmGIC_BASE 0x7FFC800000ull
380#define GIC_MAX_OFFSET 0x10000
381#define GIC_SECTION 0x401000
382#define mmPCIE_WRAP_BASE 0x7FFCC01000ull
383#define PCIE_WRAP_MAX_OFFSET 0xDF4
384#define PCIE_WRAP_SECTION 0x1000
385#define mmPCIE_DBI_BASE 0x7FFCC02000ull
386#define PCIE_DBI_MAX_OFFSET 0xC04
387#define PCIE_DBI_SECTION 0x2000
388#define mmPCIE_CORE_BASE 0x7FFCC04000ull
389#define PCIE_CORE_MAX_OFFSET 0x9B8
390#define PCIE_CORE_SECTION 0x1000
391#define mmPCIE_DB_CFG_BASE 0x7FFCC05000ull
392#define PCIE_DB_CFG_MAX_OFFSET 0xE34
393#define PCIE_DB_CFG_SECTION 0x1000
394#define mmPCIE_DB_CMD_BASE 0x7FFCC06000ull
395#define PCIE_DB_CMD_MAX_OFFSET 0x810
396#define PCIE_DB_CMD_SECTION 0x1000
397#define mmPCIE_AUX_BASE 0x7FFCC07000ull
398#define PCIE_AUX_MAX_OFFSET 0x9BC
399#define PCIE_AUX_SECTION 0x1000
400#define mmPCIE_DB_RSV_BASE 0x7FFCC08000ull
401#define PCIE_DB_RSV_MAX_OFFSET 0x800
402#define PCIE_DB_RSV_SECTION 0x8000
403#define mmPCIE_PHY_BASE 0x7FFCC10000ull
404#define PCIE_PHY_MAX_OFFSET 0x924
405#define PCIE_PHY_SECTION 0x30000
406#define mmPSOC_I2C_M0_BASE 0x7FFCC40000ull
407#define PSOC_I2C_M0_MAX_OFFSET 0x100
408#define PSOC_I2C_M0_SECTION 0x1000
409#define mmPSOC_I2C_M1_BASE 0x7FFCC41000ull
410#define PSOC_I2C_M1_MAX_OFFSET 0x100
411#define PSOC_I2C_M1_SECTION 0x1000
412#define mmPSOC_I2C_S_BASE 0x7FFCC42000ull
413#define PSOC_I2C_S_MAX_OFFSET 0x100
414#define PSOC_I2C_S_SECTION 0x1000
415#define mmPSOC_SPI_BASE 0x7FFCC43000ull
416#define PSOC_SPI_MAX_OFFSET 0x100
417#define PSOC_SPI_SECTION 0x1000
418#define mmPSOC_EMMC_BASE 0x7FFCC44000ull
419#define PSOC_EMMC_MAX_OFFSET 0xF70
420#define PSOC_EMMC_SECTION 0x1000
421#define mmPSOC_UART_0_BASE 0x7FFCC45000ull
422#define PSOC_UART_0_MAX_OFFSET 0x1000
423#define PSOC_UART_0_SECTION 0x1000
424#define mmPSOC_UART_1_BASE 0x7FFCC46000ull
425#define PSOC_UART_1_MAX_OFFSET 0x1000
426#define PSOC_UART_1_SECTION 0x1000
427#define mmPSOC_TIMER_BASE 0x7FFCC47000ull
428#define PSOC_TIMER_MAX_OFFSET 0x1000
429#define PSOC_TIMER_SECTION 0x1000
430#define mmPSOC_WDOG_BASE 0x7FFCC48000ull
431#define PSOC_WDOG_MAX_OFFSET 0x1000
432#define PSOC_WDOG_SECTION 0x1000
433#define mmPSOC_TIMESTAMP_BASE 0x7FFCC49000ull
434#define PSOC_TIMESTAMP_MAX_OFFSET 0x1000
435#define PSOC_TIMESTAMP_SECTION 0x1000
436#define mmPSOC_EFUSE_BASE 0x7FFCC4A000ull
437#define PSOC_EFUSE_MAX_OFFSET 0x10C
438#define PSOC_EFUSE_SECTION 0x1000
439#define mmPSOC_GLOBAL_CONF_BASE 0x7FFCC4B000ull
440#define PSOC_GLOBAL_CONF_MAX_OFFSET 0xA48
441#define PSOC_GLOBAL_CONF_SECTION 0x1000
442#define mmPSOC_GPIO0_BASE 0x7FFCC4C000ull
443#define PSOC_GPIO0_MAX_OFFSET 0x1000
444#define PSOC_GPIO0_SECTION 0x1000
445#define mmPSOC_GPIO1_BASE 0x7FFCC4D000ull
446#define PSOC_GPIO1_MAX_OFFSET 0x1000
447#define PSOC_GPIO1_SECTION 0x1000
448#define mmPSOC_BTL_BASE 0x7FFCC4E000ull
449#define PSOC_BTL_MAX_OFFSET 0x124
450#define PSOC_BTL_SECTION 0x1000
451#define mmPSOC_CS_TRACE_BASE 0x7FFCC4F000ull
452#define PSOC_CS_TRACE_MAX_OFFSET 0x0
453#define PSOC_CS_TRACE_SECTION 0x1000
454#define mmPSOC_GPIO2_BASE 0x7FFCC50000ull
455#define PSOC_GPIO2_MAX_OFFSET 0x1000
456#define PSOC_GPIO2_SECTION 0x1000
457#define mmPSOC_GPIO3_BASE 0x7FFCC51000ull
458#define PSOC_GPIO3_MAX_OFFSET 0x1000
459#define PSOC_GPIO3_SECTION 0x1000
460#define mmPSOC_GPIO4_BASE 0x7FFCC52000ull
461#define PSOC_GPIO4_MAX_OFFSET 0x1000
462#define PSOC_GPIO4_SECTION 0x1000
463#define mmPSOC_DFT_EFUSE_BASE 0x7FFCC53000ull
464#define PSOC_DFT_EFUSE_MAX_OFFSET 0x10C
465#define PSOC_DFT_EFUSE_SECTION 0x1000
466#define mmPSOC_PM_BASE 0x7FFCC54000ull
467#define PSOC_PM_MAX_OFFSET 0x4
468#define PSOC_PM_SECTION 0x1000
469#define mmPSOC_TS_BASE 0x7FFCC55000ull
470#define PSOC_TS_MAX_OFFSET 0xE64
471#define PSOC_TS_SECTION 0xB000
472#define mmPSOC_MII_BASE 0x7FFCC60000ull
473#define PSOC_MII_MAX_OFFSET 0x105C
474#define PSOC_MII_SECTION 0x10000
475#define mmPSOC_EMMC_PLL_BASE 0x7FFCC70000ull
476#define PSOC_EMMC_PLL_MAX_OFFSET 0x444
477#define PSOC_EMMC_PLL_SECTION 0x1000
478#define mmPSOC_MME_PLL_BASE 0x7FFCC71000ull
479#define PSOC_MME_PLL_MAX_OFFSET 0x444
480#define PSOC_MME_PLL_SECTION 0x1000
481#define mmPSOC_PCI_PLL_BASE 0x7FFCC72000ull
482#define PSOC_PCI_PLL_MAX_OFFSET 0x444
483#define PSOC_PCI_PLL_SECTION 0x6000
484#define mmPSOC_PWM0_BASE 0x7FFCC78000ull
485#define PSOC_PWM0_MAX_OFFSET 0x58
486#define PSOC_PWM0_SECTION 0x1000
487#define mmPSOC_PWM1_BASE 0x7FFCC79000ull
488#define PSOC_PWM1_MAX_OFFSET 0x58
489#define PSOC_PWM1_SECTION 0x1000
490#define mmPSOC_PWM2_BASE 0x7FFCC7A000ull
491#define PSOC_PWM2_MAX_OFFSET 0x58
492#define PSOC_PWM2_SECTION 0x1000
493#define mmPSOC_PWM3_BASE 0x7FFCC7B000ull
494#define PSOC_PWM3_MAX_OFFSET 0x58
495#define PSOC_PWM3_SECTION 0x185000
496#define mmTPC0_NRTR_BASE 0x7FFCE00000ull
497#define TPC0_NRTR_MAX_OFFSET 0x608
498#define TPC0_NRTR_SECTION 0x1000
499#define mmTPC_PLL_BASE 0x7FFCE01000ull
500#define TPC_PLL_MAX_OFFSET 0x444
501#define TPC_PLL_SECTION 0x1000
502#define mmTPC_THEMAL_SENSOR_BASE 0x7FFCE02000ull
503#define TPC_THEMAL_SENSOR_MAX_OFFSET 0xE64
504#define TPC_THEMAL_SENSOR_SECTION 0x1000
505#define mmTPC_PROCESS_MON_BASE 0x7FFCE03000ull
506#define TPC_PROCESS_MON_MAX_OFFSET 0x4
507#define TPC_PROCESS_MON_SECTION 0x1000
508#define mmTPC0_RD_REGULATOR_BASE 0x7FFCE04000ull
509#define TPC0_RD_REGULATOR_MAX_OFFSET 0x74
510#define TPC0_RD_REGULATOR_SECTION 0x1000
511#define mmTPC0_WR_REGULATOR_BASE 0x7FFCE05000ull
512#define TPC0_WR_REGULATOR_MAX_OFFSET 0x74
513#define TPC0_WR_REGULATOR_SECTION 0x1000
514#define mmTPC0_CFG_BASE 0x7FFCE06000ull
515#define TPC0_CFG_MAX_OFFSET 0xE30
516#define TPC0_CFG_SECTION 0x2000
517#define mmTPC0_QM_BASE 0x7FFCE08000ull
518#define TPC0_QM_MAX_OFFSET 0x310
519#define TPC0_QM_SECTION 0x1000
520#define mmTPC0_CMDQ_BASE 0x7FFCE09000ull
521#define TPC0_CMDQ_MAX_OFFSET 0x310
522#define TPC0_CMDQ_SECTION 0x37000
523#define mmTPC1_RTR_BASE 0x7FFCE40000ull
524#define TPC1_RTR_MAX_OFFSET 0x608
525#define TPC1_RTR_SECTION 0x4000
526#define mmTPC1_WR_REGULATOR_BASE 0x7FFCE44000ull
527#define TPC1_WR_REGULATOR_MAX_OFFSET 0x74
528#define TPC1_WR_REGULATOR_SECTION 0x1000
529#define mmTPC1_RD_REGULATOR_BASE 0x7FFCE45000ull
530#define TPC1_RD_REGULATOR_MAX_OFFSET 0x74
531#define TPC1_RD_REGULATOR_SECTION 0x1000
532#define mmTPC1_CFG_BASE 0x7FFCE46000ull
533#define TPC1_CFG_MAX_OFFSET 0xE30
534#define TPC1_CFG_SECTION 0x2000
535#define mmTPC1_QM_BASE 0x7FFCE48000ull
536#define TPC1_QM_MAX_OFFSET 0x310
537#define TPC1_QM_SECTION 0x1000
538#define mmTPC1_CMDQ_BASE 0x7FFCE49000ull
539#define TPC1_CMDQ_MAX_OFFSET 0x310
540#define TPC1_CMDQ_SECTION 0x37000
541#define mmTPC2_RTR_BASE 0x7FFCE80000ull
542#define TPC2_RTR_MAX_OFFSET 0x608
543#define TPC2_RTR_SECTION 0x4000
544#define mmTPC2_RD_REGULATOR_BASE 0x7FFCE84000ull
545#define TPC2_RD_REGULATOR_MAX_OFFSET 0x74
546#define TPC2_RD_REGULATOR_SECTION 0x1000
547#define mmTPC2_WR_REGULATOR_BASE 0x7FFCE85000ull
548#define TPC2_WR_REGULATOR_MAX_OFFSET 0x74
549#define TPC2_WR_REGULATOR_SECTION 0x1000
550#define mmTPC2_CFG_BASE 0x7FFCE86000ull
551#define TPC2_CFG_MAX_OFFSET 0xE30
552#define TPC2_CFG_SECTION 0x2000
553#define mmTPC2_QM_BASE 0x7FFCE88000ull
554#define TPC2_QM_MAX_OFFSET 0x310
555#define TPC2_QM_SECTION 0x1000
556#define mmTPC2_CMDQ_BASE 0x7FFCE89000ull
557#define TPC2_CMDQ_MAX_OFFSET 0x310
558#define TPC2_CMDQ_SECTION 0x37000
559#define mmTPC3_RTR_BASE 0x7FFCEC0000ull
560#define TPC3_RTR_MAX_OFFSET 0x608
561#define TPC3_RTR_SECTION 0x4000
562#define mmTPC3_RD_REGULATOR_BASE 0x7FFCEC4000ull
563#define TPC3_RD_REGULATOR_MAX_OFFSET 0x74
564#define TPC3_RD_REGULATOR_SECTION 0x1000
565#define mmTPC3_WR_REGULATOR_BASE 0x7FFCEC5000ull
566#define TPC3_WR_REGULATOR_MAX_OFFSET 0x74
567#define TPC3_WR_REGULATOR_SECTION 0x1000
568#define mmTPC3_CFG_BASE 0x7FFCEC6000ull
569#define TPC3_CFG_MAX_OFFSET 0xE30
570#define TPC3_CFG_SECTION 0x2000
571#define mmTPC3_QM_BASE 0x7FFCEC8000ull
572#define TPC3_QM_MAX_OFFSET 0x310
573#define TPC3_QM_SECTION 0x1000
574#define mmTPC3_CMDQ_BASE 0x7FFCEC9000ull
575#define TPC3_CMDQ_MAX_OFFSET 0x310
576#define TPC3_CMDQ_SECTION 0x37000
577#define mmTPC4_RTR_BASE 0x7FFCF00000ull
578#define TPC4_RTR_MAX_OFFSET 0x608
579#define TPC4_RTR_SECTION 0x4000
580#define mmTPC4_RD_REGULATOR_BASE 0x7FFCF04000ull
581#define TPC4_RD_REGULATOR_MAX_OFFSET 0x74
582#define TPC4_RD_REGULATOR_SECTION 0x1000
583#define mmTPC4_WR_REGULATOR_BASE 0x7FFCF05000ull
584#define TPC4_WR_REGULATOR_MAX_OFFSET 0x74
585#define TPC4_WR_REGULATOR_SECTION 0x1000
586#define mmTPC4_CFG_BASE 0x7FFCF06000ull
587#define TPC4_CFG_MAX_OFFSET 0xE30
588#define TPC4_CFG_SECTION 0x2000
589#define mmTPC4_QM_BASE 0x7FFCF08000ull
590#define TPC4_QM_MAX_OFFSET 0x310
591#define TPC4_QM_SECTION 0x1000
592#define mmTPC4_CMDQ_BASE 0x7FFCF09000ull
593#define TPC4_CMDQ_MAX_OFFSET 0x310
594#define TPC4_CMDQ_SECTION 0x37000
595#define mmTPC5_RTR_BASE 0x7FFCF40000ull
596#define TPC5_RTR_MAX_OFFSET 0x608
597#define TPC5_RTR_SECTION 0x4000
598#define mmTPC5_RD_REGULATOR_BASE 0x7FFCF44000ull
599#define TPC5_RD_REGULATOR_MAX_OFFSET 0x74
600#define TPC5_RD_REGULATOR_SECTION 0x1000
601#define mmTPC5_WR_REGULATOR_BASE 0x7FFCF45000ull
602#define TPC5_WR_REGULATOR_MAX_OFFSET 0x74
603#define TPC5_WR_REGULATOR_SECTION 0x1000
604#define mmTPC5_CFG_BASE 0x7FFCF46000ull
605#define TPC5_CFG_MAX_OFFSET 0xE30
606#define TPC5_CFG_SECTION 0x2000
607#define mmTPC5_QM_BASE 0x7FFCF48000ull
608#define TPC5_QM_MAX_OFFSET 0x310
609#define TPC5_QM_SECTION 0x1000
610#define mmTPC5_CMDQ_BASE 0x7FFCF49000ull
611#define TPC5_CMDQ_MAX_OFFSET 0x310
612#define TPC5_CMDQ_SECTION 0x37000
613#define mmTPC6_RTR_BASE 0x7FFCF80000ull
614#define TPC6_RTR_MAX_OFFSET 0x608
615#define TPC6_RTR_SECTION 0x4000
616#define mmTPC6_RD_REGULATOR_BASE 0x7FFCF84000ull
617#define TPC6_RD_REGULATOR_MAX_OFFSET 0x74
618#define TPC6_RD_REGULATOR_SECTION 0x1000
619#define mmTPC6_WR_REGULATOR_BASE 0x7FFCF85000ull
620#define TPC6_WR_REGULATOR_MAX_OFFSET 0x74
621#define TPC6_WR_REGULATOR_SECTION 0x1000
622#define mmTPC6_CFG_BASE 0x7FFCF86000ull
623#define TPC6_CFG_MAX_OFFSET 0xE30
624#define TPC6_CFG_SECTION 0x2000
625#define mmTPC6_QM_BASE 0x7FFCF88000ull
626#define TPC6_QM_MAX_OFFSET 0x310
627#define TPC6_QM_SECTION 0x1000
628#define mmTPC6_CMDQ_BASE 0x7FFCF89000ull
629#define TPC6_CMDQ_MAX_OFFSET 0x310
630#define TPC6_CMDQ_SECTION 0x37000
631#define mmTPC7_NRTR_BASE 0x7FFCFC0000ull
632#define TPC7_NRTR_MAX_OFFSET 0x608
633#define TPC7_NRTR_SECTION 0x4000
634#define mmTPC7_RD_REGULATOR_BASE 0x7FFCFC4000ull
635#define TPC7_RD_REGULATOR_MAX_OFFSET 0x74
636#define TPC7_RD_REGULATOR_SECTION 0x1000
637#define mmTPC7_WR_REGULATOR_BASE 0x7FFCFC5000ull
638#define TPC7_WR_REGULATOR_MAX_OFFSET 0x74
639#define TPC7_WR_REGULATOR_SECTION 0x1000
640#define mmTPC7_CFG_BASE 0x7FFCFC6000ull
641#define TPC7_CFG_MAX_OFFSET 0xE30
642#define TPC7_CFG_SECTION 0x2000
643#define mmTPC7_QM_BASE 0x7FFCFC8000ull
644#define TPC7_QM_MAX_OFFSET 0x310
645#define TPC7_QM_SECTION 0x1000
646#define mmTPC7_CMDQ_BASE 0x7FFCFC9000ull
647#define TPC7_CMDQ_MAX_OFFSET 0x310
648#define TPC7_CMDQ_SECTION 0x1037000
649#define mmMME_TOP_TABLE_BASE 0x7FFE000000ull
650#define MME_TOP_TABLE_MAX_OFFSET 0x1000
651#define MME_TOP_TABLE_SECTION 0x1000
652#define mmMME0_RTR_FUNNEL_BASE 0x7FFE001000ull
653#define MME0_RTR_FUNNEL_MAX_OFFSET 0x1000
654#define MME0_RTR_FUNNEL_SECTION 0x40000
655#define mmMME1_RTR_FUNNEL_BASE 0x7FFE041000ull
656#define MME1_RTR_FUNNEL_MAX_OFFSET 0x1000
657#define MME1_RTR_FUNNEL_SECTION 0x1000
658#define mmMME1_SBA_STM_BASE 0x7FFE042000ull
659#define MME1_SBA_STM_MAX_OFFSET 0x1000
660#define MME1_SBA_STM_SECTION 0x1000
661#define mmMME1_SBA_CTI_BASE 0x7FFE043000ull
662#define MME1_SBA_CTI_MAX_OFFSET 0x1000
663#define MME1_SBA_CTI_SECTION 0x1000
664#define mmMME1_SBA_ETF_BASE 0x7FFE044000ull
665#define MME1_SBA_ETF_MAX_OFFSET 0x1000
666#define MME1_SBA_ETF_SECTION 0x1000
667#define mmMME1_SBA_SPMU_BASE 0x7FFE045000ull
668#define MME1_SBA_SPMU_MAX_OFFSET 0x1000
669#define MME1_SBA_SPMU_SECTION 0x1000
670#define mmMME1_SBA_CTI0_BASE 0x7FFE046000ull
671#define MME1_SBA_CTI0_MAX_OFFSET 0x1000
672#define MME1_SBA_CTI0_SECTION 0x1000
673#define mmMME1_SBA_CTI1_BASE 0x7FFE047000ull
674#define MME1_SBA_CTI1_MAX_OFFSET 0x1000
675#define MME1_SBA_CTI1_SECTION 0x1000
676#define mmMME1_SBA_BMON0_BASE 0x7FFE048000ull
677#define MME1_SBA_BMON0_MAX_OFFSET 0x1000
678#define MME1_SBA_BMON0_SECTION 0x1000
679#define mmMME1_SBA_BMON1_BASE 0x7FFE049000ull
680#define MME1_SBA_BMON1_MAX_OFFSET 0x1000
681#define MME1_SBA_BMON1_SECTION 0x38000
682#define mmMME2_RTR_FUNNEL_BASE 0x7FFE081000ull
683#define MME2_RTR_FUNNEL_MAX_OFFSET 0x1000
684#define MME2_RTR_FUNNEL_SECTION 0x40000
685#define mmMME3_RTR_FUNNEL_BASE 0x7FFE0C1000ull
686#define MME3_RTR_FUNNEL_MAX_OFFSET 0x1000
687#define MME3_RTR_FUNNEL_SECTION 0x1000
688#define mmMME3_SBB_STM_BASE 0x7FFE0C2000ull
689#define MME3_SBB_STM_MAX_OFFSET 0x1000
690#define MME3_SBB_STM_SECTION 0x1000
691#define mmMME3_SBB_CTI_BASE 0x7FFE0C3000ull
692#define MME3_SBB_CTI_MAX_OFFSET 0x1000
693#define MME3_SBB_CTI_SECTION 0x1000
694#define mmMME3_SBB_ETF_BASE 0x7FFE0C4000ull
695#define MME3_SBB_ETF_MAX_OFFSET 0x1000
696#define MME3_SBB_ETF_SECTION 0x1000
697#define mmMME3_SBB_SPMU_BASE 0x7FFE0C5000ull
698#define MME3_SBB_SPMU_MAX_OFFSET 0x1000
699#define MME3_SBB_SPMU_SECTION 0x1000
700#define mmMME3_SBB_CTI0_BASE 0x7FFE0C6000ull
701#define MME3_SBB_CTI0_MAX_OFFSET 0x1000
702#define MME3_SBB_CTI0_SECTION 0x1000
703#define mmMME3_SBB_CTI1_BASE 0x7FFE0C7000ull
704#define MME3_SBB_CTI1_MAX_OFFSET 0x1000
705#define MME3_SBB_CTI1_SECTION 0x1000
706#define mmMME3_SBB_BMON0_BASE 0x7FFE0C8000ull
707#define MME3_SBB_BMON0_MAX_OFFSET 0x1000
708#define MME3_SBB_BMON0_SECTION 0x1000
709#define mmMME3_SBB_BMON1_BASE 0x7FFE0C9000ull
710#define MME3_SBB_BMON1_MAX_OFFSET 0x1000
711#define MME3_SBB_BMON1_SECTION 0x38000
712#define mmMME4_RTR_FUNNEL_BASE 0x7FFE101000ull
713#define MME4_RTR_FUNNEL_MAX_OFFSET 0x1000
714#define MME4_RTR_FUNNEL_SECTION 0x1000
715#define mmMME4_WACS_STM_BASE 0x7FFE102000ull
716#define MME4_WACS_STM_MAX_OFFSET 0x1000
717#define MME4_WACS_STM_SECTION 0x1000
718#define mmMME4_WACS_CTI_BASE 0x7FFE103000ull
719#define MME4_WACS_CTI_MAX_OFFSET 0x1000
720#define MME4_WACS_CTI_SECTION 0x1000
721#define mmMME4_WACS_ETF_BASE 0x7FFE104000ull
722#define MME4_WACS_ETF_MAX_OFFSET 0x1000
723#define MME4_WACS_ETF_SECTION 0x1000
724#define mmMME4_WACS_SPMU_BASE 0x7FFE105000ull
725#define MME4_WACS_SPMU_MAX_OFFSET 0x1000
726#define MME4_WACS_SPMU_SECTION 0x1000
727#define mmMME4_WACS_CTI0_BASE 0x7FFE106000ull
728#define MME4_WACS_CTI0_MAX_OFFSET 0x1000
729#define MME4_WACS_CTI0_SECTION 0x1000
730#define mmMME4_WACS_CTI1_BASE 0x7FFE107000ull
731#define MME4_WACS_CTI1_MAX_OFFSET 0x1000
732#define MME4_WACS_CTI1_SECTION 0x1000
733#define mmMME4_WACS_BMON0_BASE 0x7FFE108000ull
734#define MME4_WACS_BMON0_MAX_OFFSET 0x1000
735#define MME4_WACS_BMON0_SECTION 0x1000
736#define mmMME4_WACS_BMON1_BASE 0x7FFE109000ull
737#define MME4_WACS_BMON1_MAX_OFFSET 0x1000
738#define MME4_WACS_BMON1_SECTION 0x1000
739#define mmMME4_WACS_BMON2_BASE 0x7FFE10A000ull
740#define MME4_WACS_BMON2_MAX_OFFSET 0x1000
741#define MME4_WACS_BMON2_SECTION 0x1000
742#define mmMME4_WACS_BMON3_BASE 0x7FFE10B000ull
743#define MME4_WACS_BMON3_MAX_OFFSET 0x1000
744#define MME4_WACS_BMON3_SECTION 0x1000
745#define mmMME4_WACS_BMON4_BASE 0x7FFE10C000ull
746#define MME4_WACS_BMON4_MAX_OFFSET 0x1000
747#define MME4_WACS_BMON4_SECTION 0x1000
748#define mmMME4_WACS_BMON5_BASE 0x7FFE10D000ull
749#define MME4_WACS_BMON5_MAX_OFFSET 0x1000
750#define MME4_WACS_BMON5_SECTION 0x1000
751#define mmMME4_WACS_BMON6_BASE 0x7FFE10E000ull
752#define MME4_WACS_BMON6_MAX_OFFSET 0x1000
753#define MME4_WACS_BMON6_SECTION 0x4000
754#define mmMME4_WACS2_STM_BASE 0x7FFE112000ull
755#define MME4_WACS2_STM_MAX_OFFSET 0x1000
756#define MME4_WACS2_STM_SECTION 0x1000
757#define mmMME4_WACS2_CTI_BASE 0x7FFE113000ull
758#define MME4_WACS2_CTI_MAX_OFFSET 0x1000
759#define MME4_WACS2_CTI_SECTION 0x1000
760#define mmMME4_WACS2_ETF_BASE 0x7FFE114000ull
761#define MME4_WACS2_ETF_MAX_OFFSET 0x1000
762#define MME4_WACS2_ETF_SECTION 0x1000
763#define mmMME4_WACS2_SPMU_BASE 0x7FFE115000ull
764#define MME4_WACS2_SPMU_MAX_OFFSET 0x1000
765#define MME4_WACS2_SPMU_SECTION 0x1000
766#define mmMME4_WACS2_CTI0_BASE 0x7FFE116000ull
767#define MME4_WACS2_CTI0_MAX_OFFSET 0x1000
768#define MME4_WACS2_CTI0_SECTION 0x1000
769#define mmMME4_WACS2_CTI1_BASE 0x7FFE117000ull
770#define MME4_WACS2_CTI1_MAX_OFFSET 0x1000
771#define MME4_WACS2_CTI1_SECTION 0x1000
772#define mmMME4_WACS2_BMON0_BASE 0x7FFE118000ull
773#define MME4_WACS2_BMON0_MAX_OFFSET 0x1000
774#define MME4_WACS2_BMON0_SECTION 0x1000
775#define mmMME4_WACS2_BMON1_BASE 0x7FFE119000ull
776#define MME4_WACS2_BMON1_MAX_OFFSET 0x1000
777#define MME4_WACS2_BMON1_SECTION 0x1000
778#define mmMME4_WACS2_BMON2_BASE 0x7FFE11A000ull
779#define MME4_WACS2_BMON2_MAX_OFFSET 0x1000
780#define MME4_WACS2_BMON2_SECTION 0x27000
781#define mmMME5_RTR_FUNNEL_BASE 0x7FFE141000ull
782#define MME5_RTR_FUNNEL_MAX_OFFSET 0x1000
783#define MME5_RTR_FUNNEL_SECTION 0x2BF000
784#define mmDMA_ROM_TABLE_BASE 0x7FFE400000ull
785#define DMA_ROM_TABLE_MAX_OFFSET 0x1000
786#define DMA_ROM_TABLE_SECTION 0x1000
787#define mmDMA_CH_0_CS_STM_BASE 0x7FFE401000ull
788#define DMA_CH_0_CS_STM_MAX_OFFSET 0x1000
789#define DMA_CH_0_CS_STM_SECTION 0x1000
790#define mmDMA_CH_0_CS_CTI_BASE 0x7FFE402000ull
791#define DMA_CH_0_CS_CTI_MAX_OFFSET 0x1000
792#define DMA_CH_0_CS_CTI_SECTION 0x1000
793#define mmDMA_CH_0_CS_ETF_BASE 0x7FFE403000ull
794#define DMA_CH_0_CS_ETF_MAX_OFFSET 0x1000
795#define DMA_CH_0_CS_ETF_SECTION 0x1000
796#define mmDMA_CH_0_CS_SPMU_BASE 0x7FFE404000ull
797#define DMA_CH_0_CS_SPMU_MAX_OFFSET 0x1000
798#define DMA_CH_0_CS_SPMU_SECTION 0x1000
799#define mmDMA_CH_0_BMON_CTI_BASE 0x7FFE405000ull
800#define DMA_CH_0_BMON_CTI_MAX_OFFSET 0x1000
801#define DMA_CH_0_BMON_CTI_SECTION 0x1000
802#define mmDMA_CH_0_USER_CTI_BASE 0x7FFE406000ull
803#define DMA_CH_0_USER_CTI_MAX_OFFSET 0x1000
804#define DMA_CH_0_USER_CTI_SECTION 0x1000
805#define mmDMA_CH_0_BMON_0_BASE 0x7FFE407000ull
806#define DMA_CH_0_BMON_0_MAX_OFFSET 0x1000
807#define DMA_CH_0_BMON_0_SECTION 0x1000
808#define mmDMA_CH_0_BMON_1_BASE 0x7FFE408000ull
809#define DMA_CH_0_BMON_1_MAX_OFFSET 0x1000
810#define DMA_CH_0_BMON_1_SECTION 0x9000
811#define mmDMA_CH_1_CS_STM_BASE 0x7FFE411000ull
812#define DMA_CH_1_CS_STM_MAX_OFFSET 0x1000
813#define DMA_CH_1_CS_STM_SECTION 0x1000
814#define mmDMA_CH_1_CS_CTI_BASE 0x7FFE412000ull
815#define DMA_CH_1_CS_CTI_MAX_OFFSET 0x1000
816#define DMA_CH_1_CS_CTI_SECTION 0x1000
817#define mmDMA_CH_1_CS_ETF_BASE 0x7FFE413000ull
818#define DMA_CH_1_CS_ETF_MAX_OFFSET 0x1000
819#define DMA_CH_1_CS_ETF_SECTION 0x1000
820#define mmDMA_CH_1_CS_SPMU_BASE 0x7FFE414000ull
821#define DMA_CH_1_CS_SPMU_MAX_OFFSET 0x1000
822#define DMA_CH_1_CS_SPMU_SECTION 0x1000
823#define mmDMA_CH_1_BMON_CTI_BASE 0x7FFE415000ull
824#define DMA_CH_1_BMON_CTI_MAX_OFFSET 0x1000
825#define DMA_CH_1_BMON_CTI_SECTION 0x1000
826#define mmDMA_CH_1_USER_CTI_BASE 0x7FFE416000ull
827#define DMA_CH_1_USER_CTI_MAX_OFFSET 0x1000
828#define DMA_CH_1_USER_CTI_SECTION 0x1000
829#define mmDMA_CH_1_BMON_0_BASE 0x7FFE417000ull
830#define DMA_CH_1_BMON_0_MAX_OFFSET 0x1000
831#define DMA_CH_1_BMON_0_SECTION 0x1000
832#define mmDMA_CH_1_BMON_1_BASE 0x7FFE418000ull
833#define DMA_CH_1_BMON_1_MAX_OFFSET 0x1000
834#define DMA_CH_1_BMON_1_SECTION 0x9000
835#define mmDMA_CH_2_CS_STM_BASE 0x7FFE421000ull
836#define DMA_CH_2_CS_STM_MAX_OFFSET 0x1000
837#define DMA_CH_2_CS_STM_SECTION 0x1000
838#define mmDMA_CH_2_CS_CTI_BASE 0x7FFE422000ull
839#define DMA_CH_2_CS_CTI_MAX_OFFSET 0x1000
840#define DMA_CH_2_CS_CTI_SECTION 0x1000
841#define mmDMA_CH_2_CS_ETF_BASE 0x7FFE423000ull
842#define DMA_CH_2_CS_ETF_MAX_OFFSET 0x1000
843#define DMA_CH_2_CS_ETF_SECTION 0x1000
844#define mmDMA_CH_2_CS_SPMU_BASE 0x7FFE424000ull
845#define DMA_CH_2_CS_SPMU_MAX_OFFSET 0x1000
846#define DMA_CH_2_CS_SPMU_SECTION 0x1000
847#define mmDMA_CH_2_BMON_CTI_BASE 0x7FFE425000ull
848#define DMA_CH_2_BMON_CTI_MAX_OFFSET 0x1000
849#define DMA_CH_2_BMON_CTI_SECTION 0x1000
850#define mmDMA_CH_2_USER_CTI_BASE 0x7FFE426000ull
851#define DMA_CH_2_USER_CTI_MAX_OFFSET 0x1000
852#define DMA_CH_2_USER_CTI_SECTION 0x1000
853#define mmDMA_CH_2_BMON_0_BASE 0x7FFE427000ull
854#define DMA_CH_2_BMON_0_MAX_OFFSET 0x1000
855#define DMA_CH_2_BMON_0_SECTION 0x1000
856#define mmDMA_CH_2_BMON_1_BASE 0x7FFE428000ull
857#define DMA_CH_2_BMON_1_MAX_OFFSET 0x1000
858#define DMA_CH_2_BMON_1_SECTION 0x9000
859#define mmDMA_CH_3_CS_STM_BASE 0x7FFE431000ull
860#define DMA_CH_3_CS_STM_MAX_OFFSET 0x1000
861#define DMA_CH_3_CS_STM_SECTION 0x1000
862#define mmDMA_CH_3_CS_CTI_BASE 0x7FFE432000ull
863#define DMA_CH_3_CS_CTI_MAX_OFFSET 0x1000
864#define DMA_CH_3_CS_CTI_SECTION 0x1000
865#define mmDMA_CH_3_CS_ETF_BASE 0x7FFE433000ull
866#define DMA_CH_3_CS_ETF_MAX_OFFSET 0x1000
867#define DMA_CH_3_CS_ETF_SECTION 0x1000
868#define mmDMA_CH_3_CS_SPMU_BASE 0x7FFE434000ull
869#define DMA_CH_3_CS_SPMU_MAX_OFFSET 0x1000
870#define DMA_CH_3_CS_SPMU_SECTION 0x1000
871#define mmDMA_CH_3_BMON_CTI_BASE 0x7FFE435000ull
872#define DMA_CH_3_BMON_CTI_MAX_OFFSET 0x1000
873#define DMA_CH_3_BMON_CTI_SECTION 0x1000
874#define mmDMA_CH_3_USER_CTI_BASE 0x7FFE436000ull
875#define DMA_CH_3_USER_CTI_MAX_OFFSET 0x1000
876#define DMA_CH_3_USER_CTI_SECTION 0x1000
877#define mmDMA_CH_3_BMON_0_BASE 0x7FFE437000ull
878#define DMA_CH_3_BMON_0_MAX_OFFSET 0x1000
879#define DMA_CH_3_BMON_0_SECTION 0x1000
880#define mmDMA_CH_3_BMON_1_BASE 0x7FFE438000ull
881#define DMA_CH_3_BMON_1_MAX_OFFSET 0x1000
882#define DMA_CH_3_BMON_1_SECTION 0x9000
883#define mmDMA_CH_4_CS_STM_BASE 0x7FFE441000ull
884#define DMA_CH_4_CS_STM_MAX_OFFSET 0x1000
885#define DMA_CH_4_CS_STM_SECTION 0x1000
886#define mmDMA_CH_4_CS_CTI_BASE 0x7FFE442000ull
887#define DMA_CH_4_CS_CTI_MAX_OFFSET 0x1000
888#define DMA_CH_4_CS_CTI_SECTION 0x1000
889#define mmDMA_CH_4_CS_ETF_BASE 0x7FFE443000ull
890#define DMA_CH_4_CS_ETF_MAX_OFFSET 0x1000
891#define DMA_CH_4_CS_ETF_SECTION 0x1000
892#define mmDMA_CH_4_CS_SPMU_BASE 0x7FFE444000ull
893#define DMA_CH_4_CS_SPMU_MAX_OFFSET 0x1000
894#define DMA_CH_4_CS_SPMU_SECTION 0x1000
895#define mmDMA_CH_4_BMON_CTI_BASE 0x7FFE445000ull
896#define DMA_CH_4_BMON_CTI_MAX_OFFSET 0x1000
897#define DMA_CH_4_BMON_CTI_SECTION 0x1000
898#define mmDMA_CH_4_USER_CTI_BASE 0x7FFE446000ull
899#define DMA_CH_4_USER_CTI_MAX_OFFSET 0x1000
900#define DMA_CH_4_USER_CTI_SECTION 0x1000
901#define mmDMA_CH_4_BMON_0_BASE 0x7FFE447000ull
902#define DMA_CH_4_BMON_0_MAX_OFFSET 0x1000
903#define DMA_CH_4_BMON_0_SECTION 0x1000
904#define mmDMA_CH_4_BMON_1_BASE 0x7FFE448000ull
905#define DMA_CH_4_BMON_1_MAX_OFFSET 0x1000
906#define DMA_CH_4_BMON_1_SECTION 0x8000
907#define mmDMA_CH_FUNNEL_6_1_BASE 0x7FFE450000ull
908#define DMA_CH_FUNNEL_6_1_MAX_OFFSET 0x1000
909#define DMA_CH_FUNNEL_6_1_SECTION 0x11000
910#define mmDMA_MACRO_CS_STM_BASE 0x7FFE461000ull
911#define DMA_MACRO_CS_STM_MAX_OFFSET 0x1000
912#define DMA_MACRO_CS_STM_SECTION 0x1000
913#define mmDMA_MACRO_CS_CTI_BASE 0x7FFE462000ull
914#define DMA_MACRO_CS_CTI_MAX_OFFSET 0x1000
915#define DMA_MACRO_CS_CTI_SECTION 0x1000
916#define mmDMA_MACRO_CS_ETF_BASE 0x7FFE463000ull
917#define DMA_MACRO_CS_ETF_MAX_OFFSET 0x1000
918#define DMA_MACRO_CS_ETF_SECTION 0x1000
919#define mmDMA_MACRO_CS_SPMU_BASE 0x7FFE464000ull
920#define DMA_MACRO_CS_SPMU_MAX_OFFSET 0x1000
921#define DMA_MACRO_CS_SPMU_SECTION 0x1000
922#define mmDMA_MACRO_BMON_CTI_BASE 0x7FFE465000ull
923#define DMA_MACRO_BMON_CTI_MAX_OFFSET 0x1000
924#define DMA_MACRO_BMON_CTI_SECTION 0x1000
925#define mmDMA_MACRO_USER_CTI_BASE 0x7FFE466000ull
926#define DMA_MACRO_USER_CTI_MAX_OFFSET 0x1000
927#define DMA_MACRO_USER_CTI_SECTION 0x1000
928#define mmDMA_MACRO_BMON_0_BASE 0x7FFE467000ull
929#define DMA_MACRO_BMON_0_MAX_OFFSET 0x1000
930#define DMA_MACRO_BMON_0_SECTION 0x1000
931#define mmDMA_MACRO_BMON_1_BASE 0x7FFE468000ull
932#define DMA_MACRO_BMON_1_MAX_OFFSET 0x1000
933#define DMA_MACRO_BMON_1_SECTION 0x1000
934#define mmDMA_MACRO_BMON_2_BASE 0x7FFE469000ull
935#define DMA_MACRO_BMON_2_MAX_OFFSET 0x1000
936#define DMA_MACRO_BMON_2_SECTION 0x1000
937#define mmDMA_MACRO_BMON_3_BASE 0x7FFE46A000ull
938#define DMA_MACRO_BMON_3_MAX_OFFSET 0x1000
939#define DMA_MACRO_BMON_3_SECTION 0x1000
940#define mmDMA_MACRO_BMON_4_BASE 0x7FFE46B000ull
941#define DMA_MACRO_BMON_4_MAX_OFFSET 0x1000
942#define DMA_MACRO_BMON_4_SECTION 0x1000
943#define mmDMA_MACRO_BMON_5_BASE 0x7FFE46C000ull
944#define DMA_MACRO_BMON_5_MAX_OFFSET 0x1000
945#define DMA_MACRO_BMON_5_SECTION 0x1000
946#define mmDMA_MACRO_BMON_6_BASE 0x7FFE46D000ull
947#define DMA_MACRO_BMON_6_MAX_OFFSET 0x1000
948#define DMA_MACRO_BMON_6_SECTION 0x1000
949#define mmDMA_MACRO_BMON_7_BASE 0x7FFE46E000ull
950#define DMA_MACRO_BMON_7_MAX_OFFSET 0x1000
951#define DMA_MACRO_BMON_7_SECTION 0x2000
952#define mmDMA_MACRO_FUNNEL_3_1_BASE 0x7FFE470000ull
953#define DMA_MACRO_FUNNEL_3_1_MAX_OFFSET 0x1000
954#define DMA_MACRO_FUNNEL_3_1_SECTION 0x10000
955#define mmCPU_ROM_TABLE_BASE 0x7FFE480000ull
956#define CPU_ROM_TABLE_MAX_OFFSET 0x1000
957#define CPU_ROM_TABLE_SECTION 0x1000
958#define mmCPU_ETF_0_BASE 0x7FFE481000ull
959#define CPU_ETF_0_MAX_OFFSET 0x1000
960#define CPU_ETF_0_SECTION 0x1000
961#define mmCPU_ETF_1_BASE 0x7FFE482000ull
962#define CPU_ETF_1_MAX_OFFSET 0x1000
963#define CPU_ETF_1_SECTION 0x2000
964#define mmCPU_CTI_BASE 0x7FFE484000ull
965#define CPU_CTI_MAX_OFFSET 0x1000
966#define CPU_CTI_SECTION 0x1000
967#define mmCPU_FUNNEL_BASE 0x7FFE485000ull
968#define CPU_FUNNEL_MAX_OFFSET 0x1000
969#define CPU_FUNNEL_SECTION 0x1000
970#define mmCPU_STM_BASE 0x7FFE486000ull
971#define CPU_STM_MAX_OFFSET 0x1000
972#define CPU_STM_SECTION 0x1000
973#define mmCPU_CTI_TRACE_BASE 0x7FFE487000ull
974#define CPU_CTI_TRACE_MAX_OFFSET 0x1000
975#define CPU_CTI_TRACE_SECTION 0x1000
976#define mmCPU_ETF_TRACE_BASE 0x7FFE488000ull
977#define CPU_ETF_TRACE_MAX_OFFSET 0x1000
978#define CPU_ETF_TRACE_SECTION 0x1000
979#define mmCPU_WR_BMON_BASE 0x7FFE489000ull
980#define CPU_WR_BMON_MAX_OFFSET 0x1000
981#define CPU_WR_BMON_SECTION 0x1000
982#define mmCPU_RD_BMON_BASE 0x7FFE48A000ull
983#define CPU_RD_BMON_MAX_OFFSET 0x1000
984#define CPU_RD_BMON_SECTION 0x37000
985#define mmMMU_CS_STM_BASE 0x7FFE4C1000ull
986#define MMU_CS_STM_MAX_OFFSET 0x1000
987#define MMU_CS_STM_SECTION 0x1000
988#define mmMMU_CS_CTI_BASE 0x7FFE4C2000ull
989#define MMU_CS_CTI_MAX_OFFSET 0x1000
990#define MMU_CS_CTI_SECTION 0x1000
991#define mmMMU_CS_ETF_BASE 0x7FFE4C3000ull
992#define MMU_CS_ETF_MAX_OFFSET 0x1000
993#define MMU_CS_ETF_SECTION 0x1000
994#define mmMMU_CS_SPMU_BASE 0x7FFE4C4000ull
995#define MMU_CS_SPMU_MAX_OFFSET 0x1000
996#define MMU_CS_SPMU_SECTION 0x1000
997#define mmMMU_BMON_CTI_BASE 0x7FFE4C5000ull
998#define MMU_BMON_CTI_MAX_OFFSET 0x1000
999#define MMU_BMON_CTI_SECTION 0x1000
1000#define mmMMU_USER_CTI_BASE 0x7FFE4C6000ull
1001#define MMU_USER_CTI_MAX_OFFSET 0x1000
1002#define MMU_USER_CTI_SECTION 0x1000
1003#define mmMMU_BMON_0_BASE 0x7FFE4C7000ull
1004#define MMU_BMON_0_MAX_OFFSET 0x1000
1005#define MMU_BMON_0_SECTION 0x1000
1006#define mmMMU_BMON_1_BASE 0x7FFE4C8000ull
1007#define MMU_BMON_1_MAX_OFFSET 0x1000
1008#define MMU_BMON_1_SECTION 0x338000
1009#define mmCA53_BASE 0x7FFE800000ull
1010#define CA53_MAX_OFFSET 0x1000
1011#define CA53_SECTION 0x400000
1012#define mmPCI_ROM_TABLE_BASE 0x7FFEC00000ull
1013#define PCI_ROM_TABLE_MAX_OFFSET 0x1000
1014#define PCI_ROM_TABLE_SECTION 0x1000
1015#define mmPCIE_STM_BASE 0x7FFEC01000ull
1016#define PCIE_STM_MAX_OFFSET 0x1000
1017#define PCIE_STM_SECTION 0x1000
1018#define mmPCIE_ETF_BASE 0x7FFEC02000ull
1019#define PCIE_ETF_MAX_OFFSET 0x1000
1020#define PCIE_ETF_SECTION 0x1000
1021#define mmPCIE_CTI_0_BASE 0x7FFEC03000ull
1022#define PCIE_CTI_0_MAX_OFFSET 0x1000
1023#define PCIE_CTI_0_SECTION 0x1000
1024#define mmPCIE_SPMU_BASE 0x7FFEC04000ull
1025#define PCIE_SPMU_MAX_OFFSET 0x1000
1026#define PCIE_SPMU_SECTION 0x1000
1027#define mmPCIE_CTI_1_BASE 0x7FFEC05000ull
1028#define PCIE_CTI_1_MAX_OFFSET 0x1000
1029#define PCIE_CTI_1_SECTION 0x1000
1030#define mmPCIE_FUNNEL_BASE 0x7FFEC06000ull
1031#define PCIE_FUNNEL_MAX_OFFSET 0x1000
1032#define PCIE_FUNNEL_SECTION 0x1000
1033#define mmPCIE_BMON_MSTR_WR_BASE 0x7FFEC07000ull
1034#define PCIE_BMON_MSTR_WR_MAX_OFFSET 0x1000
1035#define PCIE_BMON_MSTR_WR_SECTION 0x1000
1036#define mmPCIE_BMON_MSTR_RD_BASE 0x7FFEC08000ull
1037#define PCIE_BMON_MSTR_RD_MAX_OFFSET 0x1000
1038#define PCIE_BMON_MSTR_RD_SECTION 0x1000
1039#define mmPCIE_BMON_SLV_WR_BASE 0x7FFEC09000ull
1040#define PCIE_BMON_SLV_WR_MAX_OFFSET 0x1000
1041#define PCIE_BMON_SLV_WR_SECTION 0x1000
1042#define mmPCIE_BMON_SLV_RD_BASE 0x7FFEC0A000ull
1043#define PCIE_BMON_SLV_RD_MAX_OFFSET 0x1000
1044#define PCIE_BMON_SLV_RD_SECTION 0x36000
1045#define mmPSOC_CTI_BASE 0x7FFEC40000ull
1046#define PSOC_CTI_MAX_OFFSET 0x1000
1047#define PSOC_CTI_SECTION 0x1000
1048#define mmPSOC_STM_BASE 0x7FFEC41000ull
1049#define PSOC_STM_MAX_OFFSET 0x1000
1050#define PSOC_STM_SECTION 0x1000
1051#define mmPSOC_FUNNEL_BASE 0x7FFEC42000ull
1052#define PSOC_FUNNEL_MAX_OFFSET 0x1000
1053#define PSOC_FUNNEL_SECTION 0x1000
1054#define mmPSOC_ETR_BASE 0x7FFEC43000ull
1055#define PSOC_ETR_MAX_OFFSET 0x1000
1056#define PSOC_ETR_SECTION 0x1000
1057#define mmPSOC_ETF_BASE 0x7FFEC44000ull
1058#define PSOC_ETF_MAX_OFFSET 0x1000
1059#define PSOC_ETF_SECTION 0x1000
1060#define mmPSOC_TS_CTI_BASE 0x7FFEC45000ull
1061#define PSOC_TS_CTI_MAX_OFFSET 0x1000
1062#define PSOC_TS_CTI_SECTION 0xB000
1063#define mmTOP_ROM_TABLE_BASE 0x7FFEC50000ull
1064#define TOP_ROM_TABLE_MAX_OFFSET 0x1000
1065#define TOP_ROM_TABLE_SECTION 0x1F0000
1066#define mmTPC1_RTR_FUNNEL_BASE 0x7FFEE40000ull
1067#define TPC1_RTR_FUNNEL_MAX_OFFSET 0x1000
1068#define TPC1_RTR_FUNNEL_SECTION 0x40000
1069#define mmTPC2_RTR_FUNNEL_BASE 0x7FFEE80000ull
1070#define TPC2_RTR_FUNNEL_MAX_OFFSET 0x1000
1071#define TPC2_RTR_FUNNEL_SECTION 0x40000
1072#define mmTPC3_RTR_FUNNEL_BASE 0x7FFEEC0000ull
1073#define TPC3_RTR_FUNNEL_MAX_OFFSET 0x1000
1074#define TPC3_RTR_FUNNEL_SECTION 0x40000
1075#define mmTPC4_RTR_FUNNEL_BASE 0x7FFEF00000ull
1076#define TPC4_RTR_FUNNEL_MAX_OFFSET 0x1000
1077#define TPC4_RTR_FUNNEL_SECTION 0x40000
1078#define mmTPC5_RTR_FUNNEL_BASE 0x7FFEF40000ull
1079#define TPC5_RTR_FUNNEL_MAX_OFFSET 0x1000
1080#define TPC5_RTR_FUNNEL_SECTION 0x40000
1081#define mmTPC6_RTR_FUNNEL_BASE 0x7FFEF80000ull
1082#define TPC6_RTR_FUNNEL_MAX_OFFSET 0x1000
1083#define TPC6_RTR_FUNNEL_SECTION 0x81000
1084#define mmTPC0_EML_SPMU_BASE 0x7FFF001000ull
1085#define TPC0_EML_SPMU_MAX_OFFSET 0x1000
1086#define TPC0_EML_SPMU_SECTION 0x1000
1087#define mmTPC0_EML_ETF_BASE 0x7FFF002000ull
1088#define TPC0_EML_ETF_MAX_OFFSET 0x1000
1089#define TPC0_EML_ETF_SECTION 0x1000
1090#define mmTPC0_EML_STM_BASE 0x7FFF003000ull
1091#define TPC0_EML_STM_MAX_OFFSET 0x1000
1092#define TPC0_EML_STM_SECTION 0x1000
1093#define mmTPC0_EML_ETM_R4_BASE 0x7FFF004000ull
1094#define TPC0_EML_ETM_R4_MAX_OFFSET 0x0
1095#define TPC0_EML_ETM_R4_SECTION 0x1000
1096#define mmTPC0_EML_CTI_BASE 0x7FFF005000ull
1097#define TPC0_EML_CTI_MAX_OFFSET 0x1000
1098#define TPC0_EML_CTI_SECTION 0x1000
1099#define mmTPC0_EML_FUNNEL_BASE 0x7FFF006000ull
1100#define TPC0_EML_FUNNEL_MAX_OFFSET 0x1000
1101#define TPC0_EML_FUNNEL_SECTION 0x1000
1102#define mmTPC0_EML_BUSMON_0_BASE 0x7FFF007000ull
1103#define TPC0_EML_BUSMON_0_MAX_OFFSET 0x1000
1104#define TPC0_EML_BUSMON_0_SECTION 0x1000
1105#define mmTPC0_EML_BUSMON_1_BASE 0x7FFF008000ull
1106#define TPC0_EML_BUSMON_1_MAX_OFFSET 0x1000
1107#define TPC0_EML_BUSMON_1_SECTION 0x1000
1108#define mmTPC0_EML_BUSMON_2_BASE 0x7FFF009000ull
1109#define TPC0_EML_BUSMON_2_MAX_OFFSET 0x1000
1110#define TPC0_EML_BUSMON_2_SECTION 0x1000
1111#define mmTPC0_EML_BUSMON_3_BASE 0x7FFF00A000ull
1112#define TPC0_EML_BUSMON_3_MAX_OFFSET 0x1000
1113#define TPC0_EML_BUSMON_3_SECTION 0x36000
1114#define mmTPC0_EML_CFG_BASE 0x7FFF040000ull
1115#define TPC0_EML_CFG_MAX_OFFSET 0x338
1116#define TPC0_EML_CFG_SECTION 0x1BF000
1117#define mmTPC0_EML_CS_BASE 0x7FFF1FF000ull
1118#define TPC0_EML_CS_MAX_OFFSET 0x1000
1119#define TPC0_EML_CS_SECTION 0x2000
1120#define mmTPC1_EML_SPMU_BASE 0x7FFF201000ull
1121#define TPC1_EML_SPMU_MAX_OFFSET 0x1000
1122#define TPC1_EML_SPMU_SECTION 0x1000
1123#define mmTPC1_EML_ETF_BASE 0x7FFF202000ull
1124#define TPC1_EML_ETF_MAX_OFFSET 0x1000
1125#define TPC1_EML_ETF_SECTION 0x1000
1126#define mmTPC1_EML_STM_BASE 0x7FFF203000ull
1127#define TPC1_EML_STM_MAX_OFFSET 0x1000
1128#define TPC1_EML_STM_SECTION 0x1000
1129#define mmTPC1_EML_ETM_R4_BASE 0x7FFF204000ull
1130#define TPC1_EML_ETM_R4_MAX_OFFSET 0x0
1131#define TPC1_EML_ETM_R4_SECTION 0x1000
1132#define mmTPC1_EML_CTI_BASE 0x7FFF205000ull
1133#define TPC1_EML_CTI_MAX_OFFSET 0x1000
1134#define TPC1_EML_CTI_SECTION 0x1000
1135#define mmTPC1_EML_FUNNEL_BASE 0x7FFF206000ull
1136#define TPC1_EML_FUNNEL_MAX_OFFSET 0x1000
1137#define TPC1_EML_FUNNEL_SECTION 0x1000
1138#define mmTPC1_EML_BUSMON_0_BASE 0x7FFF207000ull
1139#define TPC1_EML_BUSMON_0_MAX_OFFSET 0x1000
1140#define TPC1_EML_BUSMON_0_SECTION 0x1000
1141#define mmTPC1_EML_BUSMON_1_BASE 0x7FFF208000ull
1142#define TPC1_EML_BUSMON_1_MAX_OFFSET 0x1000
1143#define TPC1_EML_BUSMON_1_SECTION 0x1000
1144#define mmTPC1_EML_BUSMON_2_BASE 0x7FFF209000ull
1145#define TPC1_EML_BUSMON_2_MAX_OFFSET 0x1000
1146#define TPC1_EML_BUSMON_2_SECTION 0x1000
1147#define mmTPC1_EML_BUSMON_3_BASE 0x7FFF20A000ull
1148#define TPC1_EML_BUSMON_3_MAX_OFFSET 0x1000
1149#define TPC1_EML_BUSMON_3_SECTION 0x36000
1150#define mmTPC1_EML_CFG_BASE 0x7FFF240000ull
1151#define TPC1_EML_CFG_MAX_OFFSET 0x338
1152#define TPC1_EML_CFG_SECTION 0x1BF000
1153#define mmTPC1_EML_CS_BASE 0x7FFF3FF000ull
1154#define TPC1_EML_CS_MAX_OFFSET 0x1000
1155#define TPC1_EML_CS_SECTION 0x2000
1156#define mmTPC2_EML_SPMU_BASE 0x7FFF401000ull
1157#define TPC2_EML_SPMU_MAX_OFFSET 0x1000
1158#define TPC2_EML_SPMU_SECTION 0x1000
1159#define mmTPC2_EML_ETF_BASE 0x7FFF402000ull
1160#define TPC2_EML_ETF_MAX_OFFSET 0x1000
1161#define TPC2_EML_ETF_SECTION 0x1000
1162#define mmTPC2_EML_STM_BASE 0x7FFF403000ull
1163#define TPC2_EML_STM_MAX_OFFSET 0x1000
1164#define TPC2_EML_STM_SECTION 0x1000
1165#define mmTPC2_EML_ETM_R4_BASE 0x7FFF404000ull
1166#define TPC2_EML_ETM_R4_MAX_OFFSET 0x0
1167#define TPC2_EML_ETM_R4_SECTION 0x1000
1168#define mmTPC2_EML_CTI_BASE 0x7FFF405000ull
1169#define TPC2_EML_CTI_MAX_OFFSET 0x1000
1170#define TPC2_EML_CTI_SECTION 0x1000
1171#define mmTPC2_EML_FUNNEL_BASE 0x7FFF406000ull
1172#define TPC2_EML_FUNNEL_MAX_OFFSET 0x1000
1173#define TPC2_EML_FUNNEL_SECTION 0x1000
1174#define mmTPC2_EML_BUSMON_0_BASE 0x7FFF407000ull
1175#define TPC2_EML_BUSMON_0_MAX_OFFSET 0x1000
1176#define TPC2_EML_BUSMON_0_SECTION 0x1000
1177#define mmTPC2_EML_BUSMON_1_BASE 0x7FFF408000ull
1178#define TPC2_EML_BUSMON_1_MAX_OFFSET 0x1000
1179#define TPC2_EML_BUSMON_1_SECTION 0x1000
1180#define mmTPC2_EML_BUSMON_2_BASE 0x7FFF409000ull
1181#define TPC2_EML_BUSMON_2_MAX_OFFSET 0x1000
1182#define TPC2_EML_BUSMON_2_SECTION 0x1000
1183#define mmTPC2_EML_BUSMON_3_BASE 0x7FFF40A000ull
1184#define TPC2_EML_BUSMON_3_MAX_OFFSET 0x1000
1185#define TPC2_EML_BUSMON_3_SECTION 0x36000
1186#define mmTPC2_EML_CFG_BASE 0x7FFF440000ull
1187#define TPC2_EML_CFG_MAX_OFFSET 0x338
1188#define TPC2_EML_CFG_SECTION 0x1BF000
1189#define mmTPC2_EML_CS_BASE 0x7FFF5FF000ull
1190#define TPC2_EML_CS_MAX_OFFSET 0x1000
1191#define TPC2_EML_CS_SECTION 0x2000
1192#define mmTPC3_EML_SPMU_BASE 0x7FFF601000ull
1193#define TPC3_EML_SPMU_MAX_OFFSET 0x1000
1194#define TPC3_EML_SPMU_SECTION 0x1000
1195#define mmTPC3_EML_ETF_BASE 0x7FFF602000ull
1196#define TPC3_EML_ETF_MAX_OFFSET 0x1000
1197#define TPC3_EML_ETF_SECTION 0x1000
1198#define mmTPC3_EML_STM_BASE 0x7FFF603000ull
1199#define TPC3_EML_STM_MAX_OFFSET 0x1000
1200#define TPC3_EML_STM_SECTION 0x1000
1201#define mmTPC3_EML_ETM_R4_BASE 0x7FFF604000ull
1202#define TPC3_EML_ETM_R4_MAX_OFFSET 0x0
1203#define TPC3_EML_ETM_R4_SECTION 0x1000
1204#define mmTPC3_EML_CTI_BASE 0x7FFF605000ull
1205#define TPC3_EML_CTI_MAX_OFFSET 0x1000
1206#define TPC3_EML_CTI_SECTION 0x1000
1207#define mmTPC3_EML_FUNNEL_BASE 0x7FFF606000ull
1208#define TPC3_EML_FUNNEL_MAX_OFFSET 0x1000
1209#define TPC3_EML_FUNNEL_SECTION 0x1000
1210#define mmTPC3_EML_BUSMON_0_BASE 0x7FFF607000ull
1211#define TPC3_EML_BUSMON_0_MAX_OFFSET 0x1000
1212#define TPC3_EML_BUSMON_0_SECTION 0x1000
1213#define mmTPC3_EML_BUSMON_1_BASE 0x7FFF608000ull
1214#define TPC3_EML_BUSMON_1_MAX_OFFSET 0x1000
1215#define TPC3_EML_BUSMON_1_SECTION 0x1000
1216#define mmTPC3_EML_BUSMON_2_BASE 0x7FFF609000ull
1217#define TPC3_EML_BUSMON_2_MAX_OFFSET 0x1000
1218#define TPC3_EML_BUSMON_2_SECTION 0x1000
1219#define mmTPC3_EML_BUSMON_3_BASE 0x7FFF60A000ull
1220#define TPC3_EML_BUSMON_3_MAX_OFFSET 0x1000
1221#define TPC3_EML_BUSMON_3_SECTION 0x36000
1222#define mmTPC3_EML_CFG_BASE 0x7FFF640000ull
1223#define TPC3_EML_CFG_MAX_OFFSET 0x338
1224#define TPC3_EML_CFG_SECTION 0x1BF000
1225#define mmTPC3_EML_CS_BASE 0x7FFF7FF000ull
1226#define TPC3_EML_CS_MAX_OFFSET 0x1000
1227#define TPC3_EML_CS_SECTION 0x2000
1228#define mmTPC4_EML_SPMU_BASE 0x7FFF801000ull
1229#define TPC4_EML_SPMU_MAX_OFFSET 0x1000
1230#define TPC4_EML_SPMU_SECTION 0x1000
1231#define mmTPC4_EML_ETF_BASE 0x7FFF802000ull
1232#define TPC4_EML_ETF_MAX_OFFSET 0x1000
1233#define TPC4_EML_ETF_SECTION 0x1000
1234#define mmTPC4_EML_STM_BASE 0x7FFF803000ull
1235#define TPC4_EML_STM_MAX_OFFSET 0x1000
1236#define TPC4_EML_STM_SECTION 0x1000
1237#define mmTPC4_EML_ETM_R4_BASE 0x7FFF804000ull
1238#define TPC4_EML_ETM_R4_MAX_OFFSET 0x0
1239#define TPC4_EML_ETM_R4_SECTION 0x1000
1240#define mmTPC4_EML_CTI_BASE 0x7FFF805000ull
1241#define TPC4_EML_CTI_MAX_OFFSET 0x1000
1242#define TPC4_EML_CTI_SECTION 0x1000
1243#define mmTPC4_EML_FUNNEL_BASE 0x7FFF806000ull
1244#define TPC4_EML_FUNNEL_MAX_OFFSET 0x1000
1245#define TPC4_EML_FUNNEL_SECTION 0x1000
1246#define mmTPC4_EML_BUSMON_0_BASE 0x7FFF807000ull
1247#define TPC4_EML_BUSMON_0_MAX_OFFSET 0x1000
1248#define TPC4_EML_BUSMON_0_SECTION 0x1000
1249#define mmTPC4_EML_BUSMON_1_BASE 0x7FFF808000ull
1250#define TPC4_EML_BUSMON_1_MAX_OFFSET 0x1000
1251#define TPC4_EML_BUSMON_1_SECTION 0x1000
1252#define mmTPC4_EML_BUSMON_2_BASE 0x7FFF809000ull
1253#define TPC4_EML_BUSMON_2_MAX_OFFSET 0x1000
1254#define TPC4_EML_BUSMON_2_SECTION 0x1000
1255#define mmTPC4_EML_BUSMON_3_BASE 0x7FFF80A000ull
1256#define TPC4_EML_BUSMON_3_MAX_OFFSET 0x1000
1257#define TPC4_EML_BUSMON_3_SECTION 0x36000
1258#define mmTPC4_EML_CFG_BASE 0x7FFF840000ull
1259#define TPC4_EML_CFG_MAX_OFFSET 0x338
1260#define TPC4_EML_CFG_SECTION 0x1BF000
1261#define mmTPC4_EML_CS_BASE 0x7FFF9FF000ull
1262#define TPC4_EML_CS_MAX_OFFSET 0x1000
1263#define TPC4_EML_CS_SECTION 0x2000
1264#define mmTPC5_EML_SPMU_BASE 0x7FFFA01000ull
1265#define TPC5_EML_SPMU_MAX_OFFSET 0x1000
1266#define TPC5_EML_SPMU_SECTION 0x1000
1267#define mmTPC5_EML_ETF_BASE 0x7FFFA02000ull
1268#define TPC5_EML_ETF_MAX_OFFSET 0x1000
1269#define TPC5_EML_ETF_SECTION 0x1000
1270#define mmTPC5_EML_STM_BASE 0x7FFFA03000ull
1271#define TPC5_EML_STM_MAX_OFFSET 0x1000
1272#define TPC5_EML_STM_SECTION 0x1000
1273#define mmTPC5_EML_ETM_R4_BASE 0x7FFFA04000ull
1274#define TPC5_EML_ETM_R4_MAX_OFFSET 0x0
1275#define TPC5_EML_ETM_R4_SECTION 0x1000
1276#define mmTPC5_EML_CTI_BASE 0x7FFFA05000ull
1277#define TPC5_EML_CTI_MAX_OFFSET 0x1000
1278#define TPC5_EML_CTI_SECTION 0x1000
1279#define mmTPC5_EML_FUNNEL_BASE 0x7FFFA06000ull
1280#define TPC5_EML_FUNNEL_MAX_OFFSET 0x1000
1281#define TPC5_EML_FUNNEL_SECTION 0x1000
1282#define mmTPC5_EML_BUSMON_0_BASE 0x7FFFA07000ull
1283#define TPC5_EML_BUSMON_0_MAX_OFFSET 0x1000
1284#define TPC5_EML_BUSMON_0_SECTION 0x1000
1285#define mmTPC5_EML_BUSMON_1_BASE 0x7FFFA08000ull
1286#define TPC5_EML_BUSMON_1_MAX_OFFSET 0x1000
1287#define TPC5_EML_BUSMON_1_SECTION 0x1000
1288#define mmTPC5_EML_BUSMON_2_BASE 0x7FFFA09000ull
1289#define TPC5_EML_BUSMON_2_MAX_OFFSET 0x1000
1290#define TPC5_EML_BUSMON_2_SECTION 0x1000
1291#define mmTPC5_EML_BUSMON_3_BASE 0x7FFFA0A000ull
1292#define TPC5_EML_BUSMON_3_MAX_OFFSET 0x1000
1293#define TPC5_EML_BUSMON_3_SECTION 0x36000
1294#define mmTPC5_EML_CFG_BASE 0x7FFFA40000ull
1295#define TPC5_EML_CFG_MAX_OFFSET 0x338
1296#define TPC5_EML_CFG_SECTION 0x1BF000
1297#define mmTPC5_EML_CS_BASE 0x7FFFBFF000ull
1298#define TPC5_EML_CS_MAX_OFFSET 0x1000
1299#define TPC5_EML_CS_SECTION 0x2000
1300#define mmTPC6_EML_SPMU_BASE 0x7FFFC01000ull
1301#define TPC6_EML_SPMU_MAX_OFFSET 0x1000
1302#define TPC6_EML_SPMU_SECTION 0x1000
1303#define mmTPC6_EML_ETF_BASE 0x7FFFC02000ull
1304#define TPC6_EML_ETF_MAX_OFFSET 0x1000
1305#define TPC6_EML_ETF_SECTION 0x1000
1306#define mmTPC6_EML_STM_BASE 0x7FFFC03000ull
1307#define TPC6_EML_STM_MAX_OFFSET 0x1000
1308#define TPC6_EML_STM_SECTION 0x1000
1309#define mmTPC6_EML_ETM_R4_BASE 0x7FFFC04000ull
1310#define TPC6_EML_ETM_R4_MAX_OFFSET 0x0
1311#define TPC6_EML_ETM_R4_SECTION 0x1000
1312#define mmTPC6_EML_CTI_BASE 0x7FFFC05000ull
1313#define TPC6_EML_CTI_MAX_OFFSET 0x1000
1314#define TPC6_EML_CTI_SECTION 0x1000
1315#define mmTPC6_EML_FUNNEL_BASE 0x7FFFC06000ull
1316#define TPC6_EML_FUNNEL_MAX_OFFSET 0x1000
1317#define TPC6_EML_FUNNEL_SECTION 0x1000
1318#define mmTPC6_EML_BUSMON_0_BASE 0x7FFFC07000ull
1319#define TPC6_EML_BUSMON_0_MAX_OFFSET 0x1000
1320#define TPC6_EML_BUSMON_0_SECTION 0x1000
1321#define mmTPC6_EML_BUSMON_1_BASE 0x7FFFC08000ull
1322#define TPC6_EML_BUSMON_1_MAX_OFFSET 0x1000
1323#define TPC6_EML_BUSMON_1_SECTION 0x1000
1324#define mmTPC6_EML_BUSMON_2_BASE 0x7FFFC09000ull
1325#define TPC6_EML_BUSMON_2_MAX_OFFSET 0x1000
1326#define TPC6_EML_BUSMON_2_SECTION 0x1000
1327#define mmTPC6_EML_BUSMON_3_BASE 0x7FFFC0A000ull
1328#define TPC6_EML_BUSMON_3_MAX_OFFSET 0x1000
1329#define TPC6_EML_BUSMON_3_SECTION 0x36000
1330#define mmTPC6_EML_CFG_BASE 0x7FFFC40000ull
1331#define TPC6_EML_CFG_MAX_OFFSET 0x338
1332#define TPC6_EML_CFG_SECTION 0x1BF000
1333#define mmTPC6_EML_CS_BASE 0x7FFFDFF000ull
1334#define TPC6_EML_CS_MAX_OFFSET 0x1000
1335#define TPC6_EML_CS_SECTION 0x2000
1336#define mmTPC7_EML_SPMU_BASE 0x7FFFE01000ull
1337#define TPC7_EML_SPMU_MAX_OFFSET 0x1000
1338#define TPC7_EML_SPMU_SECTION 0x1000
1339#define mmTPC7_EML_ETF_BASE 0x7FFFE02000ull
1340#define TPC7_EML_ETF_MAX_OFFSET 0x1000
1341#define TPC7_EML_ETF_SECTION 0x1000
1342#define mmTPC7_EML_STM_BASE 0x7FFFE03000ull
1343#define TPC7_EML_STM_MAX_OFFSET 0x1000
1344#define TPC7_EML_STM_SECTION 0x1000
1345#define mmTPC7_EML_ETM_R4_BASE 0x7FFFE04000ull
1346#define TPC7_EML_ETM_R4_MAX_OFFSET 0x0
1347#define TPC7_EML_ETM_R4_SECTION 0x1000
1348#define mmTPC7_EML_CTI_BASE 0x7FFFE05000ull
1349#define TPC7_EML_CTI_MAX_OFFSET 0x1000
1350#define TPC7_EML_CTI_SECTION 0x1000
1351#define mmTPC7_EML_FUNNEL_BASE 0x7FFFE06000ull
1352#define TPC7_EML_FUNNEL_MAX_OFFSET 0x1000
1353#define TPC7_EML_FUNNEL_SECTION 0x1000
1354#define mmTPC7_EML_BUSMON_0_BASE 0x7FFFE07000ull
1355#define TPC7_EML_BUSMON_0_MAX_OFFSET 0x1000
1356#define TPC7_EML_BUSMON_0_SECTION 0x1000
1357#define mmTPC7_EML_BUSMON_1_BASE 0x7FFFE08000ull
1358#define TPC7_EML_BUSMON_1_MAX_OFFSET 0x1000
1359#define TPC7_EML_BUSMON_1_SECTION 0x1000
1360#define mmTPC7_EML_BUSMON_2_BASE 0x7FFFE09000ull
1361#define TPC7_EML_BUSMON_2_MAX_OFFSET 0x1000
1362#define TPC7_EML_BUSMON_2_SECTION 0x1000
1363#define mmTPC7_EML_BUSMON_3_BASE 0x7FFFE0A000ull
1364#define TPC7_EML_BUSMON_3_MAX_OFFSET 0x1000
1365#define TPC7_EML_BUSMON_3_SECTION 0x36000
1366#define mmTPC7_EML_CFG_BASE 0x7FFFE40000ull
1367#define TPC7_EML_CFG_MAX_OFFSET 0x338
1368#define TPC7_EML_CFG_SECTION 0x1BF000
1369#define mmTPC7_EML_CS_BASE 0x7FFFFFF000ull
1370#define TPC7_EML_CS_MAX_OFFSET 0x1000
1371
1372#endif /* GOYA_BLOCKS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
new file mode 100644
index 000000000000..a161ecfe74de
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
@@ -0,0 +1,275 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef ASIC_REG_GOYA_MASKS_H_
9#define ASIC_REG_GOYA_MASKS_H_
10
11#include "goya_regs.h"
12
13/* Useful masks for bits in various registers */
14#define QMAN_DMA_ENABLE (\
15 (1 << DMA_QM_0_GLBL_CFG0_PQF_EN_SHIFT) | \
16 (1 << DMA_QM_0_GLBL_CFG0_CQF_EN_SHIFT) | \
17 (1 << DMA_QM_0_GLBL_CFG0_CP_EN_SHIFT) | \
18 (1 << DMA_QM_0_GLBL_CFG0_DMA_EN_SHIFT))
19
20#define QMAN_DMA_FULLY_TRUSTED (\
21 (1 << DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT) | \
22 (1 << DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT) | \
23 (1 << DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT) | \
24 (1 << DMA_QM_0_GLBL_PROT_DMA_PROT_SHIFT) | \
25 (1 << DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
26 (1 << DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
27 (1 << DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
28 (1 << DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT))
29
30#define QMAN_DMA_PARTLY_TRUSTED (\
31 (1 << DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT) | \
32 (1 << DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT) | \
33 (1 << DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT) | \
34 (1 << DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
35 (1 << DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
36 (1 << DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
37 (1 << DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT))
38
39#define QMAN_DMA_STOP (\
40 (1 << DMA_QM_0_GLBL_CFG1_PQF_STOP_SHIFT) | \
41 (1 << DMA_QM_0_GLBL_CFG1_CQF_STOP_SHIFT) | \
42 (1 << DMA_QM_0_GLBL_CFG1_CP_STOP_SHIFT) | \
43 (1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT))
44
45#define QMAN_DMA_IS_STOPPED (\
46 (1 << DMA_QM_0_GLBL_STS0_PQF_IS_STOP_SHIFT) | \
47 (1 << DMA_QM_0_GLBL_STS0_CQF_IS_STOP_SHIFT) | \
48 (1 << DMA_QM_0_GLBL_STS0_CP_IS_STOP_SHIFT) | \
49 (1 << DMA_QM_0_GLBL_STS0_DMA_IS_STOP_SHIFT))
50
51#define QMAN_DMA_ERR_MSG_EN (\
52 (1 << DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
53 (1 << DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
54 (1 << DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
55 (1 << DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
56 (1 << DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
57 (1 << DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
58 (1 << DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
59 (1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
60
61#define QMAN_MME_ENABLE (\
62 (1 << MME_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
63 (1 << MME_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
64 (1 << MME_QM_GLBL_CFG0_CP_EN_SHIFT))
65
66#define CMDQ_MME_ENABLE (\
67 (1 << MME_CMDQ_GLBL_CFG0_CQF_EN_SHIFT) | \
68 (1 << MME_CMDQ_GLBL_CFG0_CP_EN_SHIFT))
69
70#define QMAN_MME_STOP (\
71 (1 << MME_QM_GLBL_CFG1_PQF_STOP_SHIFT) | \
72 (1 << MME_QM_GLBL_CFG1_CQF_STOP_SHIFT) | \
73 (1 << MME_QM_GLBL_CFG1_CP_STOP_SHIFT))
74
75#define CMDQ_MME_STOP (\
76 (1 << MME_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT) | \
77 (1 << MME_CMDQ_GLBL_CFG1_CP_STOP_SHIFT))
78
79#define QMAN_MME_ERR_MSG_EN (\
80 (1 << MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
81 (1 << MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
82 (1 << MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
83 (1 << MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
84 (1 << MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
85 (1 << MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
86 (1 << MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
87 (1 << MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
88
89#define CMDQ_MME_ERR_MSG_EN (\
90 (1 << MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
91 (1 << MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
92 (1 << MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
93 (1 << MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
94 (1 << MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
95 (1 << MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
96 (1 << MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
97 (1 << MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
98
99#define QMAN_MME_ERR_PROT (\
100 (1 << MME_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
101 (1 << MME_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
102 (1 << MME_QM_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
103 (1 << MME_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT))
104
105#define CMDQ_MME_ERR_PROT (\
106 (1 << MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
107 (1 << MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
108 (1 << MME_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
109 (1 << MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT))
110
111#define QMAN_TPC_ENABLE (\
112 (1 << TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
113 (1 << TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
114 (1 << TPC0_QM_GLBL_CFG0_CP_EN_SHIFT))
115
116#define CMDQ_TPC_ENABLE (\
117 (1 << TPC0_CMDQ_GLBL_CFG0_CQF_EN_SHIFT) | \
118 (1 << TPC0_CMDQ_GLBL_CFG0_CP_EN_SHIFT))
119
120#define QMAN_TPC_STOP (\
121 (1 << TPC0_QM_GLBL_CFG1_PQF_STOP_SHIFT) | \
122 (1 << TPC0_QM_GLBL_CFG1_CQF_STOP_SHIFT) | \
123 (1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT))
124
125#define CMDQ_TPC_STOP (\
126 (1 << TPC0_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT) | \
127 (1 << TPC0_CMDQ_GLBL_CFG1_CP_STOP_SHIFT))
128
129#define QMAN_TPC_ERR_MSG_EN (\
130 (1 << TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
131 (1 << TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
132 (1 << TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
133 (1 << TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
134 (1 << TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
135 (1 << TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
136 (1 << TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
137 (1 << TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
138
139#define CMDQ_TPC_ERR_MSG_EN (\
140 (1 << TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
141 (1 << TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
142 (1 << TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
143 (1 << TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
144 (1 << TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
145 (1 << TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
146 (1 << TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
147 (1 << TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
148
149#define QMAN_TPC_ERR_PROT (\
150 (1 << TPC0_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
151 (1 << TPC0_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
152 (1 << TPC0_QM_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
153 (1 << TPC0_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT))
154
155#define CMDQ_TPC_ERR_PROT (\
156 (1 << TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
157 (1 << TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
158 (1 << TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
159 (1 << TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT))
160
161/* RESETS */
162#define DMA_MME_TPC_RESET (\
163 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT |\
164 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT |\
165 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT)
166
167#define RESET_ALL (\
168 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT |\
169 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT |\
170 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_SHIFT |\
171 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_SHIFT |\
172 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_SHIFT |\
173 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_SHIFT |\
174 PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_MASK |\
175 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT |\
176 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_SHIFT)
177
178#define CA53_RESET (\
179 (~\
180 (1 << PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_SHIFT)\
181 ) & 0x7FFFFF)
182
183#define CPU_RESET_ASSERT (\
184 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT)
185
186#define CPU_RESET_CORE0_DEASSERT (\
187 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT |\
188 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT |\
189 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT |\
190 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT)
191
192/* PCI CONFIGURATION SPACE */
193#define mmPCI_CONFIG_ELBI_ADDR 0xFF0
194#define mmPCI_CONFIG_ELBI_DATA 0xFF4
195#define mmPCI_CONFIG_ELBI_CTRL 0xFF8
196#define PCI_CONFIG_ELBI_CTRL_WRITE (1 << 31)
197
198#define mmPCI_CONFIG_ELBI_STS 0xFFC
199#define PCI_CONFIG_ELBI_STS_ERR (1 << 30)
200#define PCI_CONFIG_ELBI_STS_DONE (1 << 31)
201#define PCI_CONFIG_ELBI_STS_MASK (PCI_CONFIG_ELBI_STS_ERR | \
202 PCI_CONFIG_ELBI_STS_DONE)
203
204#define GOYA_IRQ_HBW_ID_MASK 0x1FFF
205#define GOYA_IRQ_HBW_ID_SHIFT 0
206#define GOYA_IRQ_HBW_INTERNAL_ID_MASK 0xE000
207#define GOYA_IRQ_HBW_INTERNAL_ID_SHIFT 13
208#define GOYA_IRQ_HBW_AGENT_ID_MASK 0x1F0000
209#define GOYA_IRQ_HBW_AGENT_ID_SHIFT 16
210#define GOYA_IRQ_HBW_Y_MASK 0xE00000
211#define GOYA_IRQ_HBW_Y_SHIFT 21
212#define GOYA_IRQ_HBW_X_MASK 0x7000000
213#define GOYA_IRQ_HBW_X_SHIFT 24
214#define GOYA_IRQ_LBW_ID_MASK 0xFF
215#define GOYA_IRQ_LBW_ID_SHIFT 0
216#define GOYA_IRQ_LBW_INTERNAL_ID_MASK 0x700
217#define GOYA_IRQ_LBW_INTERNAL_ID_SHIFT 8
218#define GOYA_IRQ_LBW_AGENT_ID_MASK 0xF800
219#define GOYA_IRQ_LBW_AGENT_ID_SHIFT 11
220#define GOYA_IRQ_LBW_Y_MASK 0x70000
221#define GOYA_IRQ_LBW_Y_SHIFT 16
222#define GOYA_IRQ_LBW_X_MASK 0x380000
223#define GOYA_IRQ_LBW_X_SHIFT 19
224
225#define DMA_QM_IDLE_MASK (DMA_QM_0_GLBL_STS0_PQF_IDLE_MASK | \
226 DMA_QM_0_GLBL_STS0_CQF_IDLE_MASK | \
227 DMA_QM_0_GLBL_STS0_CP_IDLE_MASK | \
228 DMA_QM_0_GLBL_STS0_DMA_IDLE_MASK)
229
230#define TPC_QM_IDLE_MASK (TPC0_QM_GLBL_STS0_PQF_IDLE_MASK | \
231 TPC0_QM_GLBL_STS0_CQF_IDLE_MASK | \
232 TPC0_QM_GLBL_STS0_CP_IDLE_MASK)
233
234#define TPC_CMDQ_IDLE_MASK (TPC0_CMDQ_GLBL_STS0_CQF_IDLE_MASK | \
235 TPC0_CMDQ_GLBL_STS0_CP_IDLE_MASK)
236
237#define TPC_CFG_IDLE_MASK (TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK | \
238 TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK | \
239 TPC0_CFG_STATUS_IQ_EMPTY_MASK | \
240 TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_MASK)
241
242#define MME_QM_IDLE_MASK (MME_QM_GLBL_STS0_PQF_IDLE_MASK | \
243 MME_QM_GLBL_STS0_CQF_IDLE_MASK | \
244 MME_QM_GLBL_STS0_CP_IDLE_MASK)
245
246#define MME_CMDQ_IDLE_MASK (MME_CMDQ_GLBL_STS0_CQF_IDLE_MASK | \
247 MME_CMDQ_GLBL_STS0_CP_IDLE_MASK)
248
249#define MME_ARCH_IDLE_MASK (MME_ARCH_STATUS_SB_A_EMPTY_MASK | \
250 MME_ARCH_STATUS_SB_B_EMPTY_MASK | \
251 MME_ARCH_STATUS_SB_CIN_EMPTY_MASK | \
252 MME_ARCH_STATUS_SB_COUT_EMPTY_MASK)
253
254#define MME_SHADOW_IDLE_MASK (MME_SHADOW_0_STATUS_A_MASK | \
255 MME_SHADOW_0_STATUS_B_MASK | \
256 MME_SHADOW_0_STATUS_CIN_MASK | \
257 MME_SHADOW_0_STATUS_COUT_MASK | \
258 MME_SHADOW_0_STATUS_TE_MASK | \
259 MME_SHADOW_0_STATUS_LD_MASK | \
260 MME_SHADOW_0_STATUS_ST_MASK)
261
262#define TPC1_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
263#define TPC2_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
264#define TPC3_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
265#define TPC4_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
266#define TPC5_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
267#define TPC6_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
268#define TPC7_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
269
270#define DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
271#define DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
272#define DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
273#define DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
274
275#endif /* ASIC_REG_GOYA_MASKS_H_ */
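
The composite masks above are plain OR-combinations of the per-engine enable, protection and stop bits, so arming a queue manager reduces to a single register write. A minimal sketch of that usage, assuming the target register has already been ioremapped by the caller; the helper name and the writel()-based access are illustrative assumptions, not code from this patch:

#include <linux/io.h>
#include "goya_masks.h"	/* for QMAN_DMA_ENABLE, assuming the generated header is on the include path */

/* Illustrative only: enable the PQF, CQF, CP and DMA engines of DMA queue 0
 * by writing the composite mask to its (already ioremapped) GLBL_CFG0
 * register in one shot.
 */
static inline void example_enable_dma_qman0(void __iomem *dma_qm0_glbl_cfg0)
{
	writel(QMAN_DMA_ENABLE, dma_qm0_glbl_cfg0);
}
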
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
new file mode 100644
index 000000000000..6cb0b6e54d41
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
@@ -0,0 +1,118 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef ASIC_REG_GOYA_REGS_H_
9#define ASIC_REG_GOYA_REGS_H_
10
11#include "goya_blocks.h"
12#include "stlb_regs.h"
13#include "mmu_regs.h"
14#include "pcie_aux_regs.h"
15#include "psoc_global_conf_regs.h"
16#include "psoc_spi_regs.h"
17#include "psoc_mme_pll_regs.h"
18#include "psoc_pci_pll_regs.h"
19#include "psoc_emmc_pll_regs.h"
20#include "cpu_if_regs.h"
21#include "cpu_ca53_cfg_regs.h"
22#include "cpu_pll_regs.h"
23#include "ic_pll_regs.h"
24#include "mc_pll_regs.h"
25#include "tpc_pll_regs.h"
26#include "dma_qm_0_regs.h"
27#include "dma_qm_1_regs.h"
28#include "dma_qm_2_regs.h"
29#include "dma_qm_3_regs.h"
30#include "dma_qm_4_regs.h"
31#include "dma_ch_0_regs.h"
32#include "dma_ch_1_regs.h"
33#include "dma_ch_2_regs.h"
34#include "dma_ch_3_regs.h"
35#include "dma_ch_4_regs.h"
36#include "dma_macro_regs.h"
37#include "dma_nrtr_regs.h"
38#include "pci_nrtr_regs.h"
39#include "sram_y0_x0_rtr_regs.h"
40#include "sram_y0_x1_rtr_regs.h"
41#include "sram_y0_x2_rtr_regs.h"
42#include "sram_y0_x3_rtr_regs.h"
43#include "sram_y0_x4_rtr_regs.h"
44#include "mme_regs.h"
45#include "mme_qm_regs.h"
46#include "mme_cmdq_regs.h"
47#include "mme1_rtr_regs.h"
48#include "mme2_rtr_regs.h"
49#include "mme3_rtr_regs.h"
50#include "mme4_rtr_regs.h"
51#include "mme5_rtr_regs.h"
52#include "mme6_rtr_regs.h"
53#include "tpc0_cfg_regs.h"
54#include "tpc1_cfg_regs.h"
55#include "tpc2_cfg_regs.h"
56#include "tpc3_cfg_regs.h"
57#include "tpc4_cfg_regs.h"
58#include "tpc5_cfg_regs.h"
59#include "tpc6_cfg_regs.h"
60#include "tpc7_cfg_regs.h"
61#include "tpc0_qm_regs.h"
62#include "tpc1_qm_regs.h"
63#include "tpc2_qm_regs.h"
64#include "tpc3_qm_regs.h"
65#include "tpc4_qm_regs.h"
66#include "tpc5_qm_regs.h"
67#include "tpc6_qm_regs.h"
68#include "tpc7_qm_regs.h"
69#include "tpc0_cmdq_regs.h"
70#include "tpc1_cmdq_regs.h"
71#include "tpc2_cmdq_regs.h"
72#include "tpc3_cmdq_regs.h"
73#include "tpc4_cmdq_regs.h"
74#include "tpc5_cmdq_regs.h"
75#include "tpc6_cmdq_regs.h"
76#include "tpc7_cmdq_regs.h"
77#include "tpc0_nrtr_regs.h"
78#include "tpc1_rtr_regs.h"
79#include "tpc2_rtr_regs.h"
80#include "tpc3_rtr_regs.h"
81#include "tpc4_rtr_regs.h"
82#include "tpc5_rtr_regs.h"
83#include "tpc6_rtr_regs.h"
84#include "tpc7_nrtr_regs.h"
85#include "tpc0_eml_cfg_regs.h"
86
87#include "psoc_global_conf_masks.h"
88#include "dma_macro_masks.h"
89#include "dma_qm_0_masks.h"
90#include "tpc0_qm_masks.h"
91#include "tpc0_cmdq_masks.h"
92#include "mme_qm_masks.h"
93#include "mme_cmdq_masks.h"
94#include "tpc0_cfg_masks.h"
95#include "tpc0_eml_cfg_masks.h"
96#include "mme1_rtr_masks.h"
97#include "tpc0_nrtr_masks.h"
98#include "dma_nrtr_masks.h"
99#include "pci_nrtr_masks.h"
100#include "stlb_masks.h"
101#include "cpu_ca53_cfg_masks.h"
102#include "mmu_masks.h"
103#include "mme_masks.h"
104
105#define mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG 0xC02000
106#define mmPCIE_DBI_MSIX_DOORBELL_OFF 0xC02948
107
108#define mmSYNC_MNGR_MON_PAY_ADDRL_0 0x113000
109#define mmSYNC_MNGR_SOB_OBJ_0 0x112000
110#define mmSYNC_MNGR_SOB_OBJ_1000 0x112FA0
111#define mmSYNC_MNGR_SOB_OBJ_1007 0x112FBC
112#define mmSYNC_MNGR_SOB_OBJ_1023 0x112FFC
113#define mmSYNC_MNGR_MON_STATUS_0 0x114000
114#define mmSYNC_MNGR_MON_STATUS_255 0x1143FC
115
116#define mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR 0x800040
117
118#endif /* ASIC_REG_GOYA_REGS_H_ */
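
The sync-manager offsets listed here follow a 4-byte stride: 0x112000 + 4 * 1000 = 0x112FA0, + 4 * 1007 = 0x112FBC and + 4 * 1023 = 0x112FFC, matching the SOB_OBJ defines above, and the monitor status registers at 0x114000 follow the same pattern up to index 255. A short sketch of that relationship; the helper name is a hypothetical chosen for the example, not something defined by this patch:

#include <linux/types.h>
#include "goya_regs.h"	/* for mmSYNC_MNGR_SOB_OBJ_0, assuming the header is on the include path */

/* Illustrative only: sync objects are 32-bit registers laid out back to back,
 * so the offset of object N is the base object plus N * 4 bytes.
 */
static inline u32 example_sob_obj_offset(unsigned int index)
{
	return mmSYNC_MNGR_SOB_OBJ_0 + index * sizeof(u32);
}
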
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
new file mode 100644
index 000000000000..0a743817aad7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
@@ -0,0 +1,105 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_IC_PLL_REGS_H_
14#define ASIC_REG_IC_PLL_REGS_H_
15
16/*
17 *****************************************
18 * IC_PLL (Prototype: PLL)
19 *****************************************
20 */
21
22#define mmIC_PLL_NR 0x4A3100
23
24#define mmIC_PLL_NF 0x4A3104
25
26#define mmIC_PLL_OD 0x4A3108
27
28#define mmIC_PLL_NB 0x4A310C
29
30#define mmIC_PLL_CFG 0x4A3110
31
32#define mmIC_PLL_LOSE_MASK 0x4A3120
33
34#define mmIC_PLL_LOCK_INTR 0x4A3128
35
36#define mmIC_PLL_LOCK_BYPASS 0x4A312C
37
38#define mmIC_PLL_DATA_CHNG 0x4A3130
39
40#define mmIC_PLL_RST 0x4A3134
41
42#define mmIC_PLL_SLIP_WD_CNTR 0x4A3150
43
44#define mmIC_PLL_DIV_FACTOR_0 0x4A3200
45
46#define mmIC_PLL_DIV_FACTOR_1 0x4A3204
47
48#define mmIC_PLL_DIV_FACTOR_2 0x4A3208
49
50#define mmIC_PLL_DIV_FACTOR_3 0x4A320C
51
52#define mmIC_PLL_DIV_FACTOR_CMD_0 0x4A3220
53
54#define mmIC_PLL_DIV_FACTOR_CMD_1 0x4A3224
55
56#define mmIC_PLL_DIV_FACTOR_CMD_2 0x4A3228
57
58#define mmIC_PLL_DIV_FACTOR_CMD_3 0x4A322C
59
60#define mmIC_PLL_DIV_SEL_0 0x4A3280
61
62#define mmIC_PLL_DIV_SEL_1 0x4A3284
63
64#define mmIC_PLL_DIV_SEL_2 0x4A3288
65
66#define mmIC_PLL_DIV_SEL_3 0x4A328C
67
68#define mmIC_PLL_DIV_EN_0 0x4A32A0
69
70#define mmIC_PLL_DIV_EN_1 0x4A32A4
71
72#define mmIC_PLL_DIV_EN_2 0x4A32A8
73
74#define mmIC_PLL_DIV_EN_3 0x4A32AC
75
76#define mmIC_PLL_DIV_FACTOR_BUSY_0 0x4A32C0
77
78#define mmIC_PLL_DIV_FACTOR_BUSY_1 0x4A32C4
79
80#define mmIC_PLL_DIV_FACTOR_BUSY_2 0x4A32C8
81
82#define mmIC_PLL_DIV_FACTOR_BUSY_3 0x4A32CC
83
84#define mmIC_PLL_CLK_GATER 0x4A3300
85
86#define mmIC_PLL_CLK_RLX_0 0x4A3310
87
88#define mmIC_PLL_CLK_RLX_1 0x4A3314
89
90#define mmIC_PLL_CLK_RLX_2 0x4A3318
91
92#define mmIC_PLL_CLK_RLX_3 0x4A331C
93
94#define mmIC_PLL_REF_CNTR_PERIOD 0x4A3400
95
96#define mmIC_PLL_REF_LOW_THRESHOLD 0x4A3410
97
98#define mmIC_PLL_REF_HIGH_THRESHOLD 0x4A3420
99
100#define mmIC_PLL_PLL_NOT_STABLE 0x4A3430
101
102#define mmIC_PLL_FREQ_CALC_EN 0x4A3440
103
104#endif /* ASIC_REG_IC_PLL_REGS_H_ */
105
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
new file mode 100644
index 000000000000..4408188aa067
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
@@ -0,0 +1,105 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MC_PLL_REGS_H_
14#define ASIC_REG_MC_PLL_REGS_H_
15
16/*
17 *****************************************
18 * MC_PLL (Prototype: PLL)
19 *****************************************
20 */
21
22#define mmMC_PLL_NR 0x4A1100
23
24#define mmMC_PLL_NF 0x4A1104
25
26#define mmMC_PLL_OD 0x4A1108
27
28#define mmMC_PLL_NB 0x4A110C
29
30#define mmMC_PLL_CFG 0x4A1110
31
32#define mmMC_PLL_LOSE_MASK 0x4A1120
33
34#define mmMC_PLL_LOCK_INTR 0x4A1128
35
36#define mmMC_PLL_LOCK_BYPASS 0x4A112C
37
38#define mmMC_PLL_DATA_CHNG 0x4A1130
39
40#define mmMC_PLL_RST 0x4A1134
41
42#define mmMC_PLL_SLIP_WD_CNTR 0x4A1150
43
44#define mmMC_PLL_DIV_FACTOR_0 0x4A1200
45
46#define mmMC_PLL_DIV_FACTOR_1 0x4A1204
47
48#define mmMC_PLL_DIV_FACTOR_2 0x4A1208
49
50#define mmMC_PLL_DIV_FACTOR_3 0x4A120C
51
52#define mmMC_PLL_DIV_FACTOR_CMD_0 0x4A1220
53
54#define mmMC_PLL_DIV_FACTOR_CMD_1 0x4A1224
55
56#define mmMC_PLL_DIV_FACTOR_CMD_2 0x4A1228
57
58#define mmMC_PLL_DIV_FACTOR_CMD_3 0x4A122C
59
60#define mmMC_PLL_DIV_SEL_0 0x4A1280
61
62#define mmMC_PLL_DIV_SEL_1 0x4A1284
63
64#define mmMC_PLL_DIV_SEL_2 0x4A1288
65
66#define mmMC_PLL_DIV_SEL_3 0x4A128C
67
68#define mmMC_PLL_DIV_EN_0 0x4A12A0
69
70#define mmMC_PLL_DIV_EN_1 0x4A12A4
71
72#define mmMC_PLL_DIV_EN_2 0x4A12A8
73
74#define mmMC_PLL_DIV_EN_3 0x4A12AC
75
76#define mmMC_PLL_DIV_FACTOR_BUSY_0 0x4A12C0
77
78#define mmMC_PLL_DIV_FACTOR_BUSY_1 0x4A12C4
79
80#define mmMC_PLL_DIV_FACTOR_BUSY_2 0x4A12C8
81
82#define mmMC_PLL_DIV_FACTOR_BUSY_3 0x4A12CC
83
84#define mmMC_PLL_CLK_GATER 0x4A1300
85
86#define mmMC_PLL_CLK_RLX_0 0x4A1310
87
88#define mmMC_PLL_CLK_RLX_1 0x4A1314
89
90#define mmMC_PLL_CLK_RLX_2 0x4A1318
91
92#define mmMC_PLL_CLK_RLX_3 0x4A131C
93
94#define mmMC_PLL_REF_CNTR_PERIOD 0x4A1400
95
96#define mmMC_PLL_REF_LOW_THRESHOLD 0x4A1410
97
98#define mmMC_PLL_REF_HIGH_THRESHOLD 0x4A1420
99
100#define mmMC_PLL_PLL_NOT_STABLE 0x4A1430
101
102#define mmMC_PLL_FREQ_CALC_EN 0x4A1440
103
104#endif /* ASIC_REG_MC_PLL_REGS_H_ */
105
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
new file mode 100644
index 000000000000..687bca5c5fe3
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
@@ -0,0 +1,653 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MME1_RTR_MASKS_H_
14#define ASIC_REG_MME1_RTR_MASKS_H_
15
16/*
17 *****************************************
18 * MME1_RTR (Prototype: MME_RTR)
19 *****************************************
20 */
21
22/* MME1_RTR_HBW_RD_RQ_E_ARB */
23#define MME1_RTR_HBW_RD_RQ_E_ARB_W_SHIFT 0
24#define MME1_RTR_HBW_RD_RQ_E_ARB_W_MASK 0x7
25#define MME1_RTR_HBW_RD_RQ_E_ARB_S_SHIFT 8
26#define MME1_RTR_HBW_RD_RQ_E_ARB_S_MASK 0x700
27#define MME1_RTR_HBW_RD_RQ_E_ARB_N_SHIFT 16
28#define MME1_RTR_HBW_RD_RQ_E_ARB_N_MASK 0x70000
29#define MME1_RTR_HBW_RD_RQ_E_ARB_L_SHIFT 24
30#define MME1_RTR_HBW_RD_RQ_E_ARB_L_MASK 0x7000000
31
32/* MME1_RTR_HBW_RD_RQ_W_ARB */
33#define MME1_RTR_HBW_RD_RQ_W_ARB_E_SHIFT 0
34#define MME1_RTR_HBW_RD_RQ_W_ARB_E_MASK 0x7
35#define MME1_RTR_HBW_RD_RQ_W_ARB_S_SHIFT 8
36#define MME1_RTR_HBW_RD_RQ_W_ARB_S_MASK 0x700
37#define MME1_RTR_HBW_RD_RQ_W_ARB_N_SHIFT 16
38#define MME1_RTR_HBW_RD_RQ_W_ARB_N_MASK 0x70000
39#define MME1_RTR_HBW_RD_RQ_W_ARB_L_SHIFT 24
40#define MME1_RTR_HBW_RD_RQ_W_ARB_L_MASK 0x7000000
41
42/* MME1_RTR_HBW_RD_RQ_N_ARB */
43#define MME1_RTR_HBW_RD_RQ_N_ARB_W_SHIFT 0
44#define MME1_RTR_HBW_RD_RQ_N_ARB_W_MASK 0x7
45#define MME1_RTR_HBW_RD_RQ_N_ARB_E_SHIFT 8
46#define MME1_RTR_HBW_RD_RQ_N_ARB_E_MASK 0x700
47#define MME1_RTR_HBW_RD_RQ_N_ARB_S_SHIFT 16
48#define MME1_RTR_HBW_RD_RQ_N_ARB_S_MASK 0x70000
49#define MME1_RTR_HBW_RD_RQ_N_ARB_L_SHIFT 24
50#define MME1_RTR_HBW_RD_RQ_N_ARB_L_MASK 0x7000000
51
52/* MME1_RTR_HBW_RD_RQ_S_ARB */
53#define MME1_RTR_HBW_RD_RQ_S_ARB_W_SHIFT 0
54#define MME1_RTR_HBW_RD_RQ_S_ARB_W_MASK 0x7
55#define MME1_RTR_HBW_RD_RQ_S_ARB_E_SHIFT 8
56#define MME1_RTR_HBW_RD_RQ_S_ARB_E_MASK 0x700
57#define MME1_RTR_HBW_RD_RQ_S_ARB_N_SHIFT 16
58#define MME1_RTR_HBW_RD_RQ_S_ARB_N_MASK 0x70000
59#define MME1_RTR_HBW_RD_RQ_S_ARB_L_SHIFT 24
60#define MME1_RTR_HBW_RD_RQ_S_ARB_L_MASK 0x7000000
61
62/* MME1_RTR_HBW_RD_RQ_L_ARB */
63#define MME1_RTR_HBW_RD_RQ_L_ARB_W_SHIFT 0
64#define MME1_RTR_HBW_RD_RQ_L_ARB_W_MASK 0x7
65#define MME1_RTR_HBW_RD_RQ_L_ARB_E_SHIFT 8
66#define MME1_RTR_HBW_RD_RQ_L_ARB_E_MASK 0x700
67#define MME1_RTR_HBW_RD_RQ_L_ARB_S_SHIFT 16
68#define MME1_RTR_HBW_RD_RQ_L_ARB_S_MASK 0x70000
69#define MME1_RTR_HBW_RD_RQ_L_ARB_N_SHIFT 24
70#define MME1_RTR_HBW_RD_RQ_L_ARB_N_MASK 0x7000000
71
72/* MME1_RTR_HBW_E_ARB_MAX */
73#define MME1_RTR_HBW_E_ARB_MAX_CREDIT_SHIFT 0
74#define MME1_RTR_HBW_E_ARB_MAX_CREDIT_MASK 0x3F
75
76/* MME1_RTR_HBW_W_ARB_MAX */
77#define MME1_RTR_HBW_W_ARB_MAX_CREDIT_SHIFT 0
78#define MME1_RTR_HBW_W_ARB_MAX_CREDIT_MASK 0x3F
79
80/* MME1_RTR_HBW_N_ARB_MAX */
81#define MME1_RTR_HBW_N_ARB_MAX_CREDIT_SHIFT 0
82#define MME1_RTR_HBW_N_ARB_MAX_CREDIT_MASK 0x3F
83
84/* MME1_RTR_HBW_S_ARB_MAX */
85#define MME1_RTR_HBW_S_ARB_MAX_CREDIT_SHIFT 0
86#define MME1_RTR_HBW_S_ARB_MAX_CREDIT_MASK 0x3F
87
88/* MME1_RTR_HBW_L_ARB_MAX */
89#define MME1_RTR_HBW_L_ARB_MAX_CREDIT_SHIFT 0
90#define MME1_RTR_HBW_L_ARB_MAX_CREDIT_MASK 0x3F
91
92/* MME1_RTR_HBW_RD_RS_MAX_CREDIT */
93#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_A_SHIFT 0
94#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_A_MASK 0x3F
95#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_B_SHIFT 8
96#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_B_MASK 0x3F00
97
98/* MME1_RTR_HBW_WR_RQ_MAX_CREDIT */
99#define MME1_RTR_HBW_WR_RQ_MAX_CREDIT_VAL_SHIFT 0
100#define MME1_RTR_HBW_WR_RQ_MAX_CREDIT_VAL_MASK 0x3F
101
102/* MME1_RTR_HBW_RD_RQ_MAX_CREDIT */
103#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_A_SHIFT 0
104#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_A_MASK 0x3F
105#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_B_SHIFT 8
106#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_B_MASK 0x3F00
107#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_IC_SHIFT 16
108#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_IC_MASK 0x3F0000
109
110/* MME1_RTR_HBW_RD_RS_E_ARB */
111#define MME1_RTR_HBW_RD_RS_E_ARB_W_SHIFT 0
112#define MME1_RTR_HBW_RD_RS_E_ARB_W_MASK 0x7
113#define MME1_RTR_HBW_RD_RS_E_ARB_S_SHIFT 8
114#define MME1_RTR_HBW_RD_RS_E_ARB_S_MASK 0x700
115#define MME1_RTR_HBW_RD_RS_E_ARB_N_SHIFT 16
116#define MME1_RTR_HBW_RD_RS_E_ARB_N_MASK 0x70000
117#define MME1_RTR_HBW_RD_RS_E_ARB_L_SHIFT 24
118#define MME1_RTR_HBW_RD_RS_E_ARB_L_MASK 0x7000000
119
120/* MME1_RTR_HBW_RD_RS_W_ARB */
121#define MME1_RTR_HBW_RD_RS_W_ARB_E_SHIFT 0
122#define MME1_RTR_HBW_RD_RS_W_ARB_E_MASK 0x7
123#define MME1_RTR_HBW_RD_RS_W_ARB_S_SHIFT 8
124#define MME1_RTR_HBW_RD_RS_W_ARB_S_MASK 0x700
125#define MME1_RTR_HBW_RD_RS_W_ARB_N_SHIFT 16
126#define MME1_RTR_HBW_RD_RS_W_ARB_N_MASK 0x70000
127#define MME1_RTR_HBW_RD_RS_W_ARB_L_SHIFT 24
128#define MME1_RTR_HBW_RD_RS_W_ARB_L_MASK 0x7000000
129
130/* MME1_RTR_HBW_RD_RS_N_ARB */
131#define MME1_RTR_HBW_RD_RS_N_ARB_W_SHIFT 0
132#define MME1_RTR_HBW_RD_RS_N_ARB_W_MASK 0x7
133#define MME1_RTR_HBW_RD_RS_N_ARB_E_SHIFT 8
134#define MME1_RTR_HBW_RD_RS_N_ARB_E_MASK 0x700
135#define MME1_RTR_HBW_RD_RS_N_ARB_S_SHIFT 16
136#define MME1_RTR_HBW_RD_RS_N_ARB_S_MASK 0x70000
137#define MME1_RTR_HBW_RD_RS_N_ARB_L_SHIFT 24
138#define MME1_RTR_HBW_RD_RS_N_ARB_L_MASK 0x7000000
139
140/* MME1_RTR_HBW_RD_RS_S_ARB */
141#define MME1_RTR_HBW_RD_RS_S_ARB_W_SHIFT 0
142#define MME1_RTR_HBW_RD_RS_S_ARB_W_MASK 0x7
143#define MME1_RTR_HBW_RD_RS_S_ARB_E_SHIFT 8
144#define MME1_RTR_HBW_RD_RS_S_ARB_E_MASK 0x700
145#define MME1_RTR_HBW_RD_RS_S_ARB_N_SHIFT 16
146#define MME1_RTR_HBW_RD_RS_S_ARB_N_MASK 0x70000
147#define MME1_RTR_HBW_RD_RS_S_ARB_L_SHIFT 24
148#define MME1_RTR_HBW_RD_RS_S_ARB_L_MASK 0x7000000
149
150/* MME1_RTR_HBW_RD_RS_L_ARB */
151#define MME1_RTR_HBW_RD_RS_L_ARB_W_SHIFT 0
152#define MME1_RTR_HBW_RD_RS_L_ARB_W_MASK 0x7
153#define MME1_RTR_HBW_RD_RS_L_ARB_E_SHIFT 8
154#define MME1_RTR_HBW_RD_RS_L_ARB_E_MASK 0x700
155#define MME1_RTR_HBW_RD_RS_L_ARB_S_SHIFT 16
156#define MME1_RTR_HBW_RD_RS_L_ARB_S_MASK 0x70000
157#define MME1_RTR_HBW_RD_RS_L_ARB_N_SHIFT 24
158#define MME1_RTR_HBW_RD_RS_L_ARB_N_MASK 0x7000000
159
160/* MME1_RTR_HBW_WR_RQ_E_ARB */
161#define MME1_RTR_HBW_WR_RQ_E_ARB_W_SHIFT 0
162#define MME1_RTR_HBW_WR_RQ_E_ARB_W_MASK 0x7
163#define MME1_RTR_HBW_WR_RQ_E_ARB_S_SHIFT 8
164#define MME1_RTR_HBW_WR_RQ_E_ARB_S_MASK 0x700
165#define MME1_RTR_HBW_WR_RQ_E_ARB_N_SHIFT 16
166#define MME1_RTR_HBW_WR_RQ_E_ARB_N_MASK 0x70000
167#define MME1_RTR_HBW_WR_RQ_E_ARB_L_SHIFT 24
168#define MME1_RTR_HBW_WR_RQ_E_ARB_L_MASK 0x7000000
169
170/* MME1_RTR_HBW_WR_RQ_W_ARB */
171#define MME1_RTR_HBW_WR_RQ_W_ARB_E_SHIFT 0
172#define MME1_RTR_HBW_WR_RQ_W_ARB_E_MASK 0x7
173#define MME1_RTR_HBW_WR_RQ_W_ARB_S_SHIFT 8
174#define MME1_RTR_HBW_WR_RQ_W_ARB_S_MASK 0x700
175#define MME1_RTR_HBW_WR_RQ_W_ARB_N_SHIFT 16
176#define MME1_RTR_HBW_WR_RQ_W_ARB_N_MASK 0x70000
177#define MME1_RTR_HBW_WR_RQ_W_ARB_L_SHIFT 24
178#define MME1_RTR_HBW_WR_RQ_W_ARB_L_MASK 0x7000000
179
180/* MME1_RTR_HBW_WR_RQ_N_ARB */
181#define MME1_RTR_HBW_WR_RQ_N_ARB_W_SHIFT 0
182#define MME1_RTR_HBW_WR_RQ_N_ARB_W_MASK 0x7
183#define MME1_RTR_HBW_WR_RQ_N_ARB_E_SHIFT 8
184#define MME1_RTR_HBW_WR_RQ_N_ARB_E_MASK 0x700
185#define MME1_RTR_HBW_WR_RQ_N_ARB_S_SHIFT 16
186#define MME1_RTR_HBW_WR_RQ_N_ARB_S_MASK 0x70000
187#define MME1_RTR_HBW_WR_RQ_N_ARB_L_SHIFT 24
188#define MME1_RTR_HBW_WR_RQ_N_ARB_L_MASK 0x7000000
189
190/* MME1_RTR_HBW_WR_RQ_S_ARB */
191#define MME1_RTR_HBW_WR_RQ_S_ARB_W_SHIFT 0
192#define MME1_RTR_HBW_WR_RQ_S_ARB_W_MASK 0x7
193#define MME1_RTR_HBW_WR_RQ_S_ARB_E_SHIFT 8
194#define MME1_RTR_HBW_WR_RQ_S_ARB_E_MASK 0x700
195#define MME1_RTR_HBW_WR_RQ_S_ARB_N_SHIFT 16
196#define MME1_RTR_HBW_WR_RQ_S_ARB_N_MASK 0x70000
197#define MME1_RTR_HBW_WR_RQ_S_ARB_L_SHIFT 24
198#define MME1_RTR_HBW_WR_RQ_S_ARB_L_MASK 0x7000000
199
200/* MME1_RTR_HBW_WR_RQ_L_ARB */
201#define MME1_RTR_HBW_WR_RQ_L_ARB_W_SHIFT 0
202#define MME1_RTR_HBW_WR_RQ_L_ARB_W_MASK 0x7
203#define MME1_RTR_HBW_WR_RQ_L_ARB_E_SHIFT 8
204#define MME1_RTR_HBW_WR_RQ_L_ARB_E_MASK 0x700
205#define MME1_RTR_HBW_WR_RQ_L_ARB_S_SHIFT 16
206#define MME1_RTR_HBW_WR_RQ_L_ARB_S_MASK 0x70000
207#define MME1_RTR_HBW_WR_RQ_L_ARB_N_SHIFT 24
208#define MME1_RTR_HBW_WR_RQ_L_ARB_N_MASK 0x7000000
209
210/* MME1_RTR_HBW_WR_RS_E_ARB */
211#define MME1_RTR_HBW_WR_RS_E_ARB_W_SHIFT 0
212#define MME1_RTR_HBW_WR_RS_E_ARB_W_MASK 0x7
213#define MME1_RTR_HBW_WR_RS_E_ARB_S_SHIFT 8
214#define MME1_RTR_HBW_WR_RS_E_ARB_S_MASK 0x700
215#define MME1_RTR_HBW_WR_RS_E_ARB_N_SHIFT 16
216#define MME1_RTR_HBW_WR_RS_E_ARB_N_MASK 0x70000
217#define MME1_RTR_HBW_WR_RS_E_ARB_L_SHIFT 24
218#define MME1_RTR_HBW_WR_RS_E_ARB_L_MASK 0x7000000
219
220/* MME1_RTR_HBW_WR_RS_W_ARB */
221#define MME1_RTR_HBW_WR_RS_W_ARB_E_SHIFT 0
222#define MME1_RTR_HBW_WR_RS_W_ARB_E_MASK 0x7
223#define MME1_RTR_HBW_WR_RS_W_ARB_S_SHIFT 8
224#define MME1_RTR_HBW_WR_RS_W_ARB_S_MASK 0x700
225#define MME1_RTR_HBW_WR_RS_W_ARB_N_SHIFT 16
226#define MME1_RTR_HBW_WR_RS_W_ARB_N_MASK 0x70000
227#define MME1_RTR_HBW_WR_RS_W_ARB_L_SHIFT 24
228#define MME1_RTR_HBW_WR_RS_W_ARB_L_MASK 0x7000000
229
230/* MME1_RTR_HBW_WR_RS_N_ARB */
231#define MME1_RTR_HBW_WR_RS_N_ARB_W_SHIFT 0
232#define MME1_RTR_HBW_WR_RS_N_ARB_W_MASK 0x7
233#define MME1_RTR_HBW_WR_RS_N_ARB_E_SHIFT 8
234#define MME1_RTR_HBW_WR_RS_N_ARB_E_MASK 0x700
235#define MME1_RTR_HBW_WR_RS_N_ARB_S_SHIFT 16
236#define MME1_RTR_HBW_WR_RS_N_ARB_S_MASK 0x70000
237#define MME1_RTR_HBW_WR_RS_N_ARB_L_SHIFT 24
238#define MME1_RTR_HBW_WR_RS_N_ARB_L_MASK 0x7000000
239
240/* MME1_RTR_HBW_WR_RS_S_ARB */
241#define MME1_RTR_HBW_WR_RS_S_ARB_W_SHIFT 0
242#define MME1_RTR_HBW_WR_RS_S_ARB_W_MASK 0x7
243#define MME1_RTR_HBW_WR_RS_S_ARB_E_SHIFT 8
244#define MME1_RTR_HBW_WR_RS_S_ARB_E_MASK 0x700
245#define MME1_RTR_HBW_WR_RS_S_ARB_N_SHIFT 16
246#define MME1_RTR_HBW_WR_RS_S_ARB_N_MASK 0x70000
247#define MME1_RTR_HBW_WR_RS_S_ARB_L_SHIFT 24
248#define MME1_RTR_HBW_WR_RS_S_ARB_L_MASK 0x7000000
249
250/* MME1_RTR_HBW_WR_RS_L_ARB */
251#define MME1_RTR_HBW_WR_RS_L_ARB_W_SHIFT 0
252#define MME1_RTR_HBW_WR_RS_L_ARB_W_MASK 0x7
253#define MME1_RTR_HBW_WR_RS_L_ARB_E_SHIFT 8
254#define MME1_RTR_HBW_WR_RS_L_ARB_E_MASK 0x700
255#define MME1_RTR_HBW_WR_RS_L_ARB_S_SHIFT 16
256#define MME1_RTR_HBW_WR_RS_L_ARB_S_MASK 0x70000
257#define MME1_RTR_HBW_WR_RS_L_ARB_N_SHIFT 24
258#define MME1_RTR_HBW_WR_RS_L_ARB_N_MASK 0x7000000
259
260/* MME1_RTR_LBW_RD_RQ_E_ARB */
261#define MME1_RTR_LBW_RD_RQ_E_ARB_W_SHIFT 0
262#define MME1_RTR_LBW_RD_RQ_E_ARB_W_MASK 0x7
263#define MME1_RTR_LBW_RD_RQ_E_ARB_S_SHIFT 8
264#define MME1_RTR_LBW_RD_RQ_E_ARB_S_MASK 0x700
265#define MME1_RTR_LBW_RD_RQ_E_ARB_N_SHIFT 16
266#define MME1_RTR_LBW_RD_RQ_E_ARB_N_MASK 0x70000
267#define MME1_RTR_LBW_RD_RQ_E_ARB_L_SHIFT 24
268#define MME1_RTR_LBW_RD_RQ_E_ARB_L_MASK 0x7000000
269
270/* MME1_RTR_LBW_RD_RQ_W_ARB */
271#define MME1_RTR_LBW_RD_RQ_W_ARB_E_SHIFT 0
272#define MME1_RTR_LBW_RD_RQ_W_ARB_E_MASK 0x7
273#define MME1_RTR_LBW_RD_RQ_W_ARB_S_SHIFT 8
274#define MME1_RTR_LBW_RD_RQ_W_ARB_S_MASK 0x700
275#define MME1_RTR_LBW_RD_RQ_W_ARB_N_SHIFT 16
276#define MME1_RTR_LBW_RD_RQ_W_ARB_N_MASK 0x70000
277#define MME1_RTR_LBW_RD_RQ_W_ARB_L_SHIFT 24
278#define MME1_RTR_LBW_RD_RQ_W_ARB_L_MASK 0x7000000
279
280/* MME1_RTR_LBW_RD_RQ_N_ARB */
281#define MME1_RTR_LBW_RD_RQ_N_ARB_W_SHIFT 0
282#define MME1_RTR_LBW_RD_RQ_N_ARB_W_MASK 0x7
283#define MME1_RTR_LBW_RD_RQ_N_ARB_E_SHIFT 8
284#define MME1_RTR_LBW_RD_RQ_N_ARB_E_MASK 0x700
285#define MME1_RTR_LBW_RD_RQ_N_ARB_S_SHIFT 16
286#define MME1_RTR_LBW_RD_RQ_N_ARB_S_MASK 0x70000
287#define MME1_RTR_LBW_RD_RQ_N_ARB_L_SHIFT 24
288#define MME1_RTR_LBW_RD_RQ_N_ARB_L_MASK 0x7000000
289
290/* MME1_RTR_LBW_RD_RQ_S_ARB */
291#define MME1_RTR_LBW_RD_RQ_S_ARB_W_SHIFT 0
292#define MME1_RTR_LBW_RD_RQ_S_ARB_W_MASK 0x7
293#define MME1_RTR_LBW_RD_RQ_S_ARB_E_SHIFT 8
294#define MME1_RTR_LBW_RD_RQ_S_ARB_E_MASK 0x700
295#define MME1_RTR_LBW_RD_RQ_S_ARB_N_SHIFT 16
296#define MME1_RTR_LBW_RD_RQ_S_ARB_N_MASK 0x70000
297#define MME1_RTR_LBW_RD_RQ_S_ARB_L_SHIFT 24
298#define MME1_RTR_LBW_RD_RQ_S_ARB_L_MASK 0x7000000
299
300/* MME1_RTR_LBW_RD_RQ_L_ARB */
301#define MME1_RTR_LBW_RD_RQ_L_ARB_W_SHIFT 0
302#define MME1_RTR_LBW_RD_RQ_L_ARB_W_MASK 0x7
303#define MME1_RTR_LBW_RD_RQ_L_ARB_E_SHIFT 8
304#define MME1_RTR_LBW_RD_RQ_L_ARB_E_MASK 0x700
305#define MME1_RTR_LBW_RD_RQ_L_ARB_S_SHIFT 16
306#define MME1_RTR_LBW_RD_RQ_L_ARB_S_MASK 0x70000
307#define MME1_RTR_LBW_RD_RQ_L_ARB_N_SHIFT 24
308#define MME1_RTR_LBW_RD_RQ_L_ARB_N_MASK 0x7000000
309
310/* MME1_RTR_LBW_E_ARB_MAX */
311#define MME1_RTR_LBW_E_ARB_MAX_CREDIT_SHIFT 0
312#define MME1_RTR_LBW_E_ARB_MAX_CREDIT_MASK 0x3F
313
314/* MME1_RTR_LBW_W_ARB_MAX */
315#define MME1_RTR_LBW_W_ARB_MAX_CREDIT_SHIFT 0
316#define MME1_RTR_LBW_W_ARB_MAX_CREDIT_MASK 0x3F
317
318/* MME1_RTR_LBW_N_ARB_MAX */
319#define MME1_RTR_LBW_N_ARB_MAX_CREDIT_SHIFT 0
320#define MME1_RTR_LBW_N_ARB_MAX_CREDIT_MASK 0x3F
321
322/* MME1_RTR_LBW_S_ARB_MAX */
323#define MME1_RTR_LBW_S_ARB_MAX_CREDIT_SHIFT 0
324#define MME1_RTR_LBW_S_ARB_MAX_CREDIT_MASK 0x3F
325
326/* MME1_RTR_LBW_L_ARB_MAX */
327#define MME1_RTR_LBW_L_ARB_MAX_CREDIT_SHIFT 0
328#define MME1_RTR_LBW_L_ARB_MAX_CREDIT_MASK 0x3F
329
330/* MME1_RTR_LBW_SRAM_MAX_CREDIT */
331#define MME1_RTR_LBW_SRAM_MAX_CREDIT_MSTR_SHIFT 0
332#define MME1_RTR_LBW_SRAM_MAX_CREDIT_MSTR_MASK 0x3F
333#define MME1_RTR_LBW_SRAM_MAX_CREDIT_SLV_SHIFT 8
334#define MME1_RTR_LBW_SRAM_MAX_CREDIT_SLV_MASK 0x3F00
335
336/* MME1_RTR_LBW_RD_RS_E_ARB */
337#define MME1_RTR_LBW_RD_RS_E_ARB_W_SHIFT 0
338#define MME1_RTR_LBW_RD_RS_E_ARB_W_MASK 0x7
339#define MME1_RTR_LBW_RD_RS_E_ARB_S_SHIFT 8
340#define MME1_RTR_LBW_RD_RS_E_ARB_S_MASK 0x700
341#define MME1_RTR_LBW_RD_RS_E_ARB_N_SHIFT 16
342#define MME1_RTR_LBW_RD_RS_E_ARB_N_MASK 0x70000
343#define MME1_RTR_LBW_RD_RS_E_ARB_L_SHIFT 24
344#define MME1_RTR_LBW_RD_RS_E_ARB_L_MASK 0x7000000
345
346/* MME1_RTR_LBW_RD_RS_W_ARB */
347#define MME1_RTR_LBW_RD_RS_W_ARB_E_SHIFT 0
348#define MME1_RTR_LBW_RD_RS_W_ARB_E_MASK 0x7
349#define MME1_RTR_LBW_RD_RS_W_ARB_S_SHIFT 8
350#define MME1_RTR_LBW_RD_RS_W_ARB_S_MASK 0x700
351#define MME1_RTR_LBW_RD_RS_W_ARB_N_SHIFT 16
352#define MME1_RTR_LBW_RD_RS_W_ARB_N_MASK 0x70000
353#define MME1_RTR_LBW_RD_RS_W_ARB_L_SHIFT 24
354#define MME1_RTR_LBW_RD_RS_W_ARB_L_MASK 0x7000000
355
356/* MME1_RTR_LBW_RD_RS_N_ARB */
357#define MME1_RTR_LBW_RD_RS_N_ARB_W_SHIFT 0
358#define MME1_RTR_LBW_RD_RS_N_ARB_W_MASK 0x7
359#define MME1_RTR_LBW_RD_RS_N_ARB_E_SHIFT 8
360#define MME1_RTR_LBW_RD_RS_N_ARB_E_MASK 0x700
361#define MME1_RTR_LBW_RD_RS_N_ARB_S_SHIFT 16
362#define MME1_RTR_LBW_RD_RS_N_ARB_S_MASK 0x70000
363#define MME1_RTR_LBW_RD_RS_N_ARB_L_SHIFT 24
364#define MME1_RTR_LBW_RD_RS_N_ARB_L_MASK 0x7000000
365
366/* MME1_RTR_LBW_RD_RS_S_ARB */
367#define MME1_RTR_LBW_RD_RS_S_ARB_W_SHIFT 0
368#define MME1_RTR_LBW_RD_RS_S_ARB_W_MASK 0x7
369#define MME1_RTR_LBW_RD_RS_S_ARB_E_SHIFT 8
370#define MME1_RTR_LBW_RD_RS_S_ARB_E_MASK 0x700
371#define MME1_RTR_LBW_RD_RS_S_ARB_N_SHIFT 16
372#define MME1_RTR_LBW_RD_RS_S_ARB_N_MASK 0x70000
373#define MME1_RTR_LBW_RD_RS_S_ARB_L_SHIFT 24
374#define MME1_RTR_LBW_RD_RS_S_ARB_L_MASK 0x7000000
375
376/* MME1_RTR_LBW_RD_RS_L_ARB */
377#define MME1_RTR_LBW_RD_RS_L_ARB_W_SHIFT 0
378#define MME1_RTR_LBW_RD_RS_L_ARB_W_MASK 0x7
379#define MME1_RTR_LBW_RD_RS_L_ARB_E_SHIFT 8
380#define MME1_RTR_LBW_RD_RS_L_ARB_E_MASK 0x700
381#define MME1_RTR_LBW_RD_RS_L_ARB_S_SHIFT 16
382#define MME1_RTR_LBW_RD_RS_L_ARB_S_MASK 0x70000
383#define MME1_RTR_LBW_RD_RS_L_ARB_N_SHIFT 24
384#define MME1_RTR_LBW_RD_RS_L_ARB_N_MASK 0x7000000
385
386/* MME1_RTR_LBW_WR_RQ_E_ARB */
387#define MME1_RTR_LBW_WR_RQ_E_ARB_W_SHIFT 0
388#define MME1_RTR_LBW_WR_RQ_E_ARB_W_MASK 0x7
389#define MME1_RTR_LBW_WR_RQ_E_ARB_S_SHIFT 8
390#define MME1_RTR_LBW_WR_RQ_E_ARB_S_MASK 0x700
391#define MME1_RTR_LBW_WR_RQ_E_ARB_N_SHIFT 16
392#define MME1_RTR_LBW_WR_RQ_E_ARB_N_MASK 0x70000
393#define MME1_RTR_LBW_WR_RQ_E_ARB_L_SHIFT 24
394#define MME1_RTR_LBW_WR_RQ_E_ARB_L_MASK 0x7000000
395
396/* MME1_RTR_LBW_WR_RQ_W_ARB */
397#define MME1_RTR_LBW_WR_RQ_W_ARB_E_SHIFT 0
398#define MME1_RTR_LBW_WR_RQ_W_ARB_E_MASK 0x7
399#define MME1_RTR_LBW_WR_RQ_W_ARB_S_SHIFT 8
400#define MME1_RTR_LBW_WR_RQ_W_ARB_S_MASK 0x700
401#define MME1_RTR_LBW_WR_RQ_W_ARB_N_SHIFT 16
402#define MME1_RTR_LBW_WR_RQ_W_ARB_N_MASK 0x70000
403#define MME1_RTR_LBW_WR_RQ_W_ARB_L_SHIFT 24
404#define MME1_RTR_LBW_WR_RQ_W_ARB_L_MASK 0x7000000
405
406/* MME1_RTR_LBW_WR_RQ_N_ARB */
407#define MME1_RTR_LBW_WR_RQ_N_ARB_W_SHIFT 0
408#define MME1_RTR_LBW_WR_RQ_N_ARB_W_MASK 0x7
409#define MME1_RTR_LBW_WR_RQ_N_ARB_E_SHIFT 8
410#define MME1_RTR_LBW_WR_RQ_N_ARB_E_MASK 0x700
411#define MME1_RTR_LBW_WR_RQ_N_ARB_S_SHIFT 16
412#define MME1_RTR_LBW_WR_RQ_N_ARB_S_MASK 0x70000
413#define MME1_RTR_LBW_WR_RQ_N_ARB_L_SHIFT 24
414#define MME1_RTR_LBW_WR_RQ_N_ARB_L_MASK 0x7000000
415
416/* MME1_RTR_LBW_WR_RQ_S_ARB */
417#define MME1_RTR_LBW_WR_RQ_S_ARB_W_SHIFT 0
418#define MME1_RTR_LBW_WR_RQ_S_ARB_W_MASK 0x7
419#define MME1_RTR_LBW_WR_RQ_S_ARB_E_SHIFT 8
420#define MME1_RTR_LBW_WR_RQ_S_ARB_E_MASK 0x700
421#define MME1_RTR_LBW_WR_RQ_S_ARB_N_SHIFT 16
422#define MME1_RTR_LBW_WR_RQ_S_ARB_N_MASK 0x70000
423#define MME1_RTR_LBW_WR_RQ_S_ARB_L_SHIFT 24
424#define MME1_RTR_LBW_WR_RQ_S_ARB_L_MASK 0x7000000
425
426/* MME1_RTR_LBW_WR_RQ_L_ARB */
427#define MME1_RTR_LBW_WR_RQ_L_ARB_W_SHIFT 0
428#define MME1_RTR_LBW_WR_RQ_L_ARB_W_MASK 0x7
429#define MME1_RTR_LBW_WR_RQ_L_ARB_E_SHIFT 8
430#define MME1_RTR_LBW_WR_RQ_L_ARB_E_MASK 0x700
431#define MME1_RTR_LBW_WR_RQ_L_ARB_S_SHIFT 16
432#define MME1_RTR_LBW_WR_RQ_L_ARB_S_MASK 0x70000
433#define MME1_RTR_LBW_WR_RQ_L_ARB_N_SHIFT 24
434#define MME1_RTR_LBW_WR_RQ_L_ARB_N_MASK 0x7000000
435
436/* MME1_RTR_LBW_WR_RS_E_ARB */
437#define MME1_RTR_LBW_WR_RS_E_ARB_W_SHIFT 0
438#define MME1_RTR_LBW_WR_RS_E_ARB_W_MASK 0x7
439#define MME1_RTR_LBW_WR_RS_E_ARB_S_SHIFT 8
440#define MME1_RTR_LBW_WR_RS_E_ARB_S_MASK 0x700
441#define MME1_RTR_LBW_WR_RS_E_ARB_N_SHIFT 16
442#define MME1_RTR_LBW_WR_RS_E_ARB_N_MASK 0x70000
443#define MME1_RTR_LBW_WR_RS_E_ARB_L_SHIFT 24
444#define MME1_RTR_LBW_WR_RS_E_ARB_L_MASK 0x7000000
445
446/* MME1_RTR_LBW_WR_RS_W_ARB */
447#define MME1_RTR_LBW_WR_RS_W_ARB_E_SHIFT 0
448#define MME1_RTR_LBW_WR_RS_W_ARB_E_MASK 0x7
449#define MME1_RTR_LBW_WR_RS_W_ARB_S_SHIFT 8
450#define MME1_RTR_LBW_WR_RS_W_ARB_S_MASK 0x700
451#define MME1_RTR_LBW_WR_RS_W_ARB_N_SHIFT 16
452#define MME1_RTR_LBW_WR_RS_W_ARB_N_MASK 0x70000
453#define MME1_RTR_LBW_WR_RS_W_ARB_L_SHIFT 24
454#define MME1_RTR_LBW_WR_RS_W_ARB_L_MASK 0x7000000
455
456/* MME1_RTR_LBW_WR_RS_N_ARB */
457#define MME1_RTR_LBW_WR_RS_N_ARB_W_SHIFT 0
458#define MME1_RTR_LBW_WR_RS_N_ARB_W_MASK 0x7
459#define MME1_RTR_LBW_WR_RS_N_ARB_E_SHIFT 8
460#define MME1_RTR_LBW_WR_RS_N_ARB_E_MASK 0x700
461#define MME1_RTR_LBW_WR_RS_N_ARB_S_SHIFT 16
462#define MME1_RTR_LBW_WR_RS_N_ARB_S_MASK 0x70000
463#define MME1_RTR_LBW_WR_RS_N_ARB_L_SHIFT 24
464#define MME1_RTR_LBW_WR_RS_N_ARB_L_MASK 0x7000000
465
466/* MME1_RTR_LBW_WR_RS_S_ARB */
467#define MME1_RTR_LBW_WR_RS_S_ARB_W_SHIFT 0
468#define MME1_RTR_LBW_WR_RS_S_ARB_W_MASK 0x7
469#define MME1_RTR_LBW_WR_RS_S_ARB_E_SHIFT 8
470#define MME1_RTR_LBW_WR_RS_S_ARB_E_MASK 0x700
471#define MME1_RTR_LBW_WR_RS_S_ARB_N_SHIFT 16
472#define MME1_RTR_LBW_WR_RS_S_ARB_N_MASK 0x70000
473#define MME1_RTR_LBW_WR_RS_S_ARB_L_SHIFT 24
474#define MME1_RTR_LBW_WR_RS_S_ARB_L_MASK 0x7000000
475
476/* MME1_RTR_LBW_WR_RS_L_ARB */
477#define MME1_RTR_LBW_WR_RS_L_ARB_W_SHIFT 0
478#define MME1_RTR_LBW_WR_RS_L_ARB_W_MASK 0x7
479#define MME1_RTR_LBW_WR_RS_L_ARB_E_SHIFT 8
480#define MME1_RTR_LBW_WR_RS_L_ARB_E_MASK 0x700
481#define MME1_RTR_LBW_WR_RS_L_ARB_S_SHIFT 16
482#define MME1_RTR_LBW_WR_RS_L_ARB_S_MASK 0x70000
483#define MME1_RTR_LBW_WR_RS_L_ARB_N_SHIFT 24
484#define MME1_RTR_LBW_WR_RS_L_ARB_N_MASK 0x7000000
485
486/* MME1_RTR_DBG_E_ARB */
487#define MME1_RTR_DBG_E_ARB_W_SHIFT 0
488#define MME1_RTR_DBG_E_ARB_W_MASK 0x7
489#define MME1_RTR_DBG_E_ARB_S_SHIFT 8
490#define MME1_RTR_DBG_E_ARB_S_MASK 0x700
491#define MME1_RTR_DBG_E_ARB_N_SHIFT 16
492#define MME1_RTR_DBG_E_ARB_N_MASK 0x70000
493#define MME1_RTR_DBG_E_ARB_L_SHIFT 24
494#define MME1_RTR_DBG_E_ARB_L_MASK 0x7000000
495
496/* MME1_RTR_DBG_W_ARB */
497#define MME1_RTR_DBG_W_ARB_E_SHIFT 0
498#define MME1_RTR_DBG_W_ARB_E_MASK 0x7
499#define MME1_RTR_DBG_W_ARB_S_SHIFT 8
500#define MME1_RTR_DBG_W_ARB_S_MASK 0x700
501#define MME1_RTR_DBG_W_ARB_N_SHIFT 16
502#define MME1_RTR_DBG_W_ARB_N_MASK 0x70000
503#define MME1_RTR_DBG_W_ARB_L_SHIFT 24
504#define MME1_RTR_DBG_W_ARB_L_MASK 0x7000000
505
506/* MME1_RTR_DBG_N_ARB */
507#define MME1_RTR_DBG_N_ARB_W_SHIFT 0
508#define MME1_RTR_DBG_N_ARB_W_MASK 0x7
509#define MME1_RTR_DBG_N_ARB_E_SHIFT 8
510#define MME1_RTR_DBG_N_ARB_E_MASK 0x700
511#define MME1_RTR_DBG_N_ARB_S_SHIFT 16
512#define MME1_RTR_DBG_N_ARB_S_MASK 0x70000
513#define MME1_RTR_DBG_N_ARB_L_SHIFT 24
514#define MME1_RTR_DBG_N_ARB_L_MASK 0x7000000
515
516/* MME1_RTR_DBG_S_ARB */
517#define MME1_RTR_DBG_S_ARB_W_SHIFT 0
518#define MME1_RTR_DBG_S_ARB_W_MASK 0x7
519#define MME1_RTR_DBG_S_ARB_E_SHIFT 8
520#define MME1_RTR_DBG_S_ARB_E_MASK 0x700
521#define MME1_RTR_DBG_S_ARB_N_SHIFT 16
522#define MME1_RTR_DBG_S_ARB_N_MASK 0x70000
523#define MME1_RTR_DBG_S_ARB_L_SHIFT 24
524#define MME1_RTR_DBG_S_ARB_L_MASK 0x7000000
525
526/* MME1_RTR_DBG_L_ARB */
527#define MME1_RTR_DBG_L_ARB_W_SHIFT 0
528#define MME1_RTR_DBG_L_ARB_W_MASK 0x7
529#define MME1_RTR_DBG_L_ARB_E_SHIFT 8
530#define MME1_RTR_DBG_L_ARB_E_MASK 0x700
531#define MME1_RTR_DBG_L_ARB_S_SHIFT 16
532#define MME1_RTR_DBG_L_ARB_S_MASK 0x70000
533#define MME1_RTR_DBG_L_ARB_N_SHIFT 24
534#define MME1_RTR_DBG_L_ARB_N_MASK 0x7000000
535
536/* MME1_RTR_DBG_E_ARB_MAX */
537#define MME1_RTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0
538#define MME1_RTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F
539
540/* MME1_RTR_DBG_W_ARB_MAX */
541#define MME1_RTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0
542#define MME1_RTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F
543
544/* MME1_RTR_DBG_N_ARB_MAX */
545#define MME1_RTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0
546#define MME1_RTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F
547
548/* MME1_RTR_DBG_S_ARB_MAX */
549#define MME1_RTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0
550#define MME1_RTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F
551
552/* MME1_RTR_DBG_L_ARB_MAX */
553#define MME1_RTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0
554#define MME1_RTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F
555
556/* MME1_RTR_SPLIT_COEF */
557#define MME1_RTR_SPLIT_COEF_VAL_SHIFT 0
558#define MME1_RTR_SPLIT_COEF_VAL_MASK 0xFFFF
559
560/* MME1_RTR_SPLIT_CFG */
561#define MME1_RTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0
562#define MME1_RTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1
563#define MME1_RTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1
564#define MME1_RTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2
565#define MME1_RTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2
566#define MME1_RTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC
567#define MME1_RTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 4
568#define MME1_RTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x10
569#define MME1_RTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 5
570#define MME1_RTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x20
571#define MME1_RTR_SPLIT_CFG_B2B_OPT_SHIFT 6
572#define MME1_RTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0
573
574/* MME1_RTR_SPLIT_RD_SAT */
575#define MME1_RTR_SPLIT_RD_SAT_VAL_SHIFT 0
576#define MME1_RTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF
577
578/* MME1_RTR_SPLIT_RD_RST_TOKEN */
579#define MME1_RTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0
580#define MME1_RTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF
581
582/* MME1_RTR_SPLIT_RD_TIMEOUT */
583#define MME1_RTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0
584#define MME1_RTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF
585
586/* MME1_RTR_SPLIT_WR_SAT */
587#define MME1_RTR_SPLIT_WR_SAT_VAL_SHIFT 0
588#define MME1_RTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF
589
590/* MME1_RTR_WPLIT_WR_TST_TOLEN */
591#define MME1_RTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0
592#define MME1_RTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF
593
594/* MME1_RTR_SPLIT_WR_TIMEOUT */
595#define MME1_RTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0
596#define MME1_RTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF
597
598/* MME1_RTR_HBW_RANGE_HIT */
599#define MME1_RTR_HBW_RANGE_HIT_IND_SHIFT 0
600#define MME1_RTR_HBW_RANGE_HIT_IND_MASK 0xFF
601
602/* MME1_RTR_HBW_RANGE_MASK_L */
603#define MME1_RTR_HBW_RANGE_MASK_L_VAL_SHIFT 0
604#define MME1_RTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF
605
606/* MME1_RTR_HBW_RANGE_MASK_H */
607#define MME1_RTR_HBW_RANGE_MASK_H_VAL_SHIFT 0
608#define MME1_RTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF
609
610/* MME1_RTR_HBW_RANGE_BASE_L */
611#define MME1_RTR_HBW_RANGE_BASE_L_VAL_SHIFT 0
612#define MME1_RTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF
613
614/* MME1_RTR_HBW_RANGE_BASE_H */
615#define MME1_RTR_HBW_RANGE_BASE_H_VAL_SHIFT 0
616#define MME1_RTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF
617
618/* MME1_RTR_LBW_RANGE_HIT */
619#define MME1_RTR_LBW_RANGE_HIT_IND_SHIFT 0
620#define MME1_RTR_LBW_RANGE_HIT_IND_MASK 0xFFFF
621
622/* MME1_RTR_LBW_RANGE_MASK */
623#define MME1_RTR_LBW_RANGE_MASK_VAL_SHIFT 0
624#define MME1_RTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF
625
626/* MME1_RTR_LBW_RANGE_BASE */
627#define MME1_RTR_LBW_RANGE_BASE_VAL_SHIFT 0
628#define MME1_RTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF
629
630/* MME1_RTR_RGLTR */
631#define MME1_RTR_RGLTR_WR_EN_SHIFT 0
632#define MME1_RTR_RGLTR_WR_EN_MASK 0x1
633#define MME1_RTR_RGLTR_RD_EN_SHIFT 4
634#define MME1_RTR_RGLTR_RD_EN_MASK 0x10
635
636/* MME1_RTR_RGLTR_WR_RESULT */
637#define MME1_RTR_RGLTR_WR_RESULT_VAL_SHIFT 0
638#define MME1_RTR_RGLTR_WR_RESULT_VAL_MASK 0xFF
639
640/* MME1_RTR_RGLTR_RD_RESULT */
641#define MME1_RTR_RGLTR_RD_RESULT_VAL_SHIFT 0
642#define MME1_RTR_RGLTR_RD_RESULT_VAL_MASK 0xFF
643
644/* MME1_RTR_SCRAMB_EN */
645#define MME1_RTR_SCRAMB_EN_VAL_SHIFT 0
646#define MME1_RTR_SCRAMB_EN_VAL_MASK 0x1
647
648/* MME1_RTR_NON_LIN_SCRAMB */
649#define MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT 0
650#define MME1_RTR_NON_LIN_SCRAMB_EN_MASK 0x1
651
652#endif /* ASIC_REG_MME1_RTR_MASKS_H_ */
653
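
Every field in this file is described by a SHIFT/MASK pair, so decoding or updating an arbitration weight is a mask-and-shift exercise. A brief sketch using the west-input weight of the east-port read-request arbiter as an example; the helper names are assumptions made for illustration only:

#include <linux/types.h>
#include "mme1_rtr_masks.h"	/* assuming the generated header is on the include path */

/* Illustrative only: extract the W (west input) weight field from a value
 * read out of MME1_RTR_HBW_RD_RQ_E_ARB.
 */
static inline u32 example_get_hbw_rd_rq_e_arb_w(u32 reg_val)
{
	return (reg_val & MME1_RTR_HBW_RD_RQ_E_ARB_W_MASK) >>
	       MME1_RTR_HBW_RD_RQ_E_ARB_W_SHIFT;
}

/* Illustrative only: replace that field with a new weight while leaving the
 * other arbitration fields in the register value untouched.
 */
static inline u32 example_set_hbw_rd_rq_e_arb_w(u32 reg_val, u32 weight)
{
	reg_val &= ~MME1_RTR_HBW_RD_RQ_E_ARB_W_MASK;
	reg_val |= (weight << MME1_RTR_HBW_RD_RQ_E_ARB_W_SHIFT) &
		   MME1_RTR_HBW_RD_RQ_E_ARB_W_MASK;
	return reg_val;
}
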
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
new file mode 100644
index 000000000000..c248339a1cbe
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
@@ -0,0 +1,331 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MME1_RTR_REGS_H_
14#define ASIC_REG_MME1_RTR_REGS_H_
15
16/*
17 *****************************************
18 * MME1_RTR (Prototype: MME_RTR)
19 *****************************************
20 */
21
22#define mmMME1_RTR_HBW_RD_RQ_E_ARB 0x40100
23
24#define mmMME1_RTR_HBW_RD_RQ_W_ARB 0x40104
25
26#define mmMME1_RTR_HBW_RD_RQ_N_ARB 0x40108
27
28#define mmMME1_RTR_HBW_RD_RQ_S_ARB 0x4010C
29
30#define mmMME1_RTR_HBW_RD_RQ_L_ARB 0x40110
31
32#define mmMME1_RTR_HBW_E_ARB_MAX 0x40120
33
34#define mmMME1_RTR_HBW_W_ARB_MAX 0x40124
35
36#define mmMME1_RTR_HBW_N_ARB_MAX 0x40128
37
38#define mmMME1_RTR_HBW_S_ARB_MAX 0x4012C
39
40#define mmMME1_RTR_HBW_L_ARB_MAX 0x40130
41
42#define mmMME1_RTR_HBW_RD_RS_MAX_CREDIT 0x40140
43
44#define mmMME1_RTR_HBW_WR_RQ_MAX_CREDIT 0x40144
45
46#define mmMME1_RTR_HBW_RD_RQ_MAX_CREDIT 0x40148
47
48#define mmMME1_RTR_HBW_RD_RS_E_ARB 0x40150
49
50#define mmMME1_RTR_HBW_RD_RS_W_ARB 0x40154
51
52#define mmMME1_RTR_HBW_RD_RS_N_ARB 0x40158
53
54#define mmMME1_RTR_HBW_RD_RS_S_ARB 0x4015C
55
56#define mmMME1_RTR_HBW_RD_RS_L_ARB 0x40160
57
58#define mmMME1_RTR_HBW_WR_RQ_E_ARB 0x40170
59
60#define mmMME1_RTR_HBW_WR_RQ_W_ARB 0x40174
61
62#define mmMME1_RTR_HBW_WR_RQ_N_ARB 0x40178
63
64#define mmMME1_RTR_HBW_WR_RQ_S_ARB 0x4017C
65
66#define mmMME1_RTR_HBW_WR_RQ_L_ARB 0x40180
67
68#define mmMME1_RTR_HBW_WR_RS_E_ARB 0x40190
69
70#define mmMME1_RTR_HBW_WR_RS_W_ARB 0x40194
71
72#define mmMME1_RTR_HBW_WR_RS_N_ARB 0x40198
73
74#define mmMME1_RTR_HBW_WR_RS_S_ARB 0x4019C
75
76#define mmMME1_RTR_HBW_WR_RS_L_ARB 0x401A0
77
78#define mmMME1_RTR_LBW_RD_RQ_E_ARB 0x40200
79
80#define mmMME1_RTR_LBW_RD_RQ_W_ARB 0x40204
81
82#define mmMME1_RTR_LBW_RD_RQ_N_ARB 0x40208
83
84#define mmMME1_RTR_LBW_RD_RQ_S_ARB 0x4020C
85
86#define mmMME1_RTR_LBW_RD_RQ_L_ARB 0x40210
87
88#define mmMME1_RTR_LBW_E_ARB_MAX 0x40220
89
90#define mmMME1_RTR_LBW_W_ARB_MAX 0x40224
91
92#define mmMME1_RTR_LBW_N_ARB_MAX 0x40228
93
94#define mmMME1_RTR_LBW_S_ARB_MAX 0x4022C
95
96#define mmMME1_RTR_LBW_L_ARB_MAX 0x40230
97
98#define mmMME1_RTR_LBW_SRAM_MAX_CREDIT 0x40240
99
100#define mmMME1_RTR_LBW_RD_RS_E_ARB 0x40250
101
102#define mmMME1_RTR_LBW_RD_RS_W_ARB 0x40254
103
104#define mmMME1_RTR_LBW_RD_RS_N_ARB 0x40258
105
106#define mmMME1_RTR_LBW_RD_RS_S_ARB 0x4025C
107
108#define mmMME1_RTR_LBW_RD_RS_L_ARB 0x40260
109
110#define mmMME1_RTR_LBW_WR_RQ_E_ARB 0x40270
111
112#define mmMME1_RTR_LBW_WR_RQ_W_ARB 0x40274
113
114#define mmMME1_RTR_LBW_WR_RQ_N_ARB 0x40278
115
116#define mmMME1_RTR_LBW_WR_RQ_S_ARB 0x4027C
117
118#define mmMME1_RTR_LBW_WR_RQ_L_ARB 0x40280
119
120#define mmMME1_RTR_LBW_WR_RS_E_ARB 0x40290
121
122#define mmMME1_RTR_LBW_WR_RS_W_ARB 0x40294
123
124#define mmMME1_RTR_LBW_WR_RS_N_ARB 0x40298
125
126#define mmMME1_RTR_LBW_WR_RS_S_ARB 0x4029C
127
128#define mmMME1_RTR_LBW_WR_RS_L_ARB 0x402A0
129
130#define mmMME1_RTR_DBG_E_ARB 0x40300
131
132#define mmMME1_RTR_DBG_W_ARB 0x40304
133
134#define mmMME1_RTR_DBG_N_ARB 0x40308
135
136#define mmMME1_RTR_DBG_S_ARB 0x4030C
137
138#define mmMME1_RTR_DBG_L_ARB 0x40310
139
140#define mmMME1_RTR_DBG_E_ARB_MAX 0x40320
141
142#define mmMME1_RTR_DBG_W_ARB_MAX 0x40324
143
144#define mmMME1_RTR_DBG_N_ARB_MAX 0x40328
145
146#define mmMME1_RTR_DBG_S_ARB_MAX 0x4032C
147
148#define mmMME1_RTR_DBG_L_ARB_MAX 0x40330
149
150#define mmMME1_RTR_SPLIT_COEF_0 0x40400
151
152#define mmMME1_RTR_SPLIT_COEF_1 0x40404
153
154#define mmMME1_RTR_SPLIT_COEF_2 0x40408
155
156#define mmMME1_RTR_SPLIT_COEF_3 0x4040C
157
158#define mmMME1_RTR_SPLIT_COEF_4 0x40410
159
160#define mmMME1_RTR_SPLIT_COEF_5 0x40414
161
162#define mmMME1_RTR_SPLIT_COEF_6 0x40418
163
164#define mmMME1_RTR_SPLIT_COEF_7 0x4041C
165
166#define mmMME1_RTR_SPLIT_COEF_8 0x40420
167
168#define mmMME1_RTR_SPLIT_COEF_9 0x40424
169
170#define mmMME1_RTR_SPLIT_CFG 0x40440
171
172#define mmMME1_RTR_SPLIT_RD_SAT 0x40444
173
174#define mmMME1_RTR_SPLIT_RD_RST_TOKEN 0x40448
175
176#define mmMME1_RTR_SPLIT_RD_TIMEOUT_0 0x4044C
177
178#define mmMME1_RTR_SPLIT_RD_TIMEOUT_1 0x40450
179
180#define mmMME1_RTR_SPLIT_WR_SAT 0x40454
181
182#define mmMME1_RTR_WPLIT_WR_TST_TOLEN 0x40458
183
184#define mmMME1_RTR_SPLIT_WR_TIMEOUT_0 0x4045C
185
186#define mmMME1_RTR_SPLIT_WR_TIMEOUT_1 0x40460
187
188#define mmMME1_RTR_HBW_RANGE_HIT 0x40470
189
190#define mmMME1_RTR_HBW_RANGE_MASK_L_0 0x40480
191
192#define mmMME1_RTR_HBW_RANGE_MASK_L_1 0x40484
193
194#define mmMME1_RTR_HBW_RANGE_MASK_L_2 0x40488
195
196#define mmMME1_RTR_HBW_RANGE_MASK_L_3 0x4048C
197
198#define mmMME1_RTR_HBW_RANGE_MASK_L_4 0x40490
199
200#define mmMME1_RTR_HBW_RANGE_MASK_L_5 0x40494
201
202#define mmMME1_RTR_HBW_RANGE_MASK_L_6 0x40498
203
204#define mmMME1_RTR_HBW_RANGE_MASK_L_7 0x4049C
205
206#define mmMME1_RTR_HBW_RANGE_MASK_H_0 0x404A0
207
208#define mmMME1_RTR_HBW_RANGE_MASK_H_1 0x404A4
209
210#define mmMME1_RTR_HBW_RANGE_MASK_H_2 0x404A8
211
212#define mmMME1_RTR_HBW_RANGE_MASK_H_3 0x404AC
213
214#define mmMME1_RTR_HBW_RANGE_MASK_H_4 0x404B0
215
216#define mmMME1_RTR_HBW_RANGE_MASK_H_5 0x404B4
217
218#define mmMME1_RTR_HBW_RANGE_MASK_H_6 0x404B8
219
220#define mmMME1_RTR_HBW_RANGE_MASK_H_7 0x404BC
221
222#define mmMME1_RTR_HBW_RANGE_BASE_L_0 0x404C0
223
224#define mmMME1_RTR_HBW_RANGE_BASE_L_1 0x404C4
225
226#define mmMME1_RTR_HBW_RANGE_BASE_L_2 0x404C8
227
228#define mmMME1_RTR_HBW_RANGE_BASE_L_3 0x404CC
229
230#define mmMME1_RTR_HBW_RANGE_BASE_L_4 0x404D0
231
232#define mmMME1_RTR_HBW_RANGE_BASE_L_5 0x404D4
233
234#define mmMME1_RTR_HBW_RANGE_BASE_L_6 0x404D8
235
236#define mmMME1_RTR_HBW_RANGE_BASE_L_7 0x404DC
237
238#define mmMME1_RTR_HBW_RANGE_BASE_H_0 0x404E0
239
240#define mmMME1_RTR_HBW_RANGE_BASE_H_1 0x404E4
241
242#define mmMME1_RTR_HBW_RANGE_BASE_H_2 0x404E8
243
244#define mmMME1_RTR_HBW_RANGE_BASE_H_3 0x404EC
245
246#define mmMME1_RTR_HBW_RANGE_BASE_H_4 0x404F0
247
248#define mmMME1_RTR_HBW_RANGE_BASE_H_5 0x404F4
249
250#define mmMME1_RTR_HBW_RANGE_BASE_H_6 0x404F8
251
252#define mmMME1_RTR_HBW_RANGE_BASE_H_7 0x404FC
253
254#define mmMME1_RTR_LBW_RANGE_HIT 0x40500
255
256#define mmMME1_RTR_LBW_RANGE_MASK_0 0x40510
257
258#define mmMME1_RTR_LBW_RANGE_MASK_1 0x40514
259
260#define mmMME1_RTR_LBW_RANGE_MASK_2 0x40518
261
262#define mmMME1_RTR_LBW_RANGE_MASK_3 0x4051C
263
264#define mmMME1_RTR_LBW_RANGE_MASK_4 0x40520
265
266#define mmMME1_RTR_LBW_RANGE_MASK_5 0x40524
267
268#define mmMME1_RTR_LBW_RANGE_MASK_6 0x40528
269
270#define mmMME1_RTR_LBW_RANGE_MASK_7 0x4052C
271
272#define mmMME1_RTR_LBW_RANGE_MASK_8 0x40530
273
274#define mmMME1_RTR_LBW_RANGE_MASK_9 0x40534
275
276#define mmMME1_RTR_LBW_RANGE_MASK_10 0x40538
277
278#define mmMME1_RTR_LBW_RANGE_MASK_11 0x4053C
279
280#define mmMME1_RTR_LBW_RANGE_MASK_12 0x40540
281
282#define mmMME1_RTR_LBW_RANGE_MASK_13 0x40544
283
284#define mmMME1_RTR_LBW_RANGE_MASK_14 0x40548
285
286#define mmMME1_RTR_LBW_RANGE_MASK_15 0x4054C
287
288#define mmMME1_RTR_LBW_RANGE_BASE_0 0x40550
289
290#define mmMME1_RTR_LBW_RANGE_BASE_1 0x40554
291
292#define mmMME1_RTR_LBW_RANGE_BASE_2 0x40558
293
294#define mmMME1_RTR_LBW_RANGE_BASE_3 0x4055C
295
296#define mmMME1_RTR_LBW_RANGE_BASE_4 0x40560
297
298#define mmMME1_RTR_LBW_RANGE_BASE_5 0x40564
299
300#define mmMME1_RTR_LBW_RANGE_BASE_6 0x40568
301
302#define mmMME1_RTR_LBW_RANGE_BASE_7 0x4056C
303
304#define mmMME1_RTR_LBW_RANGE_BASE_8 0x40570
305
306#define mmMME1_RTR_LBW_RANGE_BASE_9 0x40574
307
308#define mmMME1_RTR_LBW_RANGE_BASE_10 0x40578
309
310#define mmMME1_RTR_LBW_RANGE_BASE_11 0x4057C
311
312#define mmMME1_RTR_LBW_RANGE_BASE_12 0x40580
313
314#define mmMME1_RTR_LBW_RANGE_BASE_13 0x40584
315
316#define mmMME1_RTR_LBW_RANGE_BASE_14 0x40588
317
318#define mmMME1_RTR_LBW_RANGE_BASE_15 0x4058C
319
320#define mmMME1_RTR_RGLTR 0x40590
321
322#define mmMME1_RTR_RGLTR_WR_RESULT 0x40594
323
324#define mmMME1_RTR_RGLTR_RD_RESULT 0x40598
325
326#define mmMME1_RTR_SCRAMB_EN 0x40600
327
328#define mmMME1_RTR_NON_LIN_SCRAMB 0x40604
329
330#endif /* ASIC_REG_MME1_RTR_REGS_H_ */
331
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
new file mode 100644
index 000000000000..7a2b777bdc4f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
@@ -0,0 +1,331 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MME2_RTR_REGS_H_
14#define ASIC_REG_MME2_RTR_REGS_H_
15
16/*
17 *****************************************
18 * MME2_RTR (Prototype: MME_RTR)
19 *****************************************
20 */
21
22#define mmMME2_RTR_HBW_RD_RQ_E_ARB 0x80100
23
24#define mmMME2_RTR_HBW_RD_RQ_W_ARB 0x80104
25
26#define mmMME2_RTR_HBW_RD_RQ_N_ARB 0x80108
27
28#define mmMME2_RTR_HBW_RD_RQ_S_ARB 0x8010C
29
30#define mmMME2_RTR_HBW_RD_RQ_L_ARB 0x80110
31
32#define mmMME2_RTR_HBW_E_ARB_MAX 0x80120
33
34#define mmMME2_RTR_HBW_W_ARB_MAX 0x80124
35
36#define mmMME2_RTR_HBW_N_ARB_MAX 0x80128
37
38#define mmMME2_RTR_HBW_S_ARB_MAX 0x8012C
39
40#define mmMME2_RTR_HBW_L_ARB_MAX 0x80130
41
42#define mmMME2_RTR_HBW_RD_RS_MAX_CREDIT 0x80140
43
44#define mmMME2_RTR_HBW_WR_RQ_MAX_CREDIT 0x80144
45
46#define mmMME2_RTR_HBW_RD_RQ_MAX_CREDIT 0x80148
47
48#define mmMME2_RTR_HBW_RD_RS_E_ARB 0x80150
49
50#define mmMME2_RTR_HBW_RD_RS_W_ARB 0x80154
51
52#define mmMME2_RTR_HBW_RD_RS_N_ARB 0x80158
53
54#define mmMME2_RTR_HBW_RD_RS_S_ARB 0x8015C
55
56#define mmMME2_RTR_HBW_RD_RS_L_ARB 0x80160
57
58#define mmMME2_RTR_HBW_WR_RQ_E_ARB 0x80170
59
60#define mmMME2_RTR_HBW_WR_RQ_W_ARB 0x80174
61
62#define mmMME2_RTR_HBW_WR_RQ_N_ARB 0x80178
63
64#define mmMME2_RTR_HBW_WR_RQ_S_ARB 0x8017C
65
66#define mmMME2_RTR_HBW_WR_RQ_L_ARB 0x80180
67
68#define mmMME2_RTR_HBW_WR_RS_E_ARB 0x80190
69
70#define mmMME2_RTR_HBW_WR_RS_W_ARB 0x80194
71
72#define mmMME2_RTR_HBW_WR_RS_N_ARB 0x80198
73
74#define mmMME2_RTR_HBW_WR_RS_S_ARB 0x8019C
75
76#define mmMME2_RTR_HBW_WR_RS_L_ARB 0x801A0
77
78#define mmMME2_RTR_LBW_RD_RQ_E_ARB 0x80200
79
80#define mmMME2_RTR_LBW_RD_RQ_W_ARB 0x80204
81
82#define mmMME2_RTR_LBW_RD_RQ_N_ARB 0x80208
83
84#define mmMME2_RTR_LBW_RD_RQ_S_ARB 0x8020C
85
86#define mmMME2_RTR_LBW_RD_RQ_L_ARB 0x80210
87
88#define mmMME2_RTR_LBW_E_ARB_MAX 0x80220
89
90#define mmMME2_RTR_LBW_W_ARB_MAX 0x80224
91
92#define mmMME2_RTR_LBW_N_ARB_MAX 0x80228
93
94#define mmMME2_RTR_LBW_S_ARB_MAX 0x8022C
95
96#define mmMME2_RTR_LBW_L_ARB_MAX 0x80230
97
98#define mmMME2_RTR_LBW_SRAM_MAX_CREDIT 0x80240
99
100#define mmMME2_RTR_LBW_RD_RS_E_ARB 0x80250
101
102#define mmMME2_RTR_LBW_RD_RS_W_ARB 0x80254
103
104#define mmMME2_RTR_LBW_RD_RS_N_ARB 0x80258
105
106#define mmMME2_RTR_LBW_RD_RS_S_ARB 0x8025C
107
108#define mmMME2_RTR_LBW_RD_RS_L_ARB 0x80260
109
110#define mmMME2_RTR_LBW_WR_RQ_E_ARB 0x80270
111
112#define mmMME2_RTR_LBW_WR_RQ_W_ARB 0x80274
113
114#define mmMME2_RTR_LBW_WR_RQ_N_ARB 0x80278
115
116#define mmMME2_RTR_LBW_WR_RQ_S_ARB 0x8027C
117
118#define mmMME2_RTR_LBW_WR_RQ_L_ARB 0x80280
119
120#define mmMME2_RTR_LBW_WR_RS_E_ARB 0x80290
121
122#define mmMME2_RTR_LBW_WR_RS_W_ARB 0x80294
123
124#define mmMME2_RTR_LBW_WR_RS_N_ARB 0x80298
125
126#define mmMME2_RTR_LBW_WR_RS_S_ARB 0x8029C
127
128#define mmMME2_RTR_LBW_WR_RS_L_ARB 0x802A0
129
130#define mmMME2_RTR_DBG_E_ARB 0x80300
131
132#define mmMME2_RTR_DBG_W_ARB 0x80304
133
134#define mmMME2_RTR_DBG_N_ARB 0x80308
135
136#define mmMME2_RTR_DBG_S_ARB 0x8030C
137
138#define mmMME2_RTR_DBG_L_ARB 0x80310
139
140#define mmMME2_RTR_DBG_E_ARB_MAX 0x80320
141
142#define mmMME2_RTR_DBG_W_ARB_MAX 0x80324
143
144#define mmMME2_RTR_DBG_N_ARB_MAX 0x80328
145
146#define mmMME2_RTR_DBG_S_ARB_MAX 0x8032C
147
148#define mmMME2_RTR_DBG_L_ARB_MAX 0x80330
149
150#define mmMME2_RTR_SPLIT_COEF_0 0x80400
151
152#define mmMME2_RTR_SPLIT_COEF_1 0x80404
153
154#define mmMME2_RTR_SPLIT_COEF_2 0x80408
155
156#define mmMME2_RTR_SPLIT_COEF_3 0x8040C
157
158#define mmMME2_RTR_SPLIT_COEF_4 0x80410
159
160#define mmMME2_RTR_SPLIT_COEF_5 0x80414
161
162#define mmMME2_RTR_SPLIT_COEF_6 0x80418
163
164#define mmMME2_RTR_SPLIT_COEF_7 0x8041C
165
166#define mmMME2_RTR_SPLIT_COEF_8 0x80420
167
168#define mmMME2_RTR_SPLIT_COEF_9 0x80424
169
170#define mmMME2_RTR_SPLIT_CFG 0x80440
171
172#define mmMME2_RTR_SPLIT_RD_SAT 0x80444
173
174#define mmMME2_RTR_SPLIT_RD_RST_TOKEN 0x80448
175
176#define mmMME2_RTR_SPLIT_RD_TIMEOUT_0 0x8044C
177
178#define mmMME2_RTR_SPLIT_RD_TIMEOUT_1 0x80450
179
180#define mmMME2_RTR_SPLIT_WR_SAT 0x80454
181
182#define mmMME2_RTR_WPLIT_WR_TST_TOLEN 0x80458
183
184#define mmMME2_RTR_SPLIT_WR_TIMEOUT_0 0x8045C
185
186#define mmMME2_RTR_SPLIT_WR_TIMEOUT_1 0x80460
187
188#define mmMME2_RTR_HBW_RANGE_HIT 0x80470
189
190#define mmMME2_RTR_HBW_RANGE_MASK_L_0 0x80480
191
192#define mmMME2_RTR_HBW_RANGE_MASK_L_1 0x80484
193
194#define mmMME2_RTR_HBW_RANGE_MASK_L_2 0x80488
195
196#define mmMME2_RTR_HBW_RANGE_MASK_L_3 0x8048C
197
198#define mmMME2_RTR_HBW_RANGE_MASK_L_4 0x80490
199
200#define mmMME2_RTR_HBW_RANGE_MASK_L_5 0x80494
201
202#define mmMME2_RTR_HBW_RANGE_MASK_L_6 0x80498
203
204#define mmMME2_RTR_HBW_RANGE_MASK_L_7 0x8049C
205
206#define mmMME2_RTR_HBW_RANGE_MASK_H_0 0x804A0
207
208#define mmMME2_RTR_HBW_RANGE_MASK_H_1 0x804A4
209
210#define mmMME2_RTR_HBW_RANGE_MASK_H_2 0x804A8
211
212#define mmMME2_RTR_HBW_RANGE_MASK_H_3 0x804AC
213
214#define mmMME2_RTR_HBW_RANGE_MASK_H_4 0x804B0
215
216#define mmMME2_RTR_HBW_RANGE_MASK_H_5 0x804B4
217
218#define mmMME2_RTR_HBW_RANGE_MASK_H_6 0x804B8
219
220#define mmMME2_RTR_HBW_RANGE_MASK_H_7 0x804BC
221
222#define mmMME2_RTR_HBW_RANGE_BASE_L_0 0x804C0
223
224#define mmMME2_RTR_HBW_RANGE_BASE_L_1 0x804C4
225
226#define mmMME2_RTR_HBW_RANGE_BASE_L_2 0x804C8
227
228#define mmMME2_RTR_HBW_RANGE_BASE_L_3 0x804CC
229
230#define mmMME2_RTR_HBW_RANGE_BASE_L_4 0x804D0
231
232#define mmMME2_RTR_HBW_RANGE_BASE_L_5 0x804D4
233
234#define mmMME2_RTR_HBW_RANGE_BASE_L_6 0x804D8
235
236#define mmMME2_RTR_HBW_RANGE_BASE_L_7 0x804DC
237
238#define mmMME2_RTR_HBW_RANGE_BASE_H_0 0x804E0
239
240#define mmMME2_RTR_HBW_RANGE_BASE_H_1 0x804E4
241
242#define mmMME2_RTR_HBW_RANGE_BASE_H_2 0x804E8
243
244#define mmMME2_RTR_HBW_RANGE_BASE_H_3 0x804EC
245
246#define mmMME2_RTR_HBW_RANGE_BASE_H_4 0x804F0
247
248#define mmMME2_RTR_HBW_RANGE_BASE_H_5 0x804F4
249
250#define mmMME2_RTR_HBW_RANGE_BASE_H_6 0x804F8
251
252#define mmMME2_RTR_HBW_RANGE_BASE_H_7 0x804FC
253
254#define mmMME2_RTR_LBW_RANGE_HIT 0x80500
255
256#define mmMME2_RTR_LBW_RANGE_MASK_0 0x80510
257
258#define mmMME2_RTR_LBW_RANGE_MASK_1 0x80514
259
260#define mmMME2_RTR_LBW_RANGE_MASK_2 0x80518
261
262#define mmMME2_RTR_LBW_RANGE_MASK_3 0x8051C
263
264#define mmMME2_RTR_LBW_RANGE_MASK_4 0x80520
265
266#define mmMME2_RTR_LBW_RANGE_MASK_5 0x80524
267
268#define mmMME2_RTR_LBW_RANGE_MASK_6 0x80528
269
270#define mmMME2_RTR_LBW_RANGE_MASK_7 0x8052C
271
272#define mmMME2_RTR_LBW_RANGE_MASK_8 0x80530
273
274#define mmMME2_RTR_LBW_RANGE_MASK_9 0x80534
275
276#define mmMME2_RTR_LBW_RANGE_MASK_10 0x80538
277
278#define mmMME2_RTR_LBW_RANGE_MASK_11 0x8053C
279
280#define mmMME2_RTR_LBW_RANGE_MASK_12 0x80540
281
282#define mmMME2_RTR_LBW_RANGE_MASK_13 0x80544
283
284#define mmMME2_RTR_LBW_RANGE_MASK_14 0x80548
285
286#define mmMME2_RTR_LBW_RANGE_MASK_15 0x8054C
287
288#define mmMME2_RTR_LBW_RANGE_BASE_0 0x80550
289
290#define mmMME2_RTR_LBW_RANGE_BASE_1 0x80554
291
292#define mmMME2_RTR_LBW_RANGE_BASE_2 0x80558
293
294#define mmMME2_RTR_LBW_RANGE_BASE_3 0x8055C
295
296#define mmMME2_RTR_LBW_RANGE_BASE_4 0x80560
297
298#define mmMME2_RTR_LBW_RANGE_BASE_5 0x80564
299
300#define mmMME2_RTR_LBW_RANGE_BASE_6 0x80568
301
302#define mmMME2_RTR_LBW_RANGE_BASE_7 0x8056C
303
304#define mmMME2_RTR_LBW_RANGE_BASE_8 0x80570
305
306#define mmMME2_RTR_LBW_RANGE_BASE_9 0x80574
307
308#define mmMME2_RTR_LBW_RANGE_BASE_10 0x80578
309
310#define mmMME2_RTR_LBW_RANGE_BASE_11 0x8057C
311
312#define mmMME2_RTR_LBW_RANGE_BASE_12 0x80580
313
314#define mmMME2_RTR_LBW_RANGE_BASE_13 0x80584
315
316#define mmMME2_RTR_LBW_RANGE_BASE_14 0x80588
317
318#define mmMME2_RTR_LBW_RANGE_BASE_15 0x8058C
319
320#define mmMME2_RTR_RGLTR 0x80590
321
322#define mmMME2_RTR_RGLTR_WR_RESULT 0x80594
323
324#define mmMME2_RTR_RGLTR_RD_RESULT 0x80598
325
326#define mmMME2_RTR_SCRAMB_EN 0x80600
327
328#define mmMME2_RTR_NON_LIN_SCRAMB 0x80604
329
330#endif /* ASIC_REG_MME2_RTR_REGS_H_ */
331
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
new file mode 100644
index 000000000000..b78f8bc387fc
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
@@ -0,0 +1,331 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MME3_RTR_REGS_H_
14#define ASIC_REG_MME3_RTR_REGS_H_
15
16/*
17 *****************************************
18 * MME3_RTR (Prototype: MME_RTR)
19 *****************************************
20 */
21
22#define mmMME3_RTR_HBW_RD_RQ_E_ARB 0xC0100
23
24#define mmMME3_RTR_HBW_RD_RQ_W_ARB 0xC0104
25
26#define mmMME3_RTR_HBW_RD_RQ_N_ARB 0xC0108
27
28#define mmMME3_RTR_HBW_RD_RQ_S_ARB 0xC010C
29
30#define mmMME3_RTR_HBW_RD_RQ_L_ARB 0xC0110
31
32#define mmMME3_RTR_HBW_E_ARB_MAX 0xC0120
33
34#define mmMME3_RTR_HBW_W_ARB_MAX 0xC0124
35
36#define mmMME3_RTR_HBW_N_ARB_MAX 0xC0128
37
38#define mmMME3_RTR_HBW_S_ARB_MAX 0xC012C
39
40#define mmMME3_RTR_HBW_L_ARB_MAX 0xC0130
41
42#define mmMME3_RTR_HBW_RD_RS_MAX_CREDIT 0xC0140
43
44#define mmMME3_RTR_HBW_WR_RQ_MAX_CREDIT 0xC0144
45
46#define mmMME3_RTR_HBW_RD_RQ_MAX_CREDIT 0xC0148
47
48#define mmMME3_RTR_HBW_RD_RS_E_ARB 0xC0150
49
50#define mmMME3_RTR_HBW_RD_RS_W_ARB 0xC0154
51
52#define mmMME3_RTR_HBW_RD_RS_N_ARB 0xC0158
53
54#define mmMME3_RTR_HBW_RD_RS_S_ARB 0xC015C
55
56#define mmMME3_RTR_HBW_RD_RS_L_ARB 0xC0160
57
58#define mmMME3_RTR_HBW_WR_RQ_E_ARB 0xC0170
59
60#define mmMME3_RTR_HBW_WR_RQ_W_ARB 0xC0174
61
62#define mmMME3_RTR_HBW_WR_RQ_N_ARB 0xC0178
63
64#define mmMME3_RTR_HBW_WR_RQ_S_ARB 0xC017C
65
66#define mmMME3_RTR_HBW_WR_RQ_L_ARB 0xC0180
67
68#define mmMME3_RTR_HBW_WR_RS_E_ARB 0xC0190
69
70#define mmMME3_RTR_HBW_WR_RS_W_ARB 0xC0194
71
72#define mmMME3_RTR_HBW_WR_RS_N_ARB 0xC0198
73
74#define mmMME3_RTR_HBW_WR_RS_S_ARB 0xC019C
75
76#define mmMME3_RTR_HBW_WR_RS_L_ARB 0xC01A0
77
78#define mmMME3_RTR_LBW_RD_RQ_E_ARB 0xC0200
79
80#define mmMME3_RTR_LBW_RD_RQ_W_ARB 0xC0204
81
82#define mmMME3_RTR_LBW_RD_RQ_N_ARB 0xC0208
83
84#define mmMME3_RTR_LBW_RD_RQ_S_ARB 0xC020C
85
86#define mmMME3_RTR_LBW_RD_RQ_L_ARB 0xC0210
87
88#define mmMME3_RTR_LBW_E_ARB_MAX 0xC0220
89
90#define mmMME3_RTR_LBW_W_ARB_MAX 0xC0224
91
92#define mmMME3_RTR_LBW_N_ARB_MAX 0xC0228
93
94#define mmMME3_RTR_LBW_S_ARB_MAX 0xC022C
95
96#define mmMME3_RTR_LBW_L_ARB_MAX 0xC0230
97
98#define mmMME3_RTR_LBW_SRAM_MAX_CREDIT 0xC0240
99
100#define mmMME3_RTR_LBW_RD_RS_E_ARB 0xC0250
101
102#define mmMME3_RTR_LBW_RD_RS_W_ARB 0xC0254
103
104#define mmMME3_RTR_LBW_RD_RS_N_ARB 0xC0258
105
106#define mmMME3_RTR_LBW_RD_RS_S_ARB 0xC025C
107
108#define mmMME3_RTR_LBW_RD_RS_L_ARB 0xC0260
109
110#define mmMME3_RTR_LBW_WR_RQ_E_ARB 0xC0270
111
112#define mmMME3_RTR_LBW_WR_RQ_W_ARB 0xC0274
113
114#define mmMME3_RTR_LBW_WR_RQ_N_ARB 0xC0278
115
116#define mmMME3_RTR_LBW_WR_RQ_S_ARB 0xC027C
117
118#define mmMME3_RTR_LBW_WR_RQ_L_ARB 0xC0280
119
120#define mmMME3_RTR_LBW_WR_RS_E_ARB 0xC0290
121
122#define mmMME3_RTR_LBW_WR_RS_W_ARB 0xC0294
123
124#define mmMME3_RTR_LBW_WR_RS_N_ARB 0xC0298
125
126#define mmMME3_RTR_LBW_WR_RS_S_ARB 0xC029C
127
128#define mmMME3_RTR_LBW_WR_RS_L_ARB 0xC02A0
129
130#define mmMME3_RTR_DBG_E_ARB 0xC0300
131
132#define mmMME3_RTR_DBG_W_ARB 0xC0304
133
134#define mmMME3_RTR_DBG_N_ARB 0xC0308
135
136#define mmMME3_RTR_DBG_S_ARB 0xC030C
137
138#define mmMME3_RTR_DBG_L_ARB 0xC0310
139
140#define mmMME3_RTR_DBG_E_ARB_MAX 0xC0320
141
142#define mmMME3_RTR_DBG_W_ARB_MAX 0xC0324
143
144#define mmMME3_RTR_DBG_N_ARB_MAX 0xC0328
145
146#define mmMME3_RTR_DBG_S_ARB_MAX 0xC032C
147
148#define mmMME3_RTR_DBG_L_ARB_MAX 0xC0330
149
150#define mmMME3_RTR_SPLIT_COEF_0 0xC0400
151
152#define mmMME3_RTR_SPLIT_COEF_1 0xC0404
153
154#define mmMME3_RTR_SPLIT_COEF_2 0xC0408
155
156#define mmMME3_RTR_SPLIT_COEF_3 0xC040C
157
158#define mmMME3_RTR_SPLIT_COEF_4 0xC0410
159
160#define mmMME3_RTR_SPLIT_COEF_5 0xC0414
161
162#define mmMME3_RTR_SPLIT_COEF_6 0xC0418
163
164#define mmMME3_RTR_SPLIT_COEF_7 0xC041C
165
166#define mmMME3_RTR_SPLIT_COEF_8 0xC0420
167
168#define mmMME3_RTR_SPLIT_COEF_9 0xC0424
169
170#define mmMME3_RTR_SPLIT_CFG 0xC0440
171
172#define mmMME3_RTR_SPLIT_RD_SAT 0xC0444
173
174#define mmMME3_RTR_SPLIT_RD_RST_TOKEN 0xC0448
175
176#define mmMME3_RTR_SPLIT_RD_TIMEOUT_0 0xC044C
177
178#define mmMME3_RTR_SPLIT_RD_TIMEOUT_1 0xC0450
179
180#define mmMME3_RTR_SPLIT_WR_SAT 0xC0454
181
182#define mmMME3_RTR_WPLIT_WR_TST_TOLEN 0xC0458
183
184#define mmMME3_RTR_SPLIT_WR_TIMEOUT_0 0xC045C
185
186#define mmMME3_RTR_SPLIT_WR_TIMEOUT_1 0xC0460
187
188#define mmMME3_RTR_HBW_RANGE_HIT 0xC0470
189
190#define mmMME3_RTR_HBW_RANGE_MASK_L_0 0xC0480
191
192#define mmMME3_RTR_HBW_RANGE_MASK_L_1 0xC0484
193
194#define mmMME3_RTR_HBW_RANGE_MASK_L_2 0xC0488
195
196#define mmMME3_RTR_HBW_RANGE_MASK_L_3 0xC048C
197
198#define mmMME3_RTR_HBW_RANGE_MASK_L_4 0xC0490
199
200#define mmMME3_RTR_HBW_RANGE_MASK_L_5 0xC0494
201
202#define mmMME3_RTR_HBW_RANGE_MASK_L_6 0xC0498
203
204#define mmMME3_RTR_HBW_RANGE_MASK_L_7 0xC049C
205
206#define mmMME3_RTR_HBW_RANGE_MASK_H_0 0xC04A0
207
208#define mmMME3_RTR_HBW_RANGE_MASK_H_1 0xC04A4
209
210#define mmMME3_RTR_HBW_RANGE_MASK_H_2 0xC04A8
211
212#define mmMME3_RTR_HBW_RANGE_MASK_H_3 0xC04AC
213
214#define mmMME3_RTR_HBW_RANGE_MASK_H_4 0xC04B0
215
216#define mmMME3_RTR_HBW_RANGE_MASK_H_5 0xC04B4
217
218#define mmMME3_RTR_HBW_RANGE_MASK_H_6 0xC04B8
219
220#define mmMME3_RTR_HBW_RANGE_MASK_H_7 0xC04BC
221
222#define mmMME3_RTR_HBW_RANGE_BASE_L_0 0xC04C0
223
224#define mmMME3_RTR_HBW_RANGE_BASE_L_1 0xC04C4
225
226#define mmMME3_RTR_HBW_RANGE_BASE_L_2 0xC04C8
227
228#define mmMME3_RTR_HBW_RANGE_BASE_L_3 0xC04CC
229
230#define mmMME3_RTR_HBW_RANGE_BASE_L_4 0xC04D0
231
232#define mmMME3_RTR_HBW_RANGE_BASE_L_5 0xC04D4
233
234#define mmMME3_RTR_HBW_RANGE_BASE_L_6 0xC04D8
235
236#define mmMME3_RTR_HBW_RANGE_BASE_L_7 0xC04DC
237
238#define mmMME3_RTR_HBW_RANGE_BASE_H_0 0xC04E0
239
240#define mmMME3_RTR_HBW_RANGE_BASE_H_1 0xC04E4
241
242#define mmMME3_RTR_HBW_RANGE_BASE_H_2 0xC04E8
243
244#define mmMME3_RTR_HBW_RANGE_BASE_H_3 0xC04EC
245
246#define mmMME3_RTR_HBW_RANGE_BASE_H_4 0xC04F0
247
248#define mmMME3_RTR_HBW_RANGE_BASE_H_5 0xC04F4
249
250#define mmMME3_RTR_HBW_RANGE_BASE_H_6 0xC04F8
251
252#define mmMME3_RTR_HBW_RANGE_BASE_H_7 0xC04FC
253
254#define mmMME3_RTR_LBW_RANGE_HIT 0xC0500
255
256#define mmMME3_RTR_LBW_RANGE_MASK_0 0xC0510
257
258#define mmMME3_RTR_LBW_RANGE_MASK_1 0xC0514
259
260#define mmMME3_RTR_LBW_RANGE_MASK_2 0xC0518
261
262#define mmMME3_RTR_LBW_RANGE_MASK_3 0xC051C
263
264#define mmMME3_RTR_LBW_RANGE_MASK_4 0xC0520
265
266#define mmMME3_RTR_LBW_RANGE_MASK_5 0xC0524
267
268#define mmMME3_RTR_LBW_RANGE_MASK_6 0xC0528
269
270#define mmMME3_RTR_LBW_RANGE_MASK_7 0xC052C
271
272#define mmMME3_RTR_LBW_RANGE_MASK_8 0xC0530
273
274#define mmMME3_RTR_LBW_RANGE_MASK_9 0xC0534
275
276#define mmMME3_RTR_LBW_RANGE_MASK_10 0xC0538
277
278#define mmMME3_RTR_LBW_RANGE_MASK_11 0xC053C
279
280#define mmMME3_RTR_LBW_RANGE_MASK_12 0xC0540
281
282#define mmMME3_RTR_LBW_RANGE_MASK_13 0xC0544
283
284#define mmMME3_RTR_LBW_RANGE_MASK_14 0xC0548
285
286#define mmMME3_RTR_LBW_RANGE_MASK_15 0xC054C
287
288#define mmMME3_RTR_LBW_RANGE_BASE_0 0xC0550
289
290#define mmMME3_RTR_LBW_RANGE_BASE_1 0xC0554
291
292#define mmMME3_RTR_LBW_RANGE_BASE_2 0xC0558
293
294#define mmMME3_RTR_LBW_RANGE_BASE_3 0xC055C
295
296#define mmMME3_RTR_LBW_RANGE_BASE_4 0xC0560
297
298#define mmMME3_RTR_LBW_RANGE_BASE_5 0xC0564
299
300#define mmMME3_RTR_LBW_RANGE_BASE_6 0xC0568
301
302#define mmMME3_RTR_LBW_RANGE_BASE_7 0xC056C
303
304#define mmMME3_RTR_LBW_RANGE_BASE_8 0xC0570
305
306#define mmMME3_RTR_LBW_RANGE_BASE_9 0xC0574
307
308#define mmMME3_RTR_LBW_RANGE_BASE_10 0xC0578
309
310#define mmMME3_RTR_LBW_RANGE_BASE_11 0xC057C
311
312#define mmMME3_RTR_LBW_RANGE_BASE_12 0xC0580
313
314#define mmMME3_RTR_LBW_RANGE_BASE_13 0xC0584
315
316#define mmMME3_RTR_LBW_RANGE_BASE_14 0xC0588
317
318#define mmMME3_RTR_LBW_RANGE_BASE_15 0xC058C
319
320#define mmMME3_RTR_RGLTR 0xC0590
321
322#define mmMME3_RTR_RGLTR_WR_RESULT 0xC0594
323
324#define mmMME3_RTR_RGLTR_RD_RESULT 0xC0598
325
326#define mmMME3_RTR_SCRAMB_EN 0xC0600
327
328#define mmMME3_RTR_NON_LIN_SCRAMB 0xC0604
329
330#endif /* ASIC_REG_MME3_RTR_REGS_H_ */
331
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
new file mode 100644
index 000000000000..d9a4a02cefa3
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
@@ -0,0 +1,331 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MME4_RTR_REGS_H_
14#define ASIC_REG_MME4_RTR_REGS_H_
15
16/*
17 *****************************************
18 * MME4_RTR (Prototype: MME_RTR)
19 *****************************************
20 */
21
22#define mmMME4_RTR_HBW_RD_RQ_E_ARB 0x100100
23
24#define mmMME4_RTR_HBW_RD_RQ_W_ARB 0x100104
25
26#define mmMME4_RTR_HBW_RD_RQ_N_ARB 0x100108
27
28#define mmMME4_RTR_HBW_RD_RQ_S_ARB 0x10010C
29
30#define mmMME4_RTR_HBW_RD_RQ_L_ARB 0x100110
31
32#define mmMME4_RTR_HBW_E_ARB_MAX 0x100120
33
34#define mmMME4_RTR_HBW_W_ARB_MAX 0x100124
35
36#define mmMME4_RTR_HBW_N_ARB_MAX 0x100128
37
38#define mmMME4_RTR_HBW_S_ARB_MAX 0x10012C
39
40#define mmMME4_RTR_HBW_L_ARB_MAX 0x100130
41
42#define mmMME4_RTR_HBW_RD_RS_MAX_CREDIT 0x100140
43
44#define mmMME4_RTR_HBW_WR_RQ_MAX_CREDIT 0x100144
45
46#define mmMME4_RTR_HBW_RD_RQ_MAX_CREDIT 0x100148
47
48#define mmMME4_RTR_HBW_RD_RS_E_ARB 0x100150
49
50#define mmMME4_RTR_HBW_RD_RS_W_ARB 0x100154
51
52#define mmMME4_RTR_HBW_RD_RS_N_ARB 0x100158
53
54#define mmMME4_RTR_HBW_RD_RS_S_ARB 0x10015C
55
56#define mmMME4_RTR_HBW_RD_RS_L_ARB 0x100160
57
58#define mmMME4_RTR_HBW_WR_RQ_E_ARB 0x100170
59
60#define mmMME4_RTR_HBW_WR_RQ_W_ARB 0x100174
61
62#define mmMME4_RTR_HBW_WR_RQ_N_ARB 0x100178
63
64#define mmMME4_RTR_HBW_WR_RQ_S_ARB 0x10017C
65
66#define mmMME4_RTR_HBW_WR_RQ_L_ARB 0x100180
67
68#define mmMME4_RTR_HBW_WR_RS_E_ARB 0x100190
69
70#define mmMME4_RTR_HBW_WR_RS_W_ARB 0x100194
71
72#define mmMME4_RTR_HBW_WR_RS_N_ARB 0x100198
73
74#define mmMME4_RTR_HBW_WR_RS_S_ARB 0x10019C
75
76#define mmMME4_RTR_HBW_WR_RS_L_ARB 0x1001A0
77
78#define mmMME4_RTR_LBW_RD_RQ_E_ARB 0x100200
79
80#define mmMME4_RTR_LBW_RD_RQ_W_ARB 0x100204
81
82#define mmMME4_RTR_LBW_RD_RQ_N_ARB 0x100208
83
84#define mmMME4_RTR_LBW_RD_RQ_S_ARB 0x10020C
85
86#define mmMME4_RTR_LBW_RD_RQ_L_ARB 0x100210
87
88#define mmMME4_RTR_LBW_E_ARB_MAX 0x100220
89
90#define mmMME4_RTR_LBW_W_ARB_MAX 0x100224
91
92#define mmMME4_RTR_LBW_N_ARB_MAX 0x100228
93
94#define mmMME4_RTR_LBW_S_ARB_MAX 0x10022C
95
96#define mmMME4_RTR_LBW_L_ARB_MAX 0x100230
97
98#define mmMME4_RTR_LBW_SRAM_MAX_CREDIT 0x100240
99
100#define mmMME4_RTR_LBW_RD_RS_E_ARB 0x100250
101
102#define mmMME4_RTR_LBW_RD_RS_W_ARB 0x100254
103
104#define mmMME4_RTR_LBW_RD_RS_N_ARB 0x100258
105
106#define mmMME4_RTR_LBW_RD_RS_S_ARB 0x10025C
107
108#define mmMME4_RTR_LBW_RD_RS_L_ARB 0x100260
109
110#define mmMME4_RTR_LBW_WR_RQ_E_ARB 0x100270
111
112#define mmMME4_RTR_LBW_WR_RQ_W_ARB 0x100274
113
114#define mmMME4_RTR_LBW_WR_RQ_N_ARB 0x100278
115
116#define mmMME4_RTR_LBW_WR_RQ_S_ARB 0x10027C
117
118#define mmMME4_RTR_LBW_WR_RQ_L_ARB 0x100280
119
120#define mmMME4_RTR_LBW_WR_RS_E_ARB 0x100290
121
122#define mmMME4_RTR_LBW_WR_RS_W_ARB 0x100294
123
124#define mmMME4_RTR_LBW_WR_RS_N_ARB 0x100298
125
126#define mmMME4_RTR_LBW_WR_RS_S_ARB 0x10029C
127
128#define mmMME4_RTR_LBW_WR_RS_L_ARB 0x1002A0
129
130#define mmMME4_RTR_DBG_E_ARB 0x100300
131
132#define mmMME4_RTR_DBG_W_ARB 0x100304
133
134#define mmMME4_RTR_DBG_N_ARB 0x100308
135
136#define mmMME4_RTR_DBG_S_ARB 0x10030C
137
138#define mmMME4_RTR_DBG_L_ARB 0x100310
139
140#define mmMME4_RTR_DBG_E_ARB_MAX 0x100320
141
142#define mmMME4_RTR_DBG_W_ARB_MAX 0x100324
143
144#define mmMME4_RTR_DBG_N_ARB_MAX 0x100328
145
146#define mmMME4_RTR_DBG_S_ARB_MAX 0x10032C
147
148#define mmMME4_RTR_DBG_L_ARB_MAX 0x100330
149
150#define mmMME4_RTR_SPLIT_COEF_0 0x100400
151
152#define mmMME4_RTR_SPLIT_COEF_1 0x100404
153
154#define mmMME4_RTR_SPLIT_COEF_2 0x100408
155
156#define mmMME4_RTR_SPLIT_COEF_3 0x10040C
157
158#define mmMME4_RTR_SPLIT_COEF_4 0x100410
159
160#define mmMME4_RTR_SPLIT_COEF_5 0x100414
161
162#define mmMME4_RTR_SPLIT_COEF_6 0x100418
163
164#define mmMME4_RTR_SPLIT_COEF_7 0x10041C
165
166#define mmMME4_RTR_SPLIT_COEF_8 0x100420
167
168#define mmMME4_RTR_SPLIT_COEF_9 0x100424
169
170#define mmMME4_RTR_SPLIT_CFG 0x100440
171
172#define mmMME4_RTR_SPLIT_RD_SAT 0x100444
173
174#define mmMME4_RTR_SPLIT_RD_RST_TOKEN 0x100448
175
176#define mmMME4_RTR_SPLIT_RD_TIMEOUT_0 0x10044C
177
178#define mmMME4_RTR_SPLIT_RD_TIMEOUT_1 0x100450
179
180#define mmMME4_RTR_SPLIT_WR_SAT 0x100454
181
182#define mmMME4_RTR_WPLIT_WR_TST_TOLEN 0x100458
183
184#define mmMME4_RTR_SPLIT_WR_TIMEOUT_0 0x10045C
185
186#define mmMME4_RTR_SPLIT_WR_TIMEOUT_1 0x100460
187
188#define mmMME4_RTR_HBW_RANGE_HIT 0x100470
189
190#define mmMME4_RTR_HBW_RANGE_MASK_L_0 0x100480
191
192#define mmMME4_RTR_HBW_RANGE_MASK_L_1 0x100484
193
194#define mmMME4_RTR_HBW_RANGE_MASK_L_2 0x100488
195
196#define mmMME4_RTR_HBW_RANGE_MASK_L_3 0x10048C
197
198#define mmMME4_RTR_HBW_RANGE_MASK_L_4 0x100490
199
200#define mmMME4_RTR_HBW_RANGE_MASK_L_5 0x100494
201
202#define mmMME4_RTR_HBW_RANGE_MASK_L_6 0x100498
203
204#define mmMME4_RTR_HBW_RANGE_MASK_L_7 0x10049C
205
206#define mmMME4_RTR_HBW_RANGE_MASK_H_0 0x1004A0
207
208#define mmMME4_RTR_HBW_RANGE_MASK_H_1 0x1004A4
209
210#define mmMME4_RTR_HBW_RANGE_MASK_H_2 0x1004A8
211
212#define mmMME4_RTR_HBW_RANGE_MASK_H_3 0x1004AC
213
214#define mmMME4_RTR_HBW_RANGE_MASK_H_4 0x1004B0
215
216#define mmMME4_RTR_HBW_RANGE_MASK_H_5 0x1004B4
217
218#define mmMME4_RTR_HBW_RANGE_MASK_H_6 0x1004B8
219
220#define mmMME4_RTR_HBW_RANGE_MASK_H_7 0x1004BC
221
222#define mmMME4_RTR_HBW_RANGE_BASE_L_0 0x1004C0
223
224#define mmMME4_RTR_HBW_RANGE_BASE_L_1 0x1004C4
225
226#define mmMME4_RTR_HBW_RANGE_BASE_L_2 0x1004C8
227
228#define mmMME4_RTR_HBW_RANGE_BASE_L_3 0x1004CC
229
230#define mmMME4_RTR_HBW_RANGE_BASE_L_4 0x1004D0
231
232#define mmMME4_RTR_HBW_RANGE_BASE_L_5 0x1004D4
233
234#define mmMME4_RTR_HBW_RANGE_BASE_L_6 0x1004D8
235
236#define mmMME4_RTR_HBW_RANGE_BASE_L_7 0x1004DC
237
238#define mmMME4_RTR_HBW_RANGE_BASE_H_0 0x1004E0
239
240#define mmMME4_RTR_HBW_RANGE_BASE_H_1 0x1004E4
241
242#define mmMME4_RTR_HBW_RANGE_BASE_H_2 0x1004E8
243
244#define mmMME4_RTR_HBW_RANGE_BASE_H_3 0x1004EC
245
246#define mmMME4_RTR_HBW_RANGE_BASE_H_4 0x1004F0
247
248#define mmMME4_RTR_HBW_RANGE_BASE_H_5 0x1004F4
249
250#define mmMME4_RTR_HBW_RANGE_BASE_H_6 0x1004F8
251
252#define mmMME4_RTR_HBW_RANGE_BASE_H_7 0x1004FC
253
254#define mmMME4_RTR_LBW_RANGE_HIT 0x100500
255
256#define mmMME4_RTR_LBW_RANGE_MASK_0 0x100510
257
258#define mmMME4_RTR_LBW_RANGE_MASK_1 0x100514
259
260#define mmMME4_RTR_LBW_RANGE_MASK_2 0x100518
261
262#define mmMME4_RTR_LBW_RANGE_MASK_3 0x10051C
263
264#define mmMME4_RTR_LBW_RANGE_MASK_4 0x100520
265
266#define mmMME4_RTR_LBW_RANGE_MASK_5 0x100524
267
268#define mmMME4_RTR_LBW_RANGE_MASK_6 0x100528
269
270#define mmMME4_RTR_LBW_RANGE_MASK_7 0x10052C
271
272#define mmMME4_RTR_LBW_RANGE_MASK_8 0x100530
273
274#define mmMME4_RTR_LBW_RANGE_MASK_9 0x100534
275
276#define mmMME4_RTR_LBW_RANGE_MASK_10 0x100538
277
278#define mmMME4_RTR_LBW_RANGE_MASK_11 0x10053C
279
280#define mmMME4_RTR_LBW_RANGE_MASK_12 0x100540
281
282#define mmMME4_RTR_LBW_RANGE_MASK_13 0x100544
283
284#define mmMME4_RTR_LBW_RANGE_MASK_14 0x100548
285
286#define mmMME4_RTR_LBW_RANGE_MASK_15 0x10054C
287
288#define mmMME4_RTR_LBW_RANGE_BASE_0 0x100550
289
290#define mmMME4_RTR_LBW_RANGE_BASE_1 0x100554
291
292#define mmMME4_RTR_LBW_RANGE_BASE_2 0x100558
293
294#define mmMME4_RTR_LBW_RANGE_BASE_3 0x10055C
295
296#define mmMME4_RTR_LBW_RANGE_BASE_4 0x100560
297
298#define mmMME4_RTR_LBW_RANGE_BASE_5 0x100564
299
300#define mmMME4_RTR_LBW_RANGE_BASE_6 0x100568
301
302#define mmMME4_RTR_LBW_RANGE_BASE_7 0x10056C
303
304#define mmMME4_RTR_LBW_RANGE_BASE_8 0x100570
305
306#define mmMME4_RTR_LBW_RANGE_BASE_9 0x100574
307
308#define mmMME4_RTR_LBW_RANGE_BASE_10 0x100578
309
310#define mmMME4_RTR_LBW_RANGE_BASE_11 0x10057C
311
312#define mmMME4_RTR_LBW_RANGE_BASE_12 0x100580
313
314#define mmMME4_RTR_LBW_RANGE_BASE_13 0x100584
315
316#define mmMME4_RTR_LBW_RANGE_BASE_14 0x100588
317
318#define mmMME4_RTR_LBW_RANGE_BASE_15 0x10058C
319
320#define mmMME4_RTR_RGLTR 0x100590
321
322#define mmMME4_RTR_RGLTR_WR_RESULT 0x100594
323
324#define mmMME4_RTR_RGLTR_RD_RESULT 0x100598
325
326#define mmMME4_RTR_SCRAMB_EN 0x100600
327
328#define mmMME4_RTR_NON_LIN_SCRAMB 0x100604
329
330#endif /* ASIC_REG_MME4_RTR_REGS_H_ */
331
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
new file mode 100644
index 000000000000..205adc988407
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
@@ -0,0 +1,331 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MME5_RTR_REGS_H_
14#define ASIC_REG_MME5_RTR_REGS_H_
15
16/*
17 *****************************************
18 * MME5_RTR (Prototype: MME_RTR)
19 *****************************************
20 */
21
22#define mmMME5_RTR_HBW_RD_RQ_E_ARB 0x140100
23
24#define mmMME5_RTR_HBW_RD_RQ_W_ARB 0x140104
25
26#define mmMME5_RTR_HBW_RD_RQ_N_ARB 0x140108
27
28#define mmMME5_RTR_HBW_RD_RQ_S_ARB 0x14010C
29
30#define mmMME5_RTR_HBW_RD_RQ_L_ARB 0x140110
31
32#define mmMME5_RTR_HBW_E_ARB_MAX 0x140120
33
34#define mmMME5_RTR_HBW_W_ARB_MAX 0x140124
35
36#define mmMME5_RTR_HBW_N_ARB_MAX 0x140128
37
38#define mmMME5_RTR_HBW_S_ARB_MAX 0x14012C
39
40#define mmMME5_RTR_HBW_L_ARB_MAX 0x140130
41
42#define mmMME5_RTR_HBW_RD_RS_MAX_CREDIT 0x140140
43
44#define mmMME5_RTR_HBW_WR_RQ_MAX_CREDIT 0x140144
45
46#define mmMME5_RTR_HBW_RD_RQ_MAX_CREDIT 0x140148
47
48#define mmMME5_RTR_HBW_RD_RS_E_ARB 0x140150
49
50#define mmMME5_RTR_HBW_RD_RS_W_ARB 0x140154
51
52#define mmMME5_RTR_HBW_RD_RS_N_ARB 0x140158
53
54#define mmMME5_RTR_HBW_RD_RS_S_ARB 0x14015C
55
56#define mmMME5_RTR_HBW_RD_RS_L_ARB 0x140160
57
58#define mmMME5_RTR_HBW_WR_RQ_E_ARB 0x140170
59
60#define mmMME5_RTR_HBW_WR_RQ_W_ARB 0x140174
61
62#define mmMME5_RTR_HBW_WR_RQ_N_ARB 0x140178
63
64#define mmMME5_RTR_HBW_WR_RQ_S_ARB 0x14017C
65
66#define mmMME5_RTR_HBW_WR_RQ_L_ARB 0x140180
67
68#define mmMME5_RTR_HBW_WR_RS_E_ARB 0x140190
69
70#define mmMME5_RTR_HBW_WR_RS_W_ARB 0x140194
71
72#define mmMME5_RTR_HBW_WR_RS_N_ARB 0x140198
73
74#define mmMME5_RTR_HBW_WR_RS_S_ARB 0x14019C
75
76#define mmMME5_RTR_HBW_WR_RS_L_ARB 0x1401A0
77
78#define mmMME5_RTR_LBW_RD_RQ_E_ARB 0x140200
79
80#define mmMME5_RTR_LBW_RD_RQ_W_ARB 0x140204
81
82#define mmMME5_RTR_LBW_RD_RQ_N_ARB 0x140208
83
84#define mmMME5_RTR_LBW_RD_RQ_S_ARB 0x14020C
85
86#define mmMME5_RTR_LBW_RD_RQ_L_ARB 0x140210
87
88#define mmMME5_RTR_LBW_E_ARB_MAX 0x140220
89
90#define mmMME5_RTR_LBW_W_ARB_MAX 0x140224
91
92#define mmMME5_RTR_LBW_N_ARB_MAX 0x140228
93
94#define mmMME5_RTR_LBW_S_ARB_MAX 0x14022C
95
96#define mmMME5_RTR_LBW_L_ARB_MAX 0x140230
97
98#define mmMME5_RTR_LBW_SRAM_MAX_CREDIT 0x140240
99
100#define mmMME5_RTR_LBW_RD_RS_E_ARB 0x140250
101
102#define mmMME5_RTR_LBW_RD_RS_W_ARB 0x140254
103
104#define mmMME5_RTR_LBW_RD_RS_N_ARB 0x140258
105
106#define mmMME5_RTR_LBW_RD_RS_S_ARB 0x14025C
107
108#define mmMME5_RTR_LBW_RD_RS_L_ARB 0x140260
109
110#define mmMME5_RTR_LBW_WR_RQ_E_ARB 0x140270
111
112#define mmMME5_RTR_LBW_WR_RQ_W_ARB 0x140274
113
114#define mmMME5_RTR_LBW_WR_RQ_N_ARB 0x140278
115
116#define mmMME5_RTR_LBW_WR_RQ_S_ARB 0x14027C
117
118#define mmMME5_RTR_LBW_WR_RQ_L_ARB 0x140280
119
120#define mmMME5_RTR_LBW_WR_RS_E_ARB 0x140290
121
122#define mmMME5_RTR_LBW_WR_RS_W_ARB 0x140294
123
124#define mmMME5_RTR_LBW_WR_RS_N_ARB 0x140298
125
126#define mmMME5_RTR_LBW_WR_RS_S_ARB 0x14029C
127
128#define mmMME5_RTR_LBW_WR_RS_L_ARB 0x1402A0
129
130#define mmMME5_RTR_DBG_E_ARB 0x140300
131
132#define mmMME5_RTR_DBG_W_ARB 0x140304
133
134#define mmMME5_RTR_DBG_N_ARB 0x140308
135
136#define mmMME5_RTR_DBG_S_ARB 0x14030C
137
138#define mmMME5_RTR_DBG_L_ARB 0x140310
139
140#define mmMME5_RTR_DBG_E_ARB_MAX 0x140320
141
142#define mmMME5_RTR_DBG_W_ARB_MAX 0x140324
143
144#define mmMME5_RTR_DBG_N_ARB_MAX 0x140328
145
146#define mmMME5_RTR_DBG_S_ARB_MAX 0x14032C
147
148#define mmMME5_RTR_DBG_L_ARB_MAX 0x140330
149
150#define mmMME5_RTR_SPLIT_COEF_0 0x140400
151
152#define mmMME5_RTR_SPLIT_COEF_1 0x140404
153
154#define mmMME5_RTR_SPLIT_COEF_2 0x140408
155
156#define mmMME5_RTR_SPLIT_COEF_3 0x14040C
157
158#define mmMME5_RTR_SPLIT_COEF_4 0x140410
159
160#define mmMME5_RTR_SPLIT_COEF_5 0x140414
161
162#define mmMME5_RTR_SPLIT_COEF_6 0x140418
163
164#define mmMME5_RTR_SPLIT_COEF_7 0x14041C
165
166#define mmMME5_RTR_SPLIT_COEF_8 0x140420
167
168#define mmMME5_RTR_SPLIT_COEF_9 0x140424
169
170#define mmMME5_RTR_SPLIT_CFG 0x140440
171
172#define mmMME5_RTR_SPLIT_RD_SAT 0x140444
173
174#define mmMME5_RTR_SPLIT_RD_RST_TOKEN 0x140448
175
176#define mmMME5_RTR_SPLIT_RD_TIMEOUT_0 0x14044C
177
178#define mmMME5_RTR_SPLIT_RD_TIMEOUT_1 0x140450
179
180#define mmMME5_RTR_SPLIT_WR_SAT 0x140454
181
182#define mmMME5_RTR_WPLIT_WR_TST_TOLEN 0x140458
183
184#define mmMME5_RTR_SPLIT_WR_TIMEOUT_0 0x14045C
185
186#define mmMME5_RTR_SPLIT_WR_TIMEOUT_1 0x140460
187
188#define mmMME5_RTR_HBW_RANGE_HIT 0x140470
189
190#define mmMME5_RTR_HBW_RANGE_MASK_L_0 0x140480
191
192#define mmMME5_RTR_HBW_RANGE_MASK_L_1 0x140484
193
194#define mmMME5_RTR_HBW_RANGE_MASK_L_2 0x140488
195
196#define mmMME5_RTR_HBW_RANGE_MASK_L_3 0x14048C
197
198#define mmMME5_RTR_HBW_RANGE_MASK_L_4 0x140490
199
200#define mmMME5_RTR_HBW_RANGE_MASK_L_5 0x140494
201
202#define mmMME5_RTR_HBW_RANGE_MASK_L_6 0x140498
203
204#define mmMME5_RTR_HBW_RANGE_MASK_L_7 0x14049C
205
206#define mmMME5_RTR_HBW_RANGE_MASK_H_0 0x1404A0
207
208#define mmMME5_RTR_HBW_RANGE_MASK_H_1 0x1404A4
209
210#define mmMME5_RTR_HBW_RANGE_MASK_H_2 0x1404A8
211
212#define mmMME5_RTR_HBW_RANGE_MASK_H_3 0x1404AC
213
214#define mmMME5_RTR_HBW_RANGE_MASK_H_4 0x1404B0
215
216#define mmMME5_RTR_HBW_RANGE_MASK_H_5 0x1404B4
217
218#define mmMME5_RTR_HBW_RANGE_MASK_H_6 0x1404B8
219
220#define mmMME5_RTR_HBW_RANGE_MASK_H_7 0x1404BC
221
222#define mmMME5_RTR_HBW_RANGE_BASE_L_0 0x1404C0
223
224#define mmMME5_RTR_HBW_RANGE_BASE_L_1 0x1404C4
225
226#define mmMME5_RTR_HBW_RANGE_BASE_L_2 0x1404C8
227
228#define mmMME5_RTR_HBW_RANGE_BASE_L_3 0x1404CC
229
230#define mmMME5_RTR_HBW_RANGE_BASE_L_4 0x1404D0
231
232#define mmMME5_RTR_HBW_RANGE_BASE_L_5 0x1404D4
233
234#define mmMME5_RTR_HBW_RANGE_BASE_L_6 0x1404D8
235
236#define mmMME5_RTR_HBW_RANGE_BASE_L_7 0x1404DC
237
238#define mmMME5_RTR_HBW_RANGE_BASE_H_0 0x1404E0
239
240#define mmMME5_RTR_HBW_RANGE_BASE_H_1 0x1404E4
241
242#define mmMME5_RTR_HBW_RANGE_BASE_H_2 0x1404E8
243
244#define mmMME5_RTR_HBW_RANGE_BASE_H_3 0x1404EC
245
246#define mmMME5_RTR_HBW_RANGE_BASE_H_4 0x1404F0
247
248#define mmMME5_RTR_HBW_RANGE_BASE_H_5 0x1404F4
249
250#define mmMME5_RTR_HBW_RANGE_BASE_H_6 0x1404F8
251
252#define mmMME5_RTR_HBW_RANGE_BASE_H_7 0x1404FC
253
254#define mmMME5_RTR_LBW_RANGE_HIT 0x140500
255
256#define mmMME5_RTR_LBW_RANGE_MASK_0 0x140510
257
258#define mmMME5_RTR_LBW_RANGE_MASK_1 0x140514
259
260#define mmMME5_RTR_LBW_RANGE_MASK_2 0x140518
261
262#define mmMME5_RTR_LBW_RANGE_MASK_3 0x14051C
263
264#define mmMME5_RTR_LBW_RANGE_MASK_4 0x140520
265
266#define mmMME5_RTR_LBW_RANGE_MASK_5 0x140524
267
268#define mmMME5_RTR_LBW_RANGE_MASK_6 0x140528
269
270#define mmMME5_RTR_LBW_RANGE_MASK_7 0x14052C
271
272#define mmMME5_RTR_LBW_RANGE_MASK_8 0x140530
273
274#define mmMME5_RTR_LBW_RANGE_MASK_9 0x140534
275
276#define mmMME5_RTR_LBW_RANGE_MASK_10 0x140538
277
278#define mmMME5_RTR_LBW_RANGE_MASK_11 0x14053C
279
280#define mmMME5_RTR_LBW_RANGE_MASK_12 0x140540
281
282#define mmMME5_RTR_LBW_RANGE_MASK_13 0x140544
283
284#define mmMME5_RTR_LBW_RANGE_MASK_14 0x140548
285
286#define mmMME5_RTR_LBW_RANGE_MASK_15 0x14054C
287
288#define mmMME5_RTR_LBW_RANGE_BASE_0 0x140550
289
290#define mmMME5_RTR_LBW_RANGE_BASE_1 0x140554
291
292#define mmMME5_RTR_LBW_RANGE_BASE_2 0x140558
293
294#define mmMME5_RTR_LBW_RANGE_BASE_3 0x14055C
295
296#define mmMME5_RTR_LBW_RANGE_BASE_4 0x140560
297
298#define mmMME5_RTR_LBW_RANGE_BASE_5 0x140564
299
300#define mmMME5_RTR_LBW_RANGE_BASE_6 0x140568
301
302#define mmMME5_RTR_LBW_RANGE_BASE_7 0x14056C
303
304#define mmMME5_RTR_LBW_RANGE_BASE_8 0x140570
305
306#define mmMME5_RTR_LBW_RANGE_BASE_9 0x140574
307
308#define mmMME5_RTR_LBW_RANGE_BASE_10 0x140578
309
310#define mmMME5_RTR_LBW_RANGE_BASE_11 0x14057C
311
312#define mmMME5_RTR_LBW_RANGE_BASE_12 0x140580
313
314#define mmMME5_RTR_LBW_RANGE_BASE_13 0x140584
315
316#define mmMME5_RTR_LBW_RANGE_BASE_14 0x140588
317
318#define mmMME5_RTR_LBW_RANGE_BASE_15 0x14058C
319
320#define mmMME5_RTR_RGLTR 0x140590
321
322#define mmMME5_RTR_RGLTR_WR_RESULT 0x140594
323
324#define mmMME5_RTR_RGLTR_RD_RESULT 0x140598
325
326#define mmMME5_RTR_SCRAMB_EN 0x140600
327
328#define mmMME5_RTR_NON_LIN_SCRAMB 0x140604
329
330#endif /* ASIC_REG_MME5_RTR_REGS_H_ */
331
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
new file mode 100644
index 000000000000..fcec68388278
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
@@ -0,0 +1,331 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MME6_RTR_REGS_H_
14#define ASIC_REG_MME6_RTR_REGS_H_
15
16/*
17 *****************************************
18 * MME6_RTR (Prototype: MME_RTR)
19 *****************************************
20 */
21
22#define mmMME6_RTR_HBW_RD_RQ_E_ARB 0x180100
23
24#define mmMME6_RTR_HBW_RD_RQ_W_ARB 0x180104
25
26#define mmMME6_RTR_HBW_RD_RQ_N_ARB 0x180108
27
28#define mmMME6_RTR_HBW_RD_RQ_S_ARB 0x18010C
29
30#define mmMME6_RTR_HBW_RD_RQ_L_ARB 0x180110
31
32#define mmMME6_RTR_HBW_E_ARB_MAX 0x180120
33
34#define mmMME6_RTR_HBW_W_ARB_MAX 0x180124
35
36#define mmMME6_RTR_HBW_N_ARB_MAX 0x180128
37
38#define mmMME6_RTR_HBW_S_ARB_MAX 0x18012C
39
40#define mmMME6_RTR_HBW_L_ARB_MAX 0x180130
41
42#define mmMME6_RTR_HBW_RD_RS_MAX_CREDIT 0x180140
43
44#define mmMME6_RTR_HBW_WR_RQ_MAX_CREDIT 0x180144
45
46#define mmMME6_RTR_HBW_RD_RQ_MAX_CREDIT 0x180148
47
48#define mmMME6_RTR_HBW_RD_RS_E_ARB 0x180150
49
50#define mmMME6_RTR_HBW_RD_RS_W_ARB 0x180154
51
52#define mmMME6_RTR_HBW_RD_RS_N_ARB 0x180158
53
54#define mmMME6_RTR_HBW_RD_RS_S_ARB 0x18015C
55
56#define mmMME6_RTR_HBW_RD_RS_L_ARB 0x180160
57
58#define mmMME6_RTR_HBW_WR_RQ_E_ARB 0x180170
59
60#define mmMME6_RTR_HBW_WR_RQ_W_ARB 0x180174
61
62#define mmMME6_RTR_HBW_WR_RQ_N_ARB 0x180178
63
64#define mmMME6_RTR_HBW_WR_RQ_S_ARB 0x18017C
65
66#define mmMME6_RTR_HBW_WR_RQ_L_ARB 0x180180
67
68#define mmMME6_RTR_HBW_WR_RS_E_ARB 0x180190
69
70#define mmMME6_RTR_HBW_WR_RS_W_ARB 0x180194
71
72#define mmMME6_RTR_HBW_WR_RS_N_ARB 0x180198
73
74#define mmMME6_RTR_HBW_WR_RS_S_ARB 0x18019C
75
76#define mmMME6_RTR_HBW_WR_RS_L_ARB 0x1801A0
77
78#define mmMME6_RTR_LBW_RD_RQ_E_ARB 0x180200
79
80#define mmMME6_RTR_LBW_RD_RQ_W_ARB 0x180204
81
82#define mmMME6_RTR_LBW_RD_RQ_N_ARB 0x180208
83
84#define mmMME6_RTR_LBW_RD_RQ_S_ARB 0x18020C
85
86#define mmMME6_RTR_LBW_RD_RQ_L_ARB 0x180210
87
88#define mmMME6_RTR_LBW_E_ARB_MAX 0x180220
89
90#define mmMME6_RTR_LBW_W_ARB_MAX 0x180224
91
92#define mmMME6_RTR_LBW_N_ARB_MAX 0x180228
93
94#define mmMME6_RTR_LBW_S_ARB_MAX 0x18022C
95
96#define mmMME6_RTR_LBW_L_ARB_MAX 0x180230
97
98#define mmMME6_RTR_LBW_SRAM_MAX_CREDIT 0x180240
99
100#define mmMME6_RTR_LBW_RD_RS_E_ARB 0x180250
101
102#define mmMME6_RTR_LBW_RD_RS_W_ARB 0x180254
103
104#define mmMME6_RTR_LBW_RD_RS_N_ARB 0x180258
105
106#define mmMME6_RTR_LBW_RD_RS_S_ARB 0x18025C
107
108#define mmMME6_RTR_LBW_RD_RS_L_ARB 0x180260
109
110#define mmMME6_RTR_LBW_WR_RQ_E_ARB 0x180270
111
112#define mmMME6_RTR_LBW_WR_RQ_W_ARB 0x180274
113
114#define mmMME6_RTR_LBW_WR_RQ_N_ARB 0x180278
115
116#define mmMME6_RTR_LBW_WR_RQ_S_ARB 0x18027C
117
118#define mmMME6_RTR_LBW_WR_RQ_L_ARB 0x180280
119
120#define mmMME6_RTR_LBW_WR_RS_E_ARB 0x180290
121
122#define mmMME6_RTR_LBW_WR_RS_W_ARB 0x180294
123
124#define mmMME6_RTR_LBW_WR_RS_N_ARB 0x180298
125
126#define mmMME6_RTR_LBW_WR_RS_S_ARB 0x18029C
127
128#define mmMME6_RTR_LBW_WR_RS_L_ARB 0x1802A0
129
130#define mmMME6_RTR_DBG_E_ARB 0x180300
131
132#define mmMME6_RTR_DBG_W_ARB 0x180304
133
134#define mmMME6_RTR_DBG_N_ARB 0x180308
135
136#define mmMME6_RTR_DBG_S_ARB 0x18030C
137
138#define mmMME6_RTR_DBG_L_ARB 0x180310
139
140#define mmMME6_RTR_DBG_E_ARB_MAX 0x180320
141
142#define mmMME6_RTR_DBG_W_ARB_MAX 0x180324
143
144#define mmMME6_RTR_DBG_N_ARB_MAX 0x180328
145
146#define mmMME6_RTR_DBG_S_ARB_MAX 0x18032C
147
148#define mmMME6_RTR_DBG_L_ARB_MAX 0x180330
149
150#define mmMME6_RTR_SPLIT_COEF_0 0x180400
151
152#define mmMME6_RTR_SPLIT_COEF_1 0x180404
153
154#define mmMME6_RTR_SPLIT_COEF_2 0x180408
155
156#define mmMME6_RTR_SPLIT_COEF_3 0x18040C
157
158#define mmMME6_RTR_SPLIT_COEF_4 0x180410
159
160#define mmMME6_RTR_SPLIT_COEF_5 0x180414
161
162#define mmMME6_RTR_SPLIT_COEF_6 0x180418
163
164#define mmMME6_RTR_SPLIT_COEF_7 0x18041C
165
166#define mmMME6_RTR_SPLIT_COEF_8 0x180420
167
168#define mmMME6_RTR_SPLIT_COEF_9 0x180424
169
170#define mmMME6_RTR_SPLIT_CFG 0x180440
171
172#define mmMME6_RTR_SPLIT_RD_SAT 0x180444
173
174#define mmMME6_RTR_SPLIT_RD_RST_TOKEN 0x180448
175
176#define mmMME6_RTR_SPLIT_RD_TIMEOUT_0 0x18044C
177
178#define mmMME6_RTR_SPLIT_RD_TIMEOUT_1 0x180450
179
180#define mmMME6_RTR_SPLIT_WR_SAT 0x180454
181
182#define mmMME6_RTR_WPLIT_WR_TST_TOLEN 0x180458
183
184#define mmMME6_RTR_SPLIT_WR_TIMEOUT_0 0x18045C
185
186#define mmMME6_RTR_SPLIT_WR_TIMEOUT_1 0x180460
187
188#define mmMME6_RTR_HBW_RANGE_HIT 0x180470
189
190#define mmMME6_RTR_HBW_RANGE_MASK_L_0 0x180480
191
192#define mmMME6_RTR_HBW_RANGE_MASK_L_1 0x180484
193
194#define mmMME6_RTR_HBW_RANGE_MASK_L_2 0x180488
195
196#define mmMME6_RTR_HBW_RANGE_MASK_L_3 0x18048C
197
198#define mmMME6_RTR_HBW_RANGE_MASK_L_4 0x180490
199
200#define mmMME6_RTR_HBW_RANGE_MASK_L_5 0x180494
201
202#define mmMME6_RTR_HBW_RANGE_MASK_L_6 0x180498
203
204#define mmMME6_RTR_HBW_RANGE_MASK_L_7 0x18049C
205
206#define mmMME6_RTR_HBW_RANGE_MASK_H_0 0x1804A0
207
208#define mmMME6_RTR_HBW_RANGE_MASK_H_1 0x1804A4
209
210#define mmMME6_RTR_HBW_RANGE_MASK_H_2 0x1804A8
211
212#define mmMME6_RTR_HBW_RANGE_MASK_H_3 0x1804AC
213
214#define mmMME6_RTR_HBW_RANGE_MASK_H_4 0x1804B0
215
216#define mmMME6_RTR_HBW_RANGE_MASK_H_5 0x1804B4
217
218#define mmMME6_RTR_HBW_RANGE_MASK_H_6 0x1804B8
219
220#define mmMME6_RTR_HBW_RANGE_MASK_H_7 0x1804BC
221
222#define mmMME6_RTR_HBW_RANGE_BASE_L_0 0x1804C0
223
224#define mmMME6_RTR_HBW_RANGE_BASE_L_1 0x1804C4
225
226#define mmMME6_RTR_HBW_RANGE_BASE_L_2 0x1804C8
227
228#define mmMME6_RTR_HBW_RANGE_BASE_L_3 0x1804CC
229
230#define mmMME6_RTR_HBW_RANGE_BASE_L_4 0x1804D0
231
232#define mmMME6_RTR_HBW_RANGE_BASE_L_5 0x1804D4
233
234#define mmMME6_RTR_HBW_RANGE_BASE_L_6 0x1804D8
235
236#define mmMME6_RTR_HBW_RANGE_BASE_L_7 0x1804DC
237
238#define mmMME6_RTR_HBW_RANGE_BASE_H_0 0x1804E0
239
240#define mmMME6_RTR_HBW_RANGE_BASE_H_1 0x1804E4
241
242#define mmMME6_RTR_HBW_RANGE_BASE_H_2 0x1804E8
243
244#define mmMME6_RTR_HBW_RANGE_BASE_H_3 0x1804EC
245
246#define mmMME6_RTR_HBW_RANGE_BASE_H_4 0x1804F0
247
248#define mmMME6_RTR_HBW_RANGE_BASE_H_5 0x1804F4
249
250#define mmMME6_RTR_HBW_RANGE_BASE_H_6 0x1804F8
251
252#define mmMME6_RTR_HBW_RANGE_BASE_H_7 0x1804FC
253
254#define mmMME6_RTR_LBW_RANGE_HIT 0x180500
255
256#define mmMME6_RTR_LBW_RANGE_MASK_0 0x180510
257
258#define mmMME6_RTR_LBW_RANGE_MASK_1 0x180514
259
260#define mmMME6_RTR_LBW_RANGE_MASK_2 0x180518
261
262#define mmMME6_RTR_LBW_RANGE_MASK_3 0x18051C
263
264#define mmMME6_RTR_LBW_RANGE_MASK_4 0x180520
265
266#define mmMME6_RTR_LBW_RANGE_MASK_5 0x180524
267
268#define mmMME6_RTR_LBW_RANGE_MASK_6 0x180528
269
270#define mmMME6_RTR_LBW_RANGE_MASK_7 0x18052C
271
272#define mmMME6_RTR_LBW_RANGE_MASK_8 0x180530
273
274#define mmMME6_RTR_LBW_RANGE_MASK_9 0x180534
275
276#define mmMME6_RTR_LBW_RANGE_MASK_10 0x180538
277
278#define mmMME6_RTR_LBW_RANGE_MASK_11 0x18053C
279
280#define mmMME6_RTR_LBW_RANGE_MASK_12 0x180540
281
282#define mmMME6_RTR_LBW_RANGE_MASK_13 0x180544
283
284#define mmMME6_RTR_LBW_RANGE_MASK_14 0x180548
285
286#define mmMME6_RTR_LBW_RANGE_MASK_15 0x18054C
287
288#define mmMME6_RTR_LBW_RANGE_BASE_0 0x180550
289
290#define mmMME6_RTR_LBW_RANGE_BASE_1 0x180554
291
292#define mmMME6_RTR_LBW_RANGE_BASE_2 0x180558
293
294#define mmMME6_RTR_LBW_RANGE_BASE_3 0x18055C
295
296#define mmMME6_RTR_LBW_RANGE_BASE_4 0x180560
297
298#define mmMME6_RTR_LBW_RANGE_BASE_5 0x180564
299
300#define mmMME6_RTR_LBW_RANGE_BASE_6 0x180568
301
302#define mmMME6_RTR_LBW_RANGE_BASE_7 0x18056C
303
304#define mmMME6_RTR_LBW_RANGE_BASE_8 0x180570
305
306#define mmMME6_RTR_LBW_RANGE_BASE_9 0x180574
307
308#define mmMME6_RTR_LBW_RANGE_BASE_10 0x180578
309
310#define mmMME6_RTR_LBW_RANGE_BASE_11 0x18057C
311
312#define mmMME6_RTR_LBW_RANGE_BASE_12 0x180580
313
314#define mmMME6_RTR_LBW_RANGE_BASE_13 0x180584
315
316#define mmMME6_RTR_LBW_RANGE_BASE_14 0x180588
317
318#define mmMME6_RTR_LBW_RANGE_BASE_15 0x18058C
319
320#define mmMME6_RTR_RGLTR 0x180590
321
322#define mmMME6_RTR_RGLTR_WR_RESULT 0x180594
323
324#define mmMME6_RTR_RGLTR_RD_RESULT 0x180598
325
326#define mmMME6_RTR_SCRAMB_EN 0x180600
327
328#define mmMME6_RTR_NON_LIN_SCRAMB 0x180604
329
330#endif /* ASIC_REG_MME6_RTR_REGS_H_ */
331
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
new file mode 100644
index 000000000000..a0d4382fbbd0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
@@ -0,0 +1,373 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MME_CMDQ_MASKS_H_
14#define ASIC_REG_MME_CMDQ_MASKS_H_
15
16/*
17 *****************************************
18 * MME_CMDQ (Prototype: CMDQ)
19 *****************************************
20 */
21
22/* MME_CMDQ_GLBL_CFG0 */
23#define MME_CMDQ_GLBL_CFG0_PQF_EN_SHIFT 0
24#define MME_CMDQ_GLBL_CFG0_PQF_EN_MASK 0x1
25#define MME_CMDQ_GLBL_CFG0_CQF_EN_SHIFT 1
26#define MME_CMDQ_GLBL_CFG0_CQF_EN_MASK 0x2
27#define MME_CMDQ_GLBL_CFG0_CP_EN_SHIFT 2
28#define MME_CMDQ_GLBL_CFG0_CP_EN_MASK 0x4
29#define MME_CMDQ_GLBL_CFG0_DMA_EN_SHIFT 3
30#define MME_CMDQ_GLBL_CFG0_DMA_EN_MASK 0x8
31
32/* MME_CMDQ_GLBL_CFG1 */
33#define MME_CMDQ_GLBL_CFG1_PQF_STOP_SHIFT 0
34#define MME_CMDQ_GLBL_CFG1_PQF_STOP_MASK 0x1
35#define MME_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT 1
36#define MME_CMDQ_GLBL_CFG1_CQF_STOP_MASK 0x2
37#define MME_CMDQ_GLBL_CFG1_CP_STOP_SHIFT 2
38#define MME_CMDQ_GLBL_CFG1_CP_STOP_MASK 0x4
39#define MME_CMDQ_GLBL_CFG1_DMA_STOP_SHIFT 3
40#define MME_CMDQ_GLBL_CFG1_DMA_STOP_MASK 0x8
41#define MME_CMDQ_GLBL_CFG1_PQF_FLUSH_SHIFT 8
42#define MME_CMDQ_GLBL_CFG1_PQF_FLUSH_MASK 0x100
43#define MME_CMDQ_GLBL_CFG1_CQF_FLUSH_SHIFT 9
44#define MME_CMDQ_GLBL_CFG1_CQF_FLUSH_MASK 0x200
45#define MME_CMDQ_GLBL_CFG1_CP_FLUSH_SHIFT 10
46#define MME_CMDQ_GLBL_CFG1_CP_FLUSH_MASK 0x400
47#define MME_CMDQ_GLBL_CFG1_DMA_FLUSH_SHIFT 11
48#define MME_CMDQ_GLBL_CFG1_DMA_FLUSH_MASK 0x800
49
50/* MME_CMDQ_GLBL_PROT */
51#define MME_CMDQ_GLBL_PROT_PQF_PROT_SHIFT 0
52#define MME_CMDQ_GLBL_PROT_PQF_PROT_MASK 0x1
53#define MME_CMDQ_GLBL_PROT_CQF_PROT_SHIFT 1
54#define MME_CMDQ_GLBL_PROT_CQF_PROT_MASK 0x2
55#define MME_CMDQ_GLBL_PROT_CP_PROT_SHIFT 2
56#define MME_CMDQ_GLBL_PROT_CP_PROT_MASK 0x4
57#define MME_CMDQ_GLBL_PROT_DMA_PROT_SHIFT 3
58#define MME_CMDQ_GLBL_PROT_DMA_PROT_MASK 0x8
59#define MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
60#define MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
61#define MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
62#define MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
63#define MME_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT 6
64#define MME_CMDQ_GLBL_PROT_CP_ERR_PROT_MASK 0x40
65#define MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
66#define MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
67
68/* MME_CMDQ_GLBL_ERR_CFG */
69#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
70#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
71#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
72#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
73#define MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
74#define MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
75#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
76#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
77#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
78#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
79#define MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
80#define MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
81#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
82#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
83#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
84#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
85#define MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
86#define MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
87#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
88#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
89#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
90#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
91#define MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
92#define MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
93
94/* MME_CMDQ_GLBL_ERR_ADDR_LO */
95#define MME_CMDQ_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
96#define MME_CMDQ_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
97
98/* MME_CMDQ_GLBL_ERR_ADDR_HI */
99#define MME_CMDQ_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
100#define MME_CMDQ_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
101
102/* MME_CMDQ_GLBL_ERR_WDATA */
103#define MME_CMDQ_GLBL_ERR_WDATA_VAL_SHIFT 0
104#define MME_CMDQ_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
105
106/* MME_CMDQ_GLBL_SECURE_PROPS */
107#define MME_CMDQ_GLBL_SECURE_PROPS_ASID_SHIFT 0
108#define MME_CMDQ_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
109#define MME_CMDQ_GLBL_SECURE_PROPS_MMBP_SHIFT 10
110#define MME_CMDQ_GLBL_SECURE_PROPS_MMBP_MASK 0x400
111
112/* MME_CMDQ_GLBL_NON_SECURE_PROPS */
113#define MME_CMDQ_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
114#define MME_CMDQ_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
115#define MME_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
116#define MME_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
117
118/* MME_CMDQ_GLBL_STS0 */
119#define MME_CMDQ_GLBL_STS0_PQF_IDLE_SHIFT 0
120#define MME_CMDQ_GLBL_STS0_PQF_IDLE_MASK 0x1
121#define MME_CMDQ_GLBL_STS0_CQF_IDLE_SHIFT 1
122#define MME_CMDQ_GLBL_STS0_CQF_IDLE_MASK 0x2
123#define MME_CMDQ_GLBL_STS0_CP_IDLE_SHIFT 2
124#define MME_CMDQ_GLBL_STS0_CP_IDLE_MASK 0x4
125#define MME_CMDQ_GLBL_STS0_DMA_IDLE_SHIFT 3
126#define MME_CMDQ_GLBL_STS0_DMA_IDLE_MASK 0x8
127#define MME_CMDQ_GLBL_STS0_PQF_IS_STOP_SHIFT 4
128#define MME_CMDQ_GLBL_STS0_PQF_IS_STOP_MASK 0x10
129#define MME_CMDQ_GLBL_STS0_CQF_IS_STOP_SHIFT 5
130#define MME_CMDQ_GLBL_STS0_CQF_IS_STOP_MASK 0x20
131#define MME_CMDQ_GLBL_STS0_CP_IS_STOP_SHIFT 6
132#define MME_CMDQ_GLBL_STS0_CP_IS_STOP_MASK 0x40
133#define MME_CMDQ_GLBL_STS0_DMA_IS_STOP_SHIFT 7
134#define MME_CMDQ_GLBL_STS0_DMA_IS_STOP_MASK 0x80
135
136/* MME_CMDQ_GLBL_STS1 */
137#define MME_CMDQ_GLBL_STS1_PQF_RD_ERR_SHIFT 0
138#define MME_CMDQ_GLBL_STS1_PQF_RD_ERR_MASK 0x1
139#define MME_CMDQ_GLBL_STS1_CQF_RD_ERR_SHIFT 1
140#define MME_CMDQ_GLBL_STS1_CQF_RD_ERR_MASK 0x2
141#define MME_CMDQ_GLBL_STS1_CP_RD_ERR_SHIFT 2
142#define MME_CMDQ_GLBL_STS1_CP_RD_ERR_MASK 0x4
143#define MME_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
144#define MME_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
145#define MME_CMDQ_GLBL_STS1_CP_STOP_OP_SHIFT 4
146#define MME_CMDQ_GLBL_STS1_CP_STOP_OP_MASK 0x10
147#define MME_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
148#define MME_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
149#define MME_CMDQ_GLBL_STS1_DMA_RD_ERR_SHIFT 8
150#define MME_CMDQ_GLBL_STS1_DMA_RD_ERR_MASK 0x100
151#define MME_CMDQ_GLBL_STS1_DMA_WR_ERR_SHIFT 9
152#define MME_CMDQ_GLBL_STS1_DMA_WR_ERR_MASK 0x200
153#define MME_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
154#define MME_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
155#define MME_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
156#define MME_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
157
158/* MME_CMDQ_CQ_CFG0 */
159#define MME_CMDQ_CQ_CFG0_RESERVED_SHIFT 0
160#define MME_CMDQ_CQ_CFG0_RESERVED_MASK 0x1
161
162/* MME_CMDQ_CQ_CFG1 */
163#define MME_CMDQ_CQ_CFG1_CREDIT_LIM_SHIFT 0
164#define MME_CMDQ_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
165#define MME_CMDQ_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
166#define MME_CMDQ_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
167
168/* MME_CMDQ_CQ_ARUSER */
169#define MME_CMDQ_CQ_ARUSER_NOSNOOP_SHIFT 0
170#define MME_CMDQ_CQ_ARUSER_NOSNOOP_MASK 0x1
171#define MME_CMDQ_CQ_ARUSER_WORD_SHIFT 1
172#define MME_CMDQ_CQ_ARUSER_WORD_MASK 0x2
173
174/* MME_CMDQ_CQ_PTR_LO */
175#define MME_CMDQ_CQ_PTR_LO_VAL_SHIFT 0
176#define MME_CMDQ_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
177
178/* MME_CMDQ_CQ_PTR_HI */
179#define MME_CMDQ_CQ_PTR_HI_VAL_SHIFT 0
180#define MME_CMDQ_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
181
182/* MME_CMDQ_CQ_TSIZE */
183#define MME_CMDQ_CQ_TSIZE_VAL_SHIFT 0
184#define MME_CMDQ_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
185
186/* MME_CMDQ_CQ_CTL */
187#define MME_CMDQ_CQ_CTL_RPT_SHIFT 0
188#define MME_CMDQ_CQ_CTL_RPT_MASK 0xFFFF
189#define MME_CMDQ_CQ_CTL_CTL_SHIFT 16
190#define MME_CMDQ_CQ_CTL_CTL_MASK 0xFFFF0000
191
192/* MME_CMDQ_CQ_PTR_LO_STS */
193#define MME_CMDQ_CQ_PTR_LO_STS_VAL_SHIFT 0
194#define MME_CMDQ_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
195
196/* MME_CMDQ_CQ_PTR_HI_STS */
197#define MME_CMDQ_CQ_PTR_HI_STS_VAL_SHIFT 0
198#define MME_CMDQ_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
199
200/* MME_CMDQ_CQ_TSIZE_STS */
201#define MME_CMDQ_CQ_TSIZE_STS_VAL_SHIFT 0
202#define MME_CMDQ_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
203
204/* MME_CMDQ_CQ_CTL_STS */
205#define MME_CMDQ_CQ_CTL_STS_RPT_SHIFT 0
206#define MME_CMDQ_CQ_CTL_STS_RPT_MASK 0xFFFF
207#define MME_CMDQ_CQ_CTL_STS_CTL_SHIFT 16
208#define MME_CMDQ_CQ_CTL_STS_CTL_MASK 0xFFFF0000
209
210/* MME_CMDQ_CQ_STS0 */
211#define MME_CMDQ_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
212#define MME_CMDQ_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
213#define MME_CMDQ_CQ_STS0_CQ_FREE_CNT_SHIFT 16
214#define MME_CMDQ_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
215
216/* MME_CMDQ_CQ_STS1 */
217#define MME_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
218#define MME_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
219#define MME_CMDQ_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
220#define MME_CMDQ_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
221#define MME_CMDQ_CQ_STS1_CQ_BUSY_SHIFT 31
222#define MME_CMDQ_CQ_STS1_CQ_BUSY_MASK 0x80000000
223
224/* MME_CMDQ_CQ_RD_RATE_LIM_EN */
225#define MME_CMDQ_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
226#define MME_CMDQ_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
227
228/* MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN */
229#define MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
230#define MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
231
232/* MME_CMDQ_CQ_RD_RATE_LIM_SAT */
233#define MME_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
234#define MME_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
235
236/* MME_CMDQ_CQ_RD_RATE_LIM_TOUT */
237#define MME_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
238#define MME_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
239
240/* MME_CMDQ_CQ_IFIFO_CNT */
241#define MME_CMDQ_CQ_IFIFO_CNT_VAL_SHIFT 0
242#define MME_CMDQ_CQ_IFIFO_CNT_VAL_MASK 0x3
243
244/* MME_CMDQ_CP_MSG_BASE0_ADDR_LO */
245#define MME_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
246#define MME_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
247
248/* MME_CMDQ_CP_MSG_BASE0_ADDR_HI */
249#define MME_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
250#define MME_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
251
252/* MME_CMDQ_CP_MSG_BASE1_ADDR_LO */
253#define MME_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
254#define MME_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
255
256/* MME_CMDQ_CP_MSG_BASE1_ADDR_HI */
257#define MME_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
258#define MME_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
259
260/* MME_CMDQ_CP_MSG_BASE2_ADDR_LO */
261#define MME_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
262#define MME_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
263
264/* MME_CMDQ_CP_MSG_BASE2_ADDR_HI */
265#define MME_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
266#define MME_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
267
268/* MME_CMDQ_CP_MSG_BASE3_ADDR_LO */
269#define MME_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
270#define MME_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
271
272/* MME_CMDQ_CP_MSG_BASE3_ADDR_HI */
273#define MME_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
274#define MME_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
275
276/* MME_CMDQ_CP_LDMA_TSIZE_OFFSET */
277#define MME_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
278#define MME_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
279
280/* MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET */
281#define MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
282#define MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
283
284/* MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET */
285#define MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
286#define MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
287
288/* MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET */
289#define MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
290#define MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
291
292/* MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET */
293#define MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
294#define MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
295
296/* MME_CMDQ_CP_LDMA_COMMIT_OFFSET */
297#define MME_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
298#define MME_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
299
300/* MME_CMDQ_CP_FENCE0_RDATA */
301#define MME_CMDQ_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
302#define MME_CMDQ_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
303
304/* MME_CMDQ_CP_FENCE1_RDATA */
305#define MME_CMDQ_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
306#define MME_CMDQ_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
307
308/* MME_CMDQ_CP_FENCE2_RDATA */
309#define MME_CMDQ_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
310#define MME_CMDQ_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
311
312/* MME_CMDQ_CP_FENCE3_RDATA */
313#define MME_CMDQ_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
314#define MME_CMDQ_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
315
316/* MME_CMDQ_CP_FENCE0_CNT */
317#define MME_CMDQ_CP_FENCE0_CNT_VAL_SHIFT 0
318#define MME_CMDQ_CP_FENCE0_CNT_VAL_MASK 0xFF
319
320/* MME_CMDQ_CP_FENCE1_CNT */
321#define MME_CMDQ_CP_FENCE1_CNT_VAL_SHIFT 0
322#define MME_CMDQ_CP_FENCE1_CNT_VAL_MASK 0xFF
323
324/* MME_CMDQ_CP_FENCE2_CNT */
325#define MME_CMDQ_CP_FENCE2_CNT_VAL_SHIFT 0
326#define MME_CMDQ_CP_FENCE2_CNT_VAL_MASK 0xFF
327
328/* MME_CMDQ_CP_FENCE3_CNT */
329#define MME_CMDQ_CP_FENCE3_CNT_VAL_SHIFT 0
330#define MME_CMDQ_CP_FENCE3_CNT_VAL_MASK 0xFF
331
332/* MME_CMDQ_CP_STS */
333#define MME_CMDQ_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
334#define MME_CMDQ_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
335#define MME_CMDQ_CP_STS_ERDY_SHIFT 16
336#define MME_CMDQ_CP_STS_ERDY_MASK 0x10000
337#define MME_CMDQ_CP_STS_RRDY_SHIFT 17
338#define MME_CMDQ_CP_STS_RRDY_MASK 0x20000
339#define MME_CMDQ_CP_STS_MRDY_SHIFT 18
340#define MME_CMDQ_CP_STS_MRDY_MASK 0x40000
341#define MME_CMDQ_CP_STS_SW_STOP_SHIFT 19
342#define MME_CMDQ_CP_STS_SW_STOP_MASK 0x80000
343#define MME_CMDQ_CP_STS_FENCE_ID_SHIFT 20
344#define MME_CMDQ_CP_STS_FENCE_ID_MASK 0x300000
345#define MME_CMDQ_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
346#define MME_CMDQ_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
347
348/* MME_CMDQ_CP_CURRENT_INST_LO */
349#define MME_CMDQ_CP_CURRENT_INST_LO_VAL_SHIFT 0
350#define MME_CMDQ_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
351
352/* MME_CMDQ_CP_CURRENT_INST_HI */
353#define MME_CMDQ_CP_CURRENT_INST_HI_VAL_SHIFT 0
354#define MME_CMDQ_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
355
356/* MME_CMDQ_CP_BARRIER_CFG */
357#define MME_CMDQ_CP_BARRIER_CFG_EBGUARD_SHIFT 0
358#define MME_CMDQ_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
359
360/* MME_CMDQ_CP_DBG_0 */
361#define MME_CMDQ_CP_DBG_0_VAL_SHIFT 0
362#define MME_CMDQ_CP_DBG_0_VAL_MASK 0xFF
363
364/* MME_CMDQ_CQ_BUF_ADDR */
365#define MME_CMDQ_CQ_BUF_ADDR_VAL_SHIFT 0
366#define MME_CMDQ_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
367
368/* MME_CMDQ_CQ_BUF_RDATA */
369#define MME_CMDQ_CQ_BUF_RDATA_VAL_SHIFT 0
370#define MME_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
371
372#endif /* ASIC_REG_MME_CMDQ_MASKS_H_ */
373
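The *_SHIFT/*_MASK pairs in this auto-generated header follow the usual shift-and-mask convention for sub-fields of 32-bit registers: a field is placed with (val << SHIFT) & MASK and recovered with (reg & MASK) >> SHIFT. As a minimal illustrative sketch only (this helper is not part of the patch or of the habanalabs driver; it merely assumes the header above is included), packing the two MME_CMDQ_CQ_CFG1 fields could look like this:

/*
 * Illustrative only: pack the CREDIT_LIM and MAX_INFLIGHT sub-fields of
 * MME_CMDQ_CQ_CFG1 into one 32-bit value using the macros defined above.
 * Written as a standalone C sketch, not driver code from this patch.
 */
#include <stdint.h>

static inline uint32_t mme_cmdq_cq_cfg1_pack(uint16_t credit_lim,
					     uint16_t max_inflight)
{
	uint32_t v = 0;

	/* CREDIT_LIM occupies bits [15:0] */
	v |= ((uint32_t)credit_lim << MME_CMDQ_CQ_CFG1_CREDIT_LIM_SHIFT) &
	     MME_CMDQ_CQ_CFG1_CREDIT_LIM_MASK;
	/* MAX_INFLIGHT occupies bits [31:16] */
	v |= ((uint32_t)max_inflight << MME_CMDQ_CQ_CFG1_MAX_INFLIGHT_SHIFT) &
	     MME_CMDQ_CQ_CFG1_MAX_INFLIGHT_MASK;

	return v;
}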
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
new file mode 100644
index 000000000000..5c2f6b870a58
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
@@ -0,0 +1,139 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MME_CMDQ_REGS_H_
14#define ASIC_REG_MME_CMDQ_REGS_H_
15
16/*
17 *****************************************
18 * MME_CMDQ (Prototype: CMDQ)
19 *****************************************
20 */
21
22#define mmMME_CMDQ_GLBL_CFG0 0xD9000
23
24#define mmMME_CMDQ_GLBL_CFG1 0xD9004
25
26#define mmMME_CMDQ_GLBL_PROT 0xD9008
27
28#define mmMME_CMDQ_GLBL_ERR_CFG 0xD900C
29
30#define mmMME_CMDQ_GLBL_ERR_ADDR_LO 0xD9010
31
32#define mmMME_CMDQ_GLBL_ERR_ADDR_HI 0xD9014
33
34#define mmMME_CMDQ_GLBL_ERR_WDATA 0xD9018
35
36#define mmMME_CMDQ_GLBL_SECURE_PROPS 0xD901C
37
38#define mmMME_CMDQ_GLBL_NON_SECURE_PROPS 0xD9020
39
40#define mmMME_CMDQ_GLBL_STS0 0xD9024
41
42#define mmMME_CMDQ_GLBL_STS1 0xD9028
43
44#define mmMME_CMDQ_CQ_CFG0 0xD90B0
45
46#define mmMME_CMDQ_CQ_CFG1 0xD90B4
47
48#define mmMME_CMDQ_CQ_ARUSER 0xD90B8
49
50#define mmMME_CMDQ_CQ_PTR_LO 0xD90C0
51
52#define mmMME_CMDQ_CQ_PTR_HI 0xD90C4
53
54#define mmMME_CMDQ_CQ_TSIZE 0xD90C8
55
56#define mmMME_CMDQ_CQ_CTL 0xD90CC
57
58#define mmMME_CMDQ_CQ_PTR_LO_STS 0xD90D4
59
60#define mmMME_CMDQ_CQ_PTR_HI_STS 0xD90D8
61
62#define mmMME_CMDQ_CQ_TSIZE_STS 0xD90DC
63
64#define mmMME_CMDQ_CQ_CTL_STS 0xD90E0
65
66#define mmMME_CMDQ_CQ_STS0 0xD90E4
67
68#define mmMME_CMDQ_CQ_STS1 0xD90E8
69
70#define mmMME_CMDQ_CQ_RD_RATE_LIM_EN 0xD90F0
71
72#define mmMME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xD90F4
73
74#define mmMME_CMDQ_CQ_RD_RATE_LIM_SAT 0xD90F8
75
76#define mmMME_CMDQ_CQ_RD_RATE_LIM_TOUT 0xD90FC
77
78#define mmMME_CMDQ_CQ_IFIFO_CNT 0xD9108
79
80#define mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO 0xD9120
81
82#define mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI 0xD9124
83
84#define mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO 0xD9128
85
86#define mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI 0xD912C
87
88#define mmMME_CMDQ_CP_MSG_BASE2_ADDR_LO 0xD9130
89
90#define mmMME_CMDQ_CP_MSG_BASE2_ADDR_HI 0xD9134
91
92#define mmMME_CMDQ_CP_MSG_BASE3_ADDR_LO 0xD9138
93
94#define mmMME_CMDQ_CP_MSG_BASE3_ADDR_HI 0xD913C
95
96#define mmMME_CMDQ_CP_LDMA_TSIZE_OFFSET 0xD9140
97
98#define mmMME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xD9144
99
100#define mmMME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xD9148
101
102#define mmMME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xD914C
103
104#define mmMME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xD9150
105
106#define mmMME_CMDQ_CP_LDMA_COMMIT_OFFSET 0xD9154
107
108#define mmMME_CMDQ_CP_FENCE0_RDATA 0xD9158
109
110#define mmMME_CMDQ_CP_FENCE1_RDATA 0xD915C
111
112#define mmMME_CMDQ_CP_FENCE2_RDATA 0xD9160
113
114#define mmMME_CMDQ_CP_FENCE3_RDATA 0xD9164
115
116#define mmMME_CMDQ_CP_FENCE0_CNT 0xD9168
117
118#define mmMME_CMDQ_CP_FENCE1_CNT 0xD916C
119
120#define mmMME_CMDQ_CP_FENCE2_CNT 0xD9170
121
122#define mmMME_CMDQ_CP_FENCE3_CNT 0xD9174
123
124#define mmMME_CMDQ_CP_STS 0xD9178
125
126#define mmMME_CMDQ_CP_CURRENT_INST_LO 0xD917C
127
128#define mmMME_CMDQ_CP_CURRENT_INST_HI 0xD9180
129
130#define mmMME_CMDQ_CP_BARRIER_CFG 0xD9184
131
132#define mmMME_CMDQ_CP_DBG_0 0xD9188
133
134#define mmMME_CMDQ_CQ_BUF_ADDR 0xD9308
135
136#define mmMME_CMDQ_CQ_BUF_RDATA 0xD930C
137
138#endif /* ASIC_REG_MME_CMDQ_REGS_H_ */
139
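The mm* constants above are byte offsets of the MME command-queue registers within the device's configuration address space; together with the field masks from mme_cmdq_masks.h they allow individual status fields to be read. A hedged sketch under stated assumptions (the ioremapped cfg_base pointer and this helper are invented for the example and are not definitions from this patch):

/*
 * Illustrative only: read the CQ free-entry count by combining a register
 * offset from this header with the shift/mask pair from mme_cmdq_masks.h.
 * cfg_base is assumed to be an ioremapped pointer to the device's
 * configuration space; it is not provided by this patch.
 */
#include <linux/io.h>

static u32 mme_cmdq_cq_free_cnt(void __iomem *cfg_base)
{
	u32 sts0 = readl(cfg_base + mmMME_CMDQ_CQ_STS0);

	return (sts0 & MME_CMDQ_CQ_STS0_CQ_FREE_CNT_MASK) >>
	       MME_CMDQ_CQ_STS0_CQ_FREE_CNT_SHIFT;
}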
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
new file mode 100644
index 000000000000..c7b1b0bb3384
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
@@ -0,0 +1,1537 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MME_MASKS_H_
14#define ASIC_REG_MME_MASKS_H_
15
16/*
17 *****************************************
18 * MME (Prototype: MME)
19 *****************************************
20 */
21
22/* MME_ARCH_STATUS */
23#define MME_ARCH_STATUS_A_SHIFT 0
24#define MME_ARCH_STATUS_A_MASK 0x1
25#define MME_ARCH_STATUS_B_SHIFT 1
26#define MME_ARCH_STATUS_B_MASK 0x2
27#define MME_ARCH_STATUS_CIN_SHIFT 2
28#define MME_ARCH_STATUS_CIN_MASK 0x4
29#define MME_ARCH_STATUS_COUT_SHIFT 3
30#define MME_ARCH_STATUS_COUT_MASK 0x8
31#define MME_ARCH_STATUS_TE_SHIFT 4
32#define MME_ARCH_STATUS_TE_MASK 0x10
33#define MME_ARCH_STATUS_LD_SHIFT 5
34#define MME_ARCH_STATUS_LD_MASK 0x20
35#define MME_ARCH_STATUS_ST_SHIFT 6
36#define MME_ARCH_STATUS_ST_MASK 0x40
37#define MME_ARCH_STATUS_SB_A_EMPTY_SHIFT 7
38#define MME_ARCH_STATUS_SB_A_EMPTY_MASK 0x80
39#define MME_ARCH_STATUS_SB_B_EMPTY_SHIFT 8
40#define MME_ARCH_STATUS_SB_B_EMPTY_MASK 0x100
41#define MME_ARCH_STATUS_SB_CIN_EMPTY_SHIFT 9
42#define MME_ARCH_STATUS_SB_CIN_EMPTY_MASK 0x200
43#define MME_ARCH_STATUS_SB_COUT_EMPTY_SHIFT 10
44#define MME_ARCH_STATUS_SB_COUT_EMPTY_MASK 0x400
45#define MME_ARCH_STATUS_SM_IDLE_SHIFT 11
46#define MME_ARCH_STATUS_SM_IDLE_MASK 0x800
47#define MME_ARCH_STATUS_WBC_AXI_IDLE_SHIFT 12
48#define MME_ARCH_STATUS_WBC_AXI_IDLE_MASK 0xF000
49#define MME_ARCH_STATUS_SBC_AXI_IDLE_SHIFT 16
50#define MME_ARCH_STATUS_SBC_AXI_IDLE_MASK 0x30000
51#define MME_ARCH_STATUS_SBB_AXI_IDLE_SHIFT 18
52#define MME_ARCH_STATUS_SBB_AXI_IDLE_MASK 0xC0000
53#define MME_ARCH_STATUS_SBA_AXI_IDLE_SHIFT 20
54#define MME_ARCH_STATUS_SBA_AXI_IDLE_MASK 0x300000
55#define MME_ARCH_STATUS_FREE_ACCUMS_SHIFT 22
56#define MME_ARCH_STATUS_FREE_ACCUMS_MASK 0x1C00000
57
58/* MME_ARCH_A_BASE_ADDR_HIGH */
59#define MME_ARCH_A_BASE_ADDR_HIGH_V_SHIFT 0
60#define MME_ARCH_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
61
62/* MME_ARCH_B_BASE_ADDR_HIGH */
63#define MME_ARCH_B_BASE_ADDR_HIGH_V_SHIFT 0
64#define MME_ARCH_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
65
66/* MME_ARCH_CIN_BASE_ADDR_HIGH */
67#define MME_ARCH_CIN_BASE_ADDR_HIGH_V_SHIFT 0
68#define MME_ARCH_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
69
70/* MME_ARCH_COUT_BASE_ADDR_HIGH */
71#define MME_ARCH_COUT_BASE_ADDR_HIGH_V_SHIFT 0
72#define MME_ARCH_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
73
74/* MME_ARCH_BIAS_BASE_ADDR_HIGH */
75#define MME_ARCH_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
76#define MME_ARCH_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
77
78/* MME_ARCH_A_BASE_ADDR_LOW */
79#define MME_ARCH_A_BASE_ADDR_LOW_V_SHIFT 0
80#define MME_ARCH_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
81
82/* MME_ARCH_B_BASE_ADDR_LOW */
83#define MME_ARCH_B_BASE_ADDR_LOW_V_SHIFT 0
84#define MME_ARCH_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
85
86/* MME_ARCH_CIN_BASE_ADDR_LOW */
87#define MME_ARCH_CIN_BASE_ADDR_LOW_V_SHIFT 0
88#define MME_ARCH_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
89
90/* MME_ARCH_COUT_BASE_ADDR_LOW */
91#define MME_ARCH_COUT_BASE_ADDR_LOW_V_SHIFT 0
92#define MME_ARCH_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
93
94/* MME_ARCH_BIAS_BASE_ADDR_LOW */
95#define MME_ARCH_BIAS_BASE_ADDR_LOW_V_SHIFT 0
96#define MME_ARCH_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
97
98/* MME_ARCH_HEADER */
99#define MME_ARCH_HEADER_SIGNAL_MASK_SHIFT 0
100#define MME_ARCH_HEADER_SIGNAL_MASK_MASK 0x1F
101#define MME_ARCH_HEADER_SIGNAL_EN_SHIFT 5
102#define MME_ARCH_HEADER_SIGNAL_EN_MASK 0x20
103#define MME_ARCH_HEADER_TRANS_A_SHIFT 6
104#define MME_ARCH_HEADER_TRANS_A_MASK 0x40
105#define MME_ARCH_HEADER_LOWER_A_SHIFT 7
106#define MME_ARCH_HEADER_LOWER_A_MASK 0x80
107#define MME_ARCH_HEADER_ACCUM_MASK_SHIFT 8
108#define MME_ARCH_HEADER_ACCUM_MASK_MASK 0xF00
109#define MME_ARCH_HEADER_LOAD_BIAS_SHIFT 12
110#define MME_ARCH_HEADER_LOAD_BIAS_MASK 0x1000
111#define MME_ARCH_HEADER_LOAD_CIN_SHIFT 13
112#define MME_ARCH_HEADER_LOAD_CIN_MASK 0x2000
113#define MME_ARCH_HEADER_STORE_OUT_SHIFT 15
114#define MME_ARCH_HEADER_STORE_OUT_MASK 0x8000
115#define MME_ARCH_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
116#define MME_ARCH_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
117#define MME_ARCH_HEADER_ADVANCE_A_SHIFT 17
118#define MME_ARCH_HEADER_ADVANCE_A_MASK 0x20000
119#define MME_ARCH_HEADER_ADVANCE_B_SHIFT 18
120#define MME_ARCH_HEADER_ADVANCE_B_MASK 0x40000
121#define MME_ARCH_HEADER_ADVANCE_CIN_SHIFT 19
122#define MME_ARCH_HEADER_ADVANCE_CIN_MASK 0x80000
123#define MME_ARCH_HEADER_ADVANCE_COUT_SHIFT 20
124#define MME_ARCH_HEADER_ADVANCE_COUT_MASK 0x100000
125#define MME_ARCH_HEADER_COMPRESSED_B_SHIFT 21
126#define MME_ARCH_HEADER_COMPRESSED_B_MASK 0x200000
127#define MME_ARCH_HEADER_MASK_CONV_END_SHIFT 22
128#define MME_ARCH_HEADER_MASK_CONV_END_MASK 0x400000
129#define MME_ARCH_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
130#define MME_ARCH_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
131#define MME_ARCH_HEADER_AB_DATA_TYPE_SHIFT 24
132#define MME_ARCH_HEADER_AB_DATA_TYPE_MASK 0x3000000
133#define MME_ARCH_HEADER_CIN_DATA_TYPE_SHIFT 26
134#define MME_ARCH_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
135#define MME_ARCH_HEADER_COUT_DATA_TYPE_SHIFT 29
136#define MME_ARCH_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
137
138/* MME_ARCH_KERNEL_SIZE_MINUS_1 */
139#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
140#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
141#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
142#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
143#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
144#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
145#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
146#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
147
148/* MME_ARCH_ASSOCIATED_DIMS */
149#define MME_ARCH_ASSOCIATED_DIMS_A_0_SHIFT 0
150#define MME_ARCH_ASSOCIATED_DIMS_A_0_MASK 0x7
151#define MME_ARCH_ASSOCIATED_DIMS_B_0_SHIFT 3
152#define MME_ARCH_ASSOCIATED_DIMS_B_0_MASK 0x38
153#define MME_ARCH_ASSOCIATED_DIMS_CIN_0_SHIFT 6
154#define MME_ARCH_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
155#define MME_ARCH_ASSOCIATED_DIMS_COUT_0_SHIFT 9
156#define MME_ARCH_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
157#define MME_ARCH_ASSOCIATED_DIMS_A_1_SHIFT 16
158#define MME_ARCH_ASSOCIATED_DIMS_A_1_MASK 0x70000
159#define MME_ARCH_ASSOCIATED_DIMS_B_1_SHIFT 19
160#define MME_ARCH_ASSOCIATED_DIMS_B_1_MASK 0x380000
161#define MME_ARCH_ASSOCIATED_DIMS_CIN_1_SHIFT 22
162#define MME_ARCH_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
163#define MME_ARCH_ASSOCIATED_DIMS_COUT_1_SHIFT 25
164#define MME_ARCH_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
165
166/* MME_ARCH_COUT_SCALE */
167#define MME_ARCH_COUT_SCALE_V_SHIFT 0
168#define MME_ARCH_COUT_SCALE_V_MASK 0xFFFFFFFF
169
170/* MME_ARCH_CIN_SCALE */
171#define MME_ARCH_CIN_SCALE_V_SHIFT 0
172#define MME_ARCH_CIN_SCALE_V_MASK 0xFFFFFFFF
173
174/* MME_ARCH_GEMMLOWP_ZP */
175#define MME_ARCH_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
176#define MME_ARCH_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
177#define MME_ARCH_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
178#define MME_ARCH_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
179#define MME_ARCH_GEMMLOWP_ZP_ZP_B_SHIFT 18
180#define MME_ARCH_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
181#define MME_ARCH_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
182#define MME_ARCH_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
183#define MME_ARCH_GEMMLOWP_ZP_ACCUM_SHIFT 28
184#define MME_ARCH_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
185#define MME_ARCH_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
186#define MME_ARCH_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
187#define MME_ARCH_GEMMLOWP_ZP_RELU_EN_SHIFT 30
188#define MME_ARCH_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
189
190/* MME_ARCH_GEMMLOWP_EXPONENT */
191#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
192#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
193#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
194#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
195#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
196#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
197#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
198#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
199
200/* MME_ARCH_A_ROI_BASE_OFFSET */
201#define MME_ARCH_A_ROI_BASE_OFFSET_V_SHIFT 0
202#define MME_ARCH_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
203
204/* MME_ARCH_A_VALID_ELEMENTS */
205#define MME_ARCH_A_VALID_ELEMENTS_V_SHIFT 0
206#define MME_ARCH_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
207
208/* MME_ARCH_A_LOOP_STRIDE */
209#define MME_ARCH_A_LOOP_STRIDE_V_SHIFT 0
210#define MME_ARCH_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
211
212/* MME_ARCH_A_ROI_SIZE */
213#define MME_ARCH_A_ROI_SIZE_V_SHIFT 0
214#define MME_ARCH_A_ROI_SIZE_V_MASK 0xFFFFFFFF
215
216/* MME_ARCH_A_SPATIAL_START_OFFSET */
217#define MME_ARCH_A_SPATIAL_START_OFFSET_V_SHIFT 0
218#define MME_ARCH_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
219
220/* MME_ARCH_A_SPATIAL_STRIDE */
221#define MME_ARCH_A_SPATIAL_STRIDE_V_SHIFT 0
222#define MME_ARCH_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
223
224/* MME_ARCH_A_SPATIAL_SIZE_MINUS_1 */
225#define MME_ARCH_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
226#define MME_ARCH_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
227
228/* MME_ARCH_B_ROI_BASE_OFFSET */
229#define MME_ARCH_B_ROI_BASE_OFFSET_V_SHIFT 0
230#define MME_ARCH_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
231
232/* MME_ARCH_B_VALID_ELEMENTS */
233#define MME_ARCH_B_VALID_ELEMENTS_V_SHIFT 0
234#define MME_ARCH_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
235
236/* MME_ARCH_B_LOOP_STRIDE */
237#define MME_ARCH_B_LOOP_STRIDE_V_SHIFT 0
238#define MME_ARCH_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
239
240/* MME_ARCH_B_ROI_SIZE */
241#define MME_ARCH_B_ROI_SIZE_V_SHIFT 0
242#define MME_ARCH_B_ROI_SIZE_V_MASK 0xFFFFFFFF
243
244/* MME_ARCH_B_SPATIAL_START_OFFSET */
245#define MME_ARCH_B_SPATIAL_START_OFFSET_V_SHIFT 0
246#define MME_ARCH_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
247
248/* MME_ARCH_B_SPATIAL_STRIDE */
249#define MME_ARCH_B_SPATIAL_STRIDE_V_SHIFT 0
250#define MME_ARCH_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
251
252/* MME_ARCH_B_SPATIAL_SIZE_MINUS_1 */
253#define MME_ARCH_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
254#define MME_ARCH_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
255
256/* MME_ARCH_C_ROI_BASE_OFFSET */
257#define MME_ARCH_C_ROI_BASE_OFFSET_V_SHIFT 0
258#define MME_ARCH_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
259
260/* MME_ARCH_C_VALID_ELEMENTS */
261#define MME_ARCH_C_VALID_ELEMENTS_V_SHIFT 0
262#define MME_ARCH_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
263
264/* MME_ARCH_C_LOOP_STRIDE */
265#define MME_ARCH_C_LOOP_STRIDE_V_SHIFT 0
266#define MME_ARCH_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
267
268/* MME_ARCH_C_ROI_SIZE */
269#define MME_ARCH_C_ROI_SIZE_V_SHIFT 0
270#define MME_ARCH_C_ROI_SIZE_V_MASK 0xFFFFFFFF
271
272/* MME_ARCH_C_SPATIAL_START_OFFSET */
273#define MME_ARCH_C_SPATIAL_START_OFFSET_V_SHIFT 0
274#define MME_ARCH_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
275
276/* MME_ARCH_C_SPATIAL_STRIDE */
277#define MME_ARCH_C_SPATIAL_STRIDE_V_SHIFT 0
278#define MME_ARCH_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
279
280/* MME_ARCH_C_SPATIAL_SIZE_MINUS_1 */
281#define MME_ARCH_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
282#define MME_ARCH_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
283
284/* MME_ARCH_SYNC_OBJECT_MESSAGE */
285#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
286#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
287#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
288#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
289#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
290#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
291
292/* MME_ARCH_E_PADDING_VALUE_A */
293#define MME_ARCH_E_PADDING_VALUE_A_V_SHIFT 0
294#define MME_ARCH_E_PADDING_VALUE_A_V_MASK 0xFFFF
295
296/* MME_ARCH_E_NUM_ITERATION_MINUS_1 */
297#define MME_ARCH_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
298#define MME_ARCH_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
299
300/* MME_ARCH_E_BUBBLES_PER_SPLIT */
301#define MME_ARCH_E_BUBBLES_PER_SPLIT_A_SHIFT 0
302#define MME_ARCH_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
303#define MME_ARCH_E_BUBBLES_PER_SPLIT_B_SHIFT 8
304#define MME_ARCH_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
305#define MME_ARCH_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
306#define MME_ARCH_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
307#define MME_ARCH_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
308#define MME_ARCH_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
309
310/* MME_CMD */
311#define MME_CMD_EXECUTE_SHIFT 0
312#define MME_CMD_EXECUTE_MASK 0x1
313
314/* MME_DUMMY */
315#define MME_DUMMY_V_SHIFT 0
316#define MME_DUMMY_V_MASK 0xFFFFFFFF
317
318/* MME_RESET */
319#define MME_RESET_V_SHIFT 0
320#define MME_RESET_V_MASK 0x1
321
322/* MME_STALL */
323#define MME_STALL_V_SHIFT 0
324#define MME_STALL_V_MASK 0xFFFFFFFF
325
326/* MME_SM_BASE_ADDRESS_LOW */
327#define MME_SM_BASE_ADDRESS_LOW_V_SHIFT 0
328#define MME_SM_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
329
330/* MME_SM_BASE_ADDRESS_HIGH */
331#define MME_SM_BASE_ADDRESS_HIGH_V_SHIFT 0
332#define MME_SM_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
333
334/* MME_DBGMEM_ADD */
335#define MME_DBGMEM_ADD_V_SHIFT 0
336#define MME_DBGMEM_ADD_V_MASK 0xFFFFFFFF
337
338/* MME_DBGMEM_DATA_WR */
339#define MME_DBGMEM_DATA_WR_V_SHIFT 0
340#define MME_DBGMEM_DATA_WR_V_MASK 0xFFFFFFFF
341
342/* MME_DBGMEM_DATA_RD */
343#define MME_DBGMEM_DATA_RD_V_SHIFT 0
344#define MME_DBGMEM_DATA_RD_V_MASK 0xFFFFFFFF
345
346/* MME_DBGMEM_CTRL */
347#define MME_DBGMEM_CTRL_WR_NRD_SHIFT 0
348#define MME_DBGMEM_CTRL_WR_NRD_MASK 0x1
349
350/* MME_DBGMEM_RC */
351#define MME_DBGMEM_RC_VALID_SHIFT 0
352#define MME_DBGMEM_RC_VALID_MASK 0x1
353#define MME_DBGMEM_RC_FULL_SHIFT 1
354#define MME_DBGMEM_RC_FULL_MASK 0x2
355
356/* MME_LOG_SHADOW */
357#define MME_LOG_SHADOW_MASK_0_SHIFT 0
358#define MME_LOG_SHADOW_MASK_0_MASK 0x7F
359#define MME_LOG_SHADOW_MASK_1_SHIFT 8
360#define MME_LOG_SHADOW_MASK_1_MASK 0x7F00
361#define MME_LOG_SHADOW_MASK_2_SHIFT 16
362#define MME_LOG_SHADOW_MASK_2_MASK 0x7F0000
363#define MME_LOG_SHADOW_MASK_3_SHIFT 24
364#define MME_LOG_SHADOW_MASK_3_MASK 0x7F000000
365
366/* MME_STORE_MAX_CREDIT */
367#define MME_STORE_MAX_CREDIT_V_SHIFT 0
368#define MME_STORE_MAX_CREDIT_V_MASK 0x3F
369
370/* MME_AGU */
371#define MME_AGU_SBA_MAX_CREDIT_SHIFT 0
372#define MME_AGU_SBA_MAX_CREDIT_MASK 0x1F
373#define MME_AGU_SBB_MAX_CREDIT_SHIFT 8
374#define MME_AGU_SBB_MAX_CREDIT_MASK 0x1F00
375#define MME_AGU_SBC_MAX_CREDIT_SHIFT 16
376#define MME_AGU_SBC_MAX_CREDIT_MASK 0x1F0000
377#define MME_AGU_WBC_MAX_CREDIT_SHIFT 24
378#define MME_AGU_WBC_MAX_CREDIT_MASK 0x3F000000
379
380/* MME_SBA */
381#define MME_SBA_MAX_SIZE_SHIFT 0
382#define MME_SBA_MAX_SIZE_MASK 0x3FF
383#define MME_SBA_EU_MAX_CREDIT_SHIFT 16
384#define MME_SBA_EU_MAX_CREDIT_MASK 0x1F0000
385
386/* MME_SBB */
387#define MME_SBB_MAX_SIZE_SHIFT 0
388#define MME_SBB_MAX_SIZE_MASK 0x3FF
389#define MME_SBB_EU_MAX_CREDIT_SHIFT 16
390#define MME_SBB_EU_MAX_CREDIT_MASK 0x1F0000
391
392/* MME_SBC */
393#define MME_SBC_MAX_SIZE_SHIFT 0
394#define MME_SBC_MAX_SIZE_MASK 0x3FF
395#define MME_SBC_EU_MAX_CREDIT_SHIFT 16
396#define MME_SBC_EU_MAX_CREDIT_MASK 0x1F0000
397
398/* MME_WBC */
399#define MME_WBC_MAX_OUTSTANDING_SHIFT 0
400#define MME_WBC_MAX_OUTSTANDING_MASK 0xFFF
401#define MME_WBC_DISABLE_FAST_END_PE_SHIFT 12
402#define MME_WBC_DISABLE_FAST_END_PE_MASK 0x1000
403#define MME_WBC_LD_INSERT_BUBBLE_DIS_SHIFT 13
404#define MME_WBC_LD_INSERT_BUBBLE_DIS_MASK 0x2000
405
406/* MME_SBA_CONTROL_DATA */
407#define MME_SBA_CONTROL_DATA_ASID_SHIFT 0
408#define MME_SBA_CONTROL_DATA_ASID_MASK 0x3FF
409#define MME_SBA_CONTROL_DATA_MMBP_SHIFT 10
410#define MME_SBA_CONTROL_DATA_MMBP_MASK 0x400
411
412/* MME_SBB_CONTROL_DATA */
413#define MME_SBB_CONTROL_DATA_ASID_SHIFT 0
414#define MME_SBB_CONTROL_DATA_ASID_MASK 0x3FF
415#define MME_SBB_CONTROL_DATA_MMBP_SHIFT 10
416#define MME_SBB_CONTROL_DATA_MMBP_MASK 0x400
417
418/* MME_SBC_CONTROL_DATA */
419#define MME_SBC_CONTROL_DATA_ASID_SHIFT 0
420#define MME_SBC_CONTROL_DATA_ASID_MASK 0x3FF
421#define MME_SBC_CONTROL_DATA_MMBP_SHIFT 10
422#define MME_SBC_CONTROL_DATA_MMBP_MASK 0x400
423
424/* MME_WBC_CONTROL_DATA */
425#define MME_WBC_CONTROL_DATA_ASID_SHIFT 0
426#define MME_WBC_CONTROL_DATA_ASID_MASK 0x3FF
427#define MME_WBC_CONTROL_DATA_MMBP_SHIFT 10
428#define MME_WBC_CONTROL_DATA_MMBP_MASK 0x400
429
430/* MME_TE */
431#define MME_TE_MAX_CREDIT_SHIFT 0
432#define MME_TE_MAX_CREDIT_MASK 0x1F
433#define MME_TE_DESC_MAX_CREDIT_SHIFT 8
434#define MME_TE_DESC_MAX_CREDIT_MASK 0x1F00
435
436/* MME_TE2DEC */
437#define MME_TE2DEC_MAX_CREDIT_SHIFT 0
438#define MME_TE2DEC_MAX_CREDIT_MASK 0x1F
439
440/* MME_REI_STATUS */
441#define MME_REI_STATUS_V_SHIFT 0
442#define MME_REI_STATUS_V_MASK 0xFFFFFFFF
443
444/* MME_REI_MASK */
445#define MME_REI_MASK_V_SHIFT 0
446#define MME_REI_MASK_V_MASK 0xFFFFFFFF
447
448/* MME_SEI_STATUS */
449#define MME_SEI_STATUS_V_SHIFT 0
450#define MME_SEI_STATUS_V_MASK 0xFFFFFFFF
451
452/* MME_SEI_MASK */
453#define MME_SEI_MASK_V_SHIFT 0
454#define MME_SEI_MASK_V_MASK 0xFFFFFFFF
455
456/* MME_SPI_STATUS */
457#define MME_SPI_STATUS_V_SHIFT 0
458#define MME_SPI_STATUS_V_MASK 0xFFFFFFFF
459
460/* MME_SPI_MASK */
461#define MME_SPI_MASK_V_SHIFT 0
462#define MME_SPI_MASK_V_MASK 0xFFFFFFFF
463
464/* MME_SHADOW_0_STATUS */
465#define MME_SHADOW_0_STATUS_A_SHIFT 0
466#define MME_SHADOW_0_STATUS_A_MASK 0x1
467#define MME_SHADOW_0_STATUS_B_SHIFT 1
468#define MME_SHADOW_0_STATUS_B_MASK 0x2
469#define MME_SHADOW_0_STATUS_CIN_SHIFT 2
470#define MME_SHADOW_0_STATUS_CIN_MASK 0x4
471#define MME_SHADOW_0_STATUS_COUT_SHIFT 3
472#define MME_SHADOW_0_STATUS_COUT_MASK 0x8
473#define MME_SHADOW_0_STATUS_TE_SHIFT 4
474#define MME_SHADOW_0_STATUS_TE_MASK 0x10
475#define MME_SHADOW_0_STATUS_LD_SHIFT 5
476#define MME_SHADOW_0_STATUS_LD_MASK 0x20
477#define MME_SHADOW_0_STATUS_ST_SHIFT 6
478#define MME_SHADOW_0_STATUS_ST_MASK 0x40
479
480/* MME_SHADOW_0_A_BASE_ADDR_HIGH */
481#define MME_SHADOW_0_A_BASE_ADDR_HIGH_V_SHIFT 0
482#define MME_SHADOW_0_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
483
484/* MME_SHADOW_0_B_BASE_ADDR_HIGH */
485#define MME_SHADOW_0_B_BASE_ADDR_HIGH_V_SHIFT 0
486#define MME_SHADOW_0_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
487
488/* MME_SHADOW_0_CIN_BASE_ADDR_HIGH */
489#define MME_SHADOW_0_CIN_BASE_ADDR_HIGH_V_SHIFT 0
490#define MME_SHADOW_0_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
491
492/* MME_SHADOW_0_COUT_BASE_ADDR_HIGH */
493#define MME_SHADOW_0_COUT_BASE_ADDR_HIGH_V_SHIFT 0
494#define MME_SHADOW_0_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
495
496/* MME_SHADOW_0_BIAS_BASE_ADDR_HIGH */
497#define MME_SHADOW_0_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
498#define MME_SHADOW_0_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
499
500/* MME_SHADOW_0_A_BASE_ADDR_LOW */
501#define MME_SHADOW_0_A_BASE_ADDR_LOW_V_SHIFT 0
502#define MME_SHADOW_0_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
503
504/* MME_SHADOW_0_B_BASE_ADDR_LOW */
505#define MME_SHADOW_0_B_BASE_ADDR_LOW_V_SHIFT 0
506#define MME_SHADOW_0_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
507
508/* MME_SHADOW_0_CIN_BASE_ADDR_LOW */
509#define MME_SHADOW_0_CIN_BASE_ADDR_LOW_V_SHIFT 0
510#define MME_SHADOW_0_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
511
512/* MME_SHADOW_0_COUT_BASE_ADDR_LOW */
513#define MME_SHADOW_0_COUT_BASE_ADDR_LOW_V_SHIFT 0
514#define MME_SHADOW_0_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
515
516/* MME_SHADOW_0_BIAS_BASE_ADDR_LOW */
517#define MME_SHADOW_0_BIAS_BASE_ADDR_LOW_V_SHIFT 0
518#define MME_SHADOW_0_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
519
520/* MME_SHADOW_0_HEADER */
521#define MME_SHADOW_0_HEADER_SIGNAL_MASK_SHIFT 0
522#define MME_SHADOW_0_HEADER_SIGNAL_MASK_MASK 0x1F
523#define MME_SHADOW_0_HEADER_SIGNAL_EN_SHIFT 5
524#define MME_SHADOW_0_HEADER_SIGNAL_EN_MASK 0x20
525#define MME_SHADOW_0_HEADER_TRANS_A_SHIFT 6
526#define MME_SHADOW_0_HEADER_TRANS_A_MASK 0x40
527#define MME_SHADOW_0_HEADER_LOWER_A_SHIFT 7
528#define MME_SHADOW_0_HEADER_LOWER_A_MASK 0x80
529#define MME_SHADOW_0_HEADER_ACCUM_MASK_SHIFT 8
530#define MME_SHADOW_0_HEADER_ACCUM_MASK_MASK 0xF00
531#define MME_SHADOW_0_HEADER_LOAD_BIAS_SHIFT 12
532#define MME_SHADOW_0_HEADER_LOAD_BIAS_MASK 0x1000
533#define MME_SHADOW_0_HEADER_LOAD_CIN_SHIFT 13
534#define MME_SHADOW_0_HEADER_LOAD_CIN_MASK 0x2000
535#define MME_SHADOW_0_HEADER_STORE_OUT_SHIFT 15
536#define MME_SHADOW_0_HEADER_STORE_OUT_MASK 0x8000
537#define MME_SHADOW_0_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
538#define MME_SHADOW_0_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
539#define MME_SHADOW_0_HEADER_ADVANCE_A_SHIFT 17
540#define MME_SHADOW_0_HEADER_ADVANCE_A_MASK 0x20000
541#define MME_SHADOW_0_HEADER_ADVANCE_B_SHIFT 18
542#define MME_SHADOW_0_HEADER_ADVANCE_B_MASK 0x40000
543#define MME_SHADOW_0_HEADER_ADVANCE_CIN_SHIFT 19
544#define MME_SHADOW_0_HEADER_ADVANCE_CIN_MASK 0x80000
545#define MME_SHADOW_0_HEADER_ADVANCE_COUT_SHIFT 20
546#define MME_SHADOW_0_HEADER_ADVANCE_COUT_MASK 0x100000
547#define MME_SHADOW_0_HEADER_COMPRESSED_B_SHIFT 21
548#define MME_SHADOW_0_HEADER_COMPRESSED_B_MASK 0x200000
549#define MME_SHADOW_0_HEADER_MASK_CONV_END_SHIFT 22
550#define MME_SHADOW_0_HEADER_MASK_CONV_END_MASK 0x400000
551#define MME_SHADOW_0_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
552#define MME_SHADOW_0_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
553#define MME_SHADOW_0_HEADER_AB_DATA_TYPE_SHIFT 24
554#define MME_SHADOW_0_HEADER_AB_DATA_TYPE_MASK 0x3000000
555#define MME_SHADOW_0_HEADER_CIN_DATA_TYPE_SHIFT 26
556#define MME_SHADOW_0_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
557#define MME_SHADOW_0_HEADER_COUT_DATA_TYPE_SHIFT 29
558#define MME_SHADOW_0_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
559
560/* MME_SHADOW_0_KERNEL_SIZE_MINUS_1 */
561#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
562#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
563#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
564#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
565#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
566#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
567#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
568#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
569
570/* MME_SHADOW_0_ASSOCIATED_DIMS */
571#define MME_SHADOW_0_ASSOCIATED_DIMS_A_0_SHIFT 0
572#define MME_SHADOW_0_ASSOCIATED_DIMS_A_0_MASK 0x7
573#define MME_SHADOW_0_ASSOCIATED_DIMS_B_0_SHIFT 3
574#define MME_SHADOW_0_ASSOCIATED_DIMS_B_0_MASK 0x38
575#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_0_SHIFT 6
576#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
577#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_0_SHIFT 9
578#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
579#define MME_SHADOW_0_ASSOCIATED_DIMS_A_1_SHIFT 16
580#define MME_SHADOW_0_ASSOCIATED_DIMS_A_1_MASK 0x70000
581#define MME_SHADOW_0_ASSOCIATED_DIMS_B_1_SHIFT 19
582#define MME_SHADOW_0_ASSOCIATED_DIMS_B_1_MASK 0x380000
583#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_1_SHIFT 22
584#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
585#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_1_SHIFT 25
586#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
587
588/* MME_SHADOW_0_COUT_SCALE */
589#define MME_SHADOW_0_COUT_SCALE_V_SHIFT 0
590#define MME_SHADOW_0_COUT_SCALE_V_MASK 0xFFFFFFFF
591
592/* MME_SHADOW_0_CIN_SCALE */
593#define MME_SHADOW_0_CIN_SCALE_V_SHIFT 0
594#define MME_SHADOW_0_CIN_SCALE_V_MASK 0xFFFFFFFF
595
596/* MME_SHADOW_0_GEMMLOWP_ZP */
597#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
598#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
599#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
600#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
601#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_B_SHIFT 18
602#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
603#define MME_SHADOW_0_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
604#define MME_SHADOW_0_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
605#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_SHIFT 28
606#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
607#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
608#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
609#define MME_SHADOW_0_GEMMLOWP_ZP_RELU_EN_SHIFT 30
610#define MME_SHADOW_0_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
611
612/* MME_SHADOW_0_GEMMLOWP_EXPONENT */
613#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
614#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
615#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
616#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
617#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
618#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
619#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
620#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
621
622/* MME_SHADOW_0_A_ROI_BASE_OFFSET */
623#define MME_SHADOW_0_A_ROI_BASE_OFFSET_V_SHIFT 0
624#define MME_SHADOW_0_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
625
626/* MME_SHADOW_0_A_VALID_ELEMENTS */
627#define MME_SHADOW_0_A_VALID_ELEMENTS_V_SHIFT 0
628#define MME_SHADOW_0_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
629
630/* MME_SHADOW_0_A_LOOP_STRIDE */
631#define MME_SHADOW_0_A_LOOP_STRIDE_V_SHIFT 0
632#define MME_SHADOW_0_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
633
634/* MME_SHADOW_0_A_ROI_SIZE */
635#define MME_SHADOW_0_A_ROI_SIZE_V_SHIFT 0
636#define MME_SHADOW_0_A_ROI_SIZE_V_MASK 0xFFFFFFFF
637
638/* MME_SHADOW_0_A_SPATIAL_START_OFFSET */
639#define MME_SHADOW_0_A_SPATIAL_START_OFFSET_V_SHIFT 0
640#define MME_SHADOW_0_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
641
642/* MME_SHADOW_0_A_SPATIAL_STRIDE */
643#define MME_SHADOW_0_A_SPATIAL_STRIDE_V_SHIFT 0
644#define MME_SHADOW_0_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
645
646/* MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1 */
647#define MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
648#define MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
649
650/* MME_SHADOW_0_B_ROI_BASE_OFFSET */
651#define MME_SHADOW_0_B_ROI_BASE_OFFSET_V_SHIFT 0
652#define MME_SHADOW_0_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
653
654/* MME_SHADOW_0_B_VALID_ELEMENTS */
655#define MME_SHADOW_0_B_VALID_ELEMENTS_V_SHIFT 0
656#define MME_SHADOW_0_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
657
658/* MME_SHADOW_0_B_LOOP_STRIDE */
659#define MME_SHADOW_0_B_LOOP_STRIDE_V_SHIFT 0
660#define MME_SHADOW_0_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
661
662/* MME_SHADOW_0_B_ROI_SIZE */
663#define MME_SHADOW_0_B_ROI_SIZE_V_SHIFT 0
664#define MME_SHADOW_0_B_ROI_SIZE_V_MASK 0xFFFFFFFF
665
666/* MME_SHADOW_0_B_SPATIAL_START_OFFSET */
667#define MME_SHADOW_0_B_SPATIAL_START_OFFSET_V_SHIFT 0
668#define MME_SHADOW_0_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
669
670/* MME_SHADOW_0_B_SPATIAL_STRIDE */
671#define MME_SHADOW_0_B_SPATIAL_STRIDE_V_SHIFT 0
672#define MME_SHADOW_0_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
673
674/* MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1 */
675#define MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
676#define MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
677
678/* MME_SHADOW_0_C_ROI_BASE_OFFSET */
679#define MME_SHADOW_0_C_ROI_BASE_OFFSET_V_SHIFT 0
680#define MME_SHADOW_0_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
681
682/* MME_SHADOW_0_C_VALID_ELEMENTS */
683#define MME_SHADOW_0_C_VALID_ELEMENTS_V_SHIFT 0
684#define MME_SHADOW_0_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
685
686/* MME_SHADOW_0_C_LOOP_STRIDE */
687#define MME_SHADOW_0_C_LOOP_STRIDE_V_SHIFT 0
688#define MME_SHADOW_0_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
689
690/* MME_SHADOW_0_C_ROI_SIZE */
691#define MME_SHADOW_0_C_ROI_SIZE_V_SHIFT 0
692#define MME_SHADOW_0_C_ROI_SIZE_V_MASK 0xFFFFFFFF
693
694/* MME_SHADOW_0_C_SPATIAL_START_OFFSET */
695#define MME_SHADOW_0_C_SPATIAL_START_OFFSET_V_SHIFT 0
696#define MME_SHADOW_0_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
697
698/* MME_SHADOW_0_C_SPATIAL_STRIDE */
699#define MME_SHADOW_0_C_SPATIAL_STRIDE_V_SHIFT 0
700#define MME_SHADOW_0_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
701
702/* MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1 */
703#define MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
704#define MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
705
706/* MME_SHADOW_0_SYNC_OBJECT_MESSAGE */
707#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
708#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
709#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
710#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
711#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
712#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
713
714/* MME_SHADOW_0_E_PADDING_VALUE_A */
715#define MME_SHADOW_0_E_PADDING_VALUE_A_V_SHIFT 0
716#define MME_SHADOW_0_E_PADDING_VALUE_A_V_MASK 0xFFFF
717
718/* MME_SHADOW_0_E_NUM_ITERATION_MINUS_1 */
719#define MME_SHADOW_0_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
720#define MME_SHADOW_0_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
721
722/* MME_SHADOW_0_E_BUBBLES_PER_SPLIT */
723#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_A_SHIFT 0
724#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
725#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_B_SHIFT 8
726#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
727#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
728#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
729#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
730#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
731
732/* MME_SHADOW_1_STATUS */
733#define MME_SHADOW_1_STATUS_A_SHIFT 0
734#define MME_SHADOW_1_STATUS_A_MASK 0x1
735#define MME_SHADOW_1_STATUS_B_SHIFT 1
736#define MME_SHADOW_1_STATUS_B_MASK 0x2
737#define MME_SHADOW_1_STATUS_CIN_SHIFT 2
738#define MME_SHADOW_1_STATUS_CIN_MASK 0x4
739#define MME_SHADOW_1_STATUS_COUT_SHIFT 3
740#define MME_SHADOW_1_STATUS_COUT_MASK 0x8
741#define MME_SHADOW_1_STATUS_TE_SHIFT 4
742#define MME_SHADOW_1_STATUS_TE_MASK 0x10
743#define MME_SHADOW_1_STATUS_LD_SHIFT 5
744#define MME_SHADOW_1_STATUS_LD_MASK 0x20
745#define MME_SHADOW_1_STATUS_ST_SHIFT 6
746#define MME_SHADOW_1_STATUS_ST_MASK 0x40
747
748/* MME_SHADOW_1_A_BASE_ADDR_HIGH */
749#define MME_SHADOW_1_A_BASE_ADDR_HIGH_V_SHIFT 0
750#define MME_SHADOW_1_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
751
752/* MME_SHADOW_1_B_BASE_ADDR_HIGH */
753#define MME_SHADOW_1_B_BASE_ADDR_HIGH_V_SHIFT 0
754#define MME_SHADOW_1_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
755
756/* MME_SHADOW_1_CIN_BASE_ADDR_HIGH */
757#define MME_SHADOW_1_CIN_BASE_ADDR_HIGH_V_SHIFT 0
758#define MME_SHADOW_1_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
759
760/* MME_SHADOW_1_COUT_BASE_ADDR_HIGH */
761#define MME_SHADOW_1_COUT_BASE_ADDR_HIGH_V_SHIFT 0
762#define MME_SHADOW_1_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
763
764/* MME_SHADOW_1_BIAS_BASE_ADDR_HIGH */
765#define MME_SHADOW_1_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
766#define MME_SHADOW_1_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
767
768/* MME_SHADOW_1_A_BASE_ADDR_LOW */
769#define MME_SHADOW_1_A_BASE_ADDR_LOW_V_SHIFT 0
770#define MME_SHADOW_1_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
771
772/* MME_SHADOW_1_B_BASE_ADDR_LOW */
773#define MME_SHADOW_1_B_BASE_ADDR_LOW_V_SHIFT 0
774#define MME_SHADOW_1_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
775
776/* MME_SHADOW_1_CIN_BASE_ADDR_LOW */
777#define MME_SHADOW_1_CIN_BASE_ADDR_LOW_V_SHIFT 0
778#define MME_SHADOW_1_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
779
780/* MME_SHADOW_1_COUT_BASE_ADDR_LOW */
781#define MME_SHADOW_1_COUT_BASE_ADDR_LOW_V_SHIFT 0
782#define MME_SHADOW_1_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
783
784/* MME_SHADOW_1_BIAS_BASE_ADDR_LOW */
785#define MME_SHADOW_1_BIAS_BASE_ADDR_LOW_V_SHIFT 0
786#define MME_SHADOW_1_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
787
788/* MME_SHADOW_1_HEADER */
789#define MME_SHADOW_1_HEADER_SIGNAL_MASK_SHIFT 0
790#define MME_SHADOW_1_HEADER_SIGNAL_MASK_MASK 0x1F
791#define MME_SHADOW_1_HEADER_SIGNAL_EN_SHIFT 5
792#define MME_SHADOW_1_HEADER_SIGNAL_EN_MASK 0x20
793#define MME_SHADOW_1_HEADER_TRANS_A_SHIFT 6
794#define MME_SHADOW_1_HEADER_TRANS_A_MASK 0x40
795#define MME_SHADOW_1_HEADER_LOWER_A_SHIFT 7
796#define MME_SHADOW_1_HEADER_LOWER_A_MASK 0x80
797#define MME_SHADOW_1_HEADER_ACCUM_MASK_SHIFT 8
798#define MME_SHADOW_1_HEADER_ACCUM_MASK_MASK 0xF00
799#define MME_SHADOW_1_HEADER_LOAD_BIAS_SHIFT 12
800#define MME_SHADOW_1_HEADER_LOAD_BIAS_MASK 0x1000
801#define MME_SHADOW_1_HEADER_LOAD_CIN_SHIFT 13
802#define MME_SHADOW_1_HEADER_LOAD_CIN_MASK 0x2000
803#define MME_SHADOW_1_HEADER_STORE_OUT_SHIFT 15
804#define MME_SHADOW_1_HEADER_STORE_OUT_MASK 0x8000
805#define MME_SHADOW_1_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
806#define MME_SHADOW_1_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
807#define MME_SHADOW_1_HEADER_ADVANCE_A_SHIFT 17
808#define MME_SHADOW_1_HEADER_ADVANCE_A_MASK 0x20000
809#define MME_SHADOW_1_HEADER_ADVANCE_B_SHIFT 18
810#define MME_SHADOW_1_HEADER_ADVANCE_B_MASK 0x40000
811#define MME_SHADOW_1_HEADER_ADVANCE_CIN_SHIFT 19
812#define MME_SHADOW_1_HEADER_ADVANCE_CIN_MASK 0x80000
813#define MME_SHADOW_1_HEADER_ADVANCE_COUT_SHIFT 20
814#define MME_SHADOW_1_HEADER_ADVANCE_COUT_MASK 0x100000
815#define MME_SHADOW_1_HEADER_COMPRESSED_B_SHIFT 21
816#define MME_SHADOW_1_HEADER_COMPRESSED_B_MASK 0x200000
817#define MME_SHADOW_1_HEADER_MASK_CONV_END_SHIFT 22
818#define MME_SHADOW_1_HEADER_MASK_CONV_END_MASK 0x400000
819#define MME_SHADOW_1_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
820#define MME_SHADOW_1_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
821#define MME_SHADOW_1_HEADER_AB_DATA_TYPE_SHIFT 24
822#define MME_SHADOW_1_HEADER_AB_DATA_TYPE_MASK 0x3000000
823#define MME_SHADOW_1_HEADER_CIN_DATA_TYPE_SHIFT 26
824#define MME_SHADOW_1_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
825#define MME_SHADOW_1_HEADER_COUT_DATA_TYPE_SHIFT 29
826#define MME_SHADOW_1_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
827
828/* MME_SHADOW_1_KERNEL_SIZE_MINUS_1 */
829#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
830#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
831#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
832#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
833#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
834#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
835#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
836#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
837
838/* MME_SHADOW_1_ASSOCIATED_DIMS */
839#define MME_SHADOW_1_ASSOCIATED_DIMS_A_0_SHIFT 0
840#define MME_SHADOW_1_ASSOCIATED_DIMS_A_0_MASK 0x7
841#define MME_SHADOW_1_ASSOCIATED_DIMS_B_0_SHIFT 3
842#define MME_SHADOW_1_ASSOCIATED_DIMS_B_0_MASK 0x38
843#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_0_SHIFT 6
844#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
845#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_0_SHIFT 9
846#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
847#define MME_SHADOW_1_ASSOCIATED_DIMS_A_1_SHIFT 16
848#define MME_SHADOW_1_ASSOCIATED_DIMS_A_1_MASK 0x70000
849#define MME_SHADOW_1_ASSOCIATED_DIMS_B_1_SHIFT 19
850#define MME_SHADOW_1_ASSOCIATED_DIMS_B_1_MASK 0x380000
851#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_1_SHIFT 22
852#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
853#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_1_SHIFT 25
854#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
855
856/* MME_SHADOW_1_COUT_SCALE */
857#define MME_SHADOW_1_COUT_SCALE_V_SHIFT 0
858#define MME_SHADOW_1_COUT_SCALE_V_MASK 0xFFFFFFFF
859
860/* MME_SHADOW_1_CIN_SCALE */
861#define MME_SHADOW_1_CIN_SCALE_V_SHIFT 0
862#define MME_SHADOW_1_CIN_SCALE_V_MASK 0xFFFFFFFF
863
864/* MME_SHADOW_1_GEMMLOWP_ZP */
865#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
866#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
867#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
868#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
869#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_B_SHIFT 18
870#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
871#define MME_SHADOW_1_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
872#define MME_SHADOW_1_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
873#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_SHIFT 28
874#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
875#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
876#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
877#define MME_SHADOW_1_GEMMLOWP_ZP_RELU_EN_SHIFT 30
878#define MME_SHADOW_1_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
879
880/* MME_SHADOW_1_GEMMLOWP_EXPONENT */
881#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
882#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
883#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
884#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
885#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
886#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
887#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
888#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
889
890/* MME_SHADOW_1_A_ROI_BASE_OFFSET */
891#define MME_SHADOW_1_A_ROI_BASE_OFFSET_V_SHIFT 0
892#define MME_SHADOW_1_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
893
894/* MME_SHADOW_1_A_VALID_ELEMENTS */
895#define MME_SHADOW_1_A_VALID_ELEMENTS_V_SHIFT 0
896#define MME_SHADOW_1_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
897
898/* MME_SHADOW_1_A_LOOP_STRIDE */
899#define MME_SHADOW_1_A_LOOP_STRIDE_V_SHIFT 0
900#define MME_SHADOW_1_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
901
902/* MME_SHADOW_1_A_ROI_SIZE */
903#define MME_SHADOW_1_A_ROI_SIZE_V_SHIFT 0
904#define MME_SHADOW_1_A_ROI_SIZE_V_MASK 0xFFFFFFFF
905
906/* MME_SHADOW_1_A_SPATIAL_START_OFFSET */
907#define MME_SHADOW_1_A_SPATIAL_START_OFFSET_V_SHIFT 0
908#define MME_SHADOW_1_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
909
910/* MME_SHADOW_1_A_SPATIAL_STRIDE */
911#define MME_SHADOW_1_A_SPATIAL_STRIDE_V_SHIFT 0
912#define MME_SHADOW_1_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
913
914/* MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1 */
915#define MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
916#define MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
917
918/* MME_SHADOW_1_B_ROI_BASE_OFFSET */
919#define MME_SHADOW_1_B_ROI_BASE_OFFSET_V_SHIFT 0
920#define MME_SHADOW_1_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
921
922/* MME_SHADOW_1_B_VALID_ELEMENTS */
923#define MME_SHADOW_1_B_VALID_ELEMENTS_V_SHIFT 0
924#define MME_SHADOW_1_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
925
926/* MME_SHADOW_1_B_LOOP_STRIDE */
927#define MME_SHADOW_1_B_LOOP_STRIDE_V_SHIFT 0
928#define MME_SHADOW_1_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
929
930/* MME_SHADOW_1_B_ROI_SIZE */
931#define MME_SHADOW_1_B_ROI_SIZE_V_SHIFT 0
932#define MME_SHADOW_1_B_ROI_SIZE_V_MASK 0xFFFFFFFF
933
934/* MME_SHADOW_1_B_SPATIAL_START_OFFSET */
935#define MME_SHADOW_1_B_SPATIAL_START_OFFSET_V_SHIFT 0
936#define MME_SHADOW_1_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
937
938/* MME_SHADOW_1_B_SPATIAL_STRIDE */
939#define MME_SHADOW_1_B_SPATIAL_STRIDE_V_SHIFT 0
940#define MME_SHADOW_1_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
941
942/* MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1 */
943#define MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
944#define MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
945
946/* MME_SHADOW_1_C_ROI_BASE_OFFSET */
947#define MME_SHADOW_1_C_ROI_BASE_OFFSET_V_SHIFT 0
948#define MME_SHADOW_1_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
949
950/* MME_SHADOW_1_C_VALID_ELEMENTS */
951#define MME_SHADOW_1_C_VALID_ELEMENTS_V_SHIFT 0
952#define MME_SHADOW_1_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
953
954/* MME_SHADOW_1_C_LOOP_STRIDE */
955#define MME_SHADOW_1_C_LOOP_STRIDE_V_SHIFT 0
956#define MME_SHADOW_1_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
957
958/* MME_SHADOW_1_C_ROI_SIZE */
959#define MME_SHADOW_1_C_ROI_SIZE_V_SHIFT 0
960#define MME_SHADOW_1_C_ROI_SIZE_V_MASK 0xFFFFFFFF
961
962/* MME_SHADOW_1_C_SPATIAL_START_OFFSET */
963#define MME_SHADOW_1_C_SPATIAL_START_OFFSET_V_SHIFT 0
964#define MME_SHADOW_1_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
965
966/* MME_SHADOW_1_C_SPATIAL_STRIDE */
967#define MME_SHADOW_1_C_SPATIAL_STRIDE_V_SHIFT 0
968#define MME_SHADOW_1_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
969
970/* MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1 */
971#define MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
972#define MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
973
974/* MME_SHADOW_1_SYNC_OBJECT_MESSAGE */
975#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
976#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
977#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
978#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
979#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
980#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
981
982/* MME_SHADOW_1_E_PADDING_VALUE_A */
983#define MME_SHADOW_1_E_PADDING_VALUE_A_V_SHIFT 0
984#define MME_SHADOW_1_E_PADDING_VALUE_A_V_MASK 0xFFFF
985
986/* MME_SHADOW_1_E_NUM_ITERATION_MINUS_1 */
987#define MME_SHADOW_1_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
988#define MME_SHADOW_1_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
989
990/* MME_SHADOW_1_E_BUBBLES_PER_SPLIT */
991#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_A_SHIFT 0
992#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
993#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_B_SHIFT 8
994#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
995#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
996#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
997#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
998#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
999
1000/* MME_SHADOW_2_STATUS */
1001#define MME_SHADOW_2_STATUS_A_SHIFT 0
1002#define MME_SHADOW_2_STATUS_A_MASK 0x1
1003#define MME_SHADOW_2_STATUS_B_SHIFT 1
1004#define MME_SHADOW_2_STATUS_B_MASK 0x2
1005#define MME_SHADOW_2_STATUS_CIN_SHIFT 2
1006#define MME_SHADOW_2_STATUS_CIN_MASK 0x4
1007#define MME_SHADOW_2_STATUS_COUT_SHIFT 3
1008#define MME_SHADOW_2_STATUS_COUT_MASK 0x8
1009#define MME_SHADOW_2_STATUS_TE_SHIFT 4
1010#define MME_SHADOW_2_STATUS_TE_MASK 0x10
1011#define MME_SHADOW_2_STATUS_LD_SHIFT 5
1012#define MME_SHADOW_2_STATUS_LD_MASK 0x20
1013#define MME_SHADOW_2_STATUS_ST_SHIFT 6
1014#define MME_SHADOW_2_STATUS_ST_MASK 0x40
1015
1016/* MME_SHADOW_2_A_BASE_ADDR_HIGH */
1017#define MME_SHADOW_2_A_BASE_ADDR_HIGH_V_SHIFT 0
1018#define MME_SHADOW_2_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1019
1020/* MME_SHADOW_2_B_BASE_ADDR_HIGH */
1021#define MME_SHADOW_2_B_BASE_ADDR_HIGH_V_SHIFT 0
1022#define MME_SHADOW_2_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1023
1024/* MME_SHADOW_2_CIN_BASE_ADDR_HIGH */
1025#define MME_SHADOW_2_CIN_BASE_ADDR_HIGH_V_SHIFT 0
1026#define MME_SHADOW_2_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1027
1028/* MME_SHADOW_2_COUT_BASE_ADDR_HIGH */
1029#define MME_SHADOW_2_COUT_BASE_ADDR_HIGH_V_SHIFT 0
1030#define MME_SHADOW_2_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1031
1032/* MME_SHADOW_2_BIAS_BASE_ADDR_HIGH */
1033#define MME_SHADOW_2_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
1034#define MME_SHADOW_2_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1035
1036/* MME_SHADOW_2_A_BASE_ADDR_LOW */
1037#define MME_SHADOW_2_A_BASE_ADDR_LOW_V_SHIFT 0
1038#define MME_SHADOW_2_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1039
1040/* MME_SHADOW_2_B_BASE_ADDR_LOW */
1041#define MME_SHADOW_2_B_BASE_ADDR_LOW_V_SHIFT 0
1042#define MME_SHADOW_2_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1043
1044/* MME_SHADOW_2_CIN_BASE_ADDR_LOW */
1045#define MME_SHADOW_2_CIN_BASE_ADDR_LOW_V_SHIFT 0
1046#define MME_SHADOW_2_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1047
1048/* MME_SHADOW_2_COUT_BASE_ADDR_LOW */
1049#define MME_SHADOW_2_COUT_BASE_ADDR_LOW_V_SHIFT 0
1050#define MME_SHADOW_2_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1051
1052/* MME_SHADOW_2_BIAS_BASE_ADDR_LOW */
1053#define MME_SHADOW_2_BIAS_BASE_ADDR_LOW_V_SHIFT 0
1054#define MME_SHADOW_2_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1055
1056/* MME_SHADOW_2_HEADER */
1057#define MME_SHADOW_2_HEADER_SIGNAL_MASK_SHIFT 0
1058#define MME_SHADOW_2_HEADER_SIGNAL_MASK_MASK 0x1F
1059#define MME_SHADOW_2_HEADER_SIGNAL_EN_SHIFT 5
1060#define MME_SHADOW_2_HEADER_SIGNAL_EN_MASK 0x20
1061#define MME_SHADOW_2_HEADER_TRANS_A_SHIFT 6
1062#define MME_SHADOW_2_HEADER_TRANS_A_MASK 0x40
1063#define MME_SHADOW_2_HEADER_LOWER_A_SHIFT 7
1064#define MME_SHADOW_2_HEADER_LOWER_A_MASK 0x80
1065#define MME_SHADOW_2_HEADER_ACCUM_MASK_SHIFT 8
1066#define MME_SHADOW_2_HEADER_ACCUM_MASK_MASK 0xF00
1067#define MME_SHADOW_2_HEADER_LOAD_BIAS_SHIFT 12
1068#define MME_SHADOW_2_HEADER_LOAD_BIAS_MASK 0x1000
1069#define MME_SHADOW_2_HEADER_LOAD_CIN_SHIFT 13
1070#define MME_SHADOW_2_HEADER_LOAD_CIN_MASK 0x2000
1071#define MME_SHADOW_2_HEADER_STORE_OUT_SHIFT 15
1072#define MME_SHADOW_2_HEADER_STORE_OUT_MASK 0x8000
1073#define MME_SHADOW_2_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
1074#define MME_SHADOW_2_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
1075#define MME_SHADOW_2_HEADER_ADVANCE_A_SHIFT 17
1076#define MME_SHADOW_2_HEADER_ADVANCE_A_MASK 0x20000
1077#define MME_SHADOW_2_HEADER_ADVANCE_B_SHIFT 18
1078#define MME_SHADOW_2_HEADER_ADVANCE_B_MASK 0x40000
1079#define MME_SHADOW_2_HEADER_ADVANCE_CIN_SHIFT 19
1080#define MME_SHADOW_2_HEADER_ADVANCE_CIN_MASK 0x80000
1081#define MME_SHADOW_2_HEADER_ADVANCE_COUT_SHIFT 20
1082#define MME_SHADOW_2_HEADER_ADVANCE_COUT_MASK 0x100000
1083#define MME_SHADOW_2_HEADER_COMPRESSED_B_SHIFT 21
1084#define MME_SHADOW_2_HEADER_COMPRESSED_B_MASK 0x200000
1085#define MME_SHADOW_2_HEADER_MASK_CONV_END_SHIFT 22
1086#define MME_SHADOW_2_HEADER_MASK_CONV_END_MASK 0x400000
1087#define MME_SHADOW_2_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
1088#define MME_SHADOW_2_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
1089#define MME_SHADOW_2_HEADER_AB_DATA_TYPE_SHIFT 24
1090#define MME_SHADOW_2_HEADER_AB_DATA_TYPE_MASK 0x3000000
1091#define MME_SHADOW_2_HEADER_CIN_DATA_TYPE_SHIFT 26
1092#define MME_SHADOW_2_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
1093#define MME_SHADOW_2_HEADER_COUT_DATA_TYPE_SHIFT 29
1094#define MME_SHADOW_2_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
1095
1096/* MME_SHADOW_2_KERNEL_SIZE_MINUS_1 */
1097#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
1098#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
1099#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
1100#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
1101#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
1102#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
1103#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
1104#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
1105
1106/* MME_SHADOW_2_ASSOCIATED_DIMS */
1107#define MME_SHADOW_2_ASSOCIATED_DIMS_A_0_SHIFT 0
1108#define MME_SHADOW_2_ASSOCIATED_DIMS_A_0_MASK 0x7
1109#define MME_SHADOW_2_ASSOCIATED_DIMS_B_0_SHIFT 3
1110#define MME_SHADOW_2_ASSOCIATED_DIMS_B_0_MASK 0x38
1111#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_0_SHIFT 6
1112#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
1113#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_0_SHIFT 9
1114#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
1115#define MME_SHADOW_2_ASSOCIATED_DIMS_A_1_SHIFT 16
1116#define MME_SHADOW_2_ASSOCIATED_DIMS_A_1_MASK 0x70000
1117#define MME_SHADOW_2_ASSOCIATED_DIMS_B_1_SHIFT 19
1118#define MME_SHADOW_2_ASSOCIATED_DIMS_B_1_MASK 0x380000
1119#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_1_SHIFT 22
1120#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
1121#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_1_SHIFT 25
1122#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
1123
1124/* MME_SHADOW_2_COUT_SCALE */
1125#define MME_SHADOW_2_COUT_SCALE_V_SHIFT 0
1126#define MME_SHADOW_2_COUT_SCALE_V_MASK 0xFFFFFFFF
1127
1128/* MME_SHADOW_2_CIN_SCALE */
1129#define MME_SHADOW_2_CIN_SCALE_V_SHIFT 0
1130#define MME_SHADOW_2_CIN_SCALE_V_MASK 0xFFFFFFFF
1131
1132/* MME_SHADOW_2_GEMMLOWP_ZP */
1133#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
1134#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
1135#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
1136#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
1137#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_B_SHIFT 18
1138#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
1139#define MME_SHADOW_2_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
1140#define MME_SHADOW_2_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
1141#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_SHIFT 28
1142#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
1143#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
1144#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
1145#define MME_SHADOW_2_GEMMLOWP_ZP_RELU_EN_SHIFT 30
1146#define MME_SHADOW_2_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
1147
1148/* MME_SHADOW_2_GEMMLOWP_EXPONENT */
1149#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
1150#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
1151#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
1152#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
1153#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
1154#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
1155#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
1156#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
1157
1158/* MME_SHADOW_2_A_ROI_BASE_OFFSET */
1159#define MME_SHADOW_2_A_ROI_BASE_OFFSET_V_SHIFT 0
1160#define MME_SHADOW_2_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
1161
1162/* MME_SHADOW_2_A_VALID_ELEMENTS */
1163#define MME_SHADOW_2_A_VALID_ELEMENTS_V_SHIFT 0
1164#define MME_SHADOW_2_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
1165
1166/* MME_SHADOW_2_A_LOOP_STRIDE */
1167#define MME_SHADOW_2_A_LOOP_STRIDE_V_SHIFT 0
1168#define MME_SHADOW_2_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
1169
1170/* MME_SHADOW_2_A_ROI_SIZE */
1171#define MME_SHADOW_2_A_ROI_SIZE_V_SHIFT 0
1172#define MME_SHADOW_2_A_ROI_SIZE_V_MASK 0xFFFFFFFF
1173
1174/* MME_SHADOW_2_A_SPATIAL_START_OFFSET */
1175#define MME_SHADOW_2_A_SPATIAL_START_OFFSET_V_SHIFT 0
1176#define MME_SHADOW_2_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
1177
1178/* MME_SHADOW_2_A_SPATIAL_STRIDE */
1179#define MME_SHADOW_2_A_SPATIAL_STRIDE_V_SHIFT 0
1180#define MME_SHADOW_2_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
1181
1182/* MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1 */
1183#define MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
1184#define MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
1185
1186/* MME_SHADOW_2_B_ROI_BASE_OFFSET */
1187#define MME_SHADOW_2_B_ROI_BASE_OFFSET_V_SHIFT 0
1188#define MME_SHADOW_2_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
1189
1190/* MME_SHADOW_2_B_VALID_ELEMENTS */
1191#define MME_SHADOW_2_B_VALID_ELEMENTS_V_SHIFT 0
1192#define MME_SHADOW_2_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
1193
1194/* MME_SHADOW_2_B_LOOP_STRIDE */
1195#define MME_SHADOW_2_B_LOOP_STRIDE_V_SHIFT 0
1196#define MME_SHADOW_2_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
1197
1198/* MME_SHADOW_2_B_ROI_SIZE */
1199#define MME_SHADOW_2_B_ROI_SIZE_V_SHIFT 0
1200#define MME_SHADOW_2_B_ROI_SIZE_V_MASK 0xFFFFFFFF
1201
1202/* MME_SHADOW_2_B_SPATIAL_START_OFFSET */
1203#define MME_SHADOW_2_B_SPATIAL_START_OFFSET_V_SHIFT 0
1204#define MME_SHADOW_2_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
1205
1206/* MME_SHADOW_2_B_SPATIAL_STRIDE */
1207#define MME_SHADOW_2_B_SPATIAL_STRIDE_V_SHIFT 0
1208#define MME_SHADOW_2_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
1209
1210/* MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1 */
1211#define MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
1212#define MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
1213
1214/* MME_SHADOW_2_C_ROI_BASE_OFFSET */
1215#define MME_SHADOW_2_C_ROI_BASE_OFFSET_V_SHIFT 0
1216#define MME_SHADOW_2_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
1217
1218/* MME_SHADOW_2_C_VALID_ELEMENTS */
1219#define MME_SHADOW_2_C_VALID_ELEMENTS_V_SHIFT 0
1220#define MME_SHADOW_2_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
1221
1222/* MME_SHADOW_2_C_LOOP_STRIDE */
1223#define MME_SHADOW_2_C_LOOP_STRIDE_V_SHIFT 0
1224#define MME_SHADOW_2_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
1225
1226/* MME_SHADOW_2_C_ROI_SIZE */
1227#define MME_SHADOW_2_C_ROI_SIZE_V_SHIFT 0
1228#define MME_SHADOW_2_C_ROI_SIZE_V_MASK 0xFFFFFFFF
1229
1230/* MME_SHADOW_2_C_SPATIAL_START_OFFSET */
1231#define MME_SHADOW_2_C_SPATIAL_START_OFFSET_V_SHIFT 0
1232#define MME_SHADOW_2_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
1233
1234/* MME_SHADOW_2_C_SPATIAL_STRIDE */
1235#define MME_SHADOW_2_C_SPATIAL_STRIDE_V_SHIFT 0
1236#define MME_SHADOW_2_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
1237
1238/* MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1 */
1239#define MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
1240#define MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
1241
1242/* MME_SHADOW_2_SYNC_OBJECT_MESSAGE */
1243#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
1244#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
1245#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
1246#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
1247#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
1248#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
1249
1250/* MME_SHADOW_2_E_PADDING_VALUE_A */
1251#define MME_SHADOW_2_E_PADDING_VALUE_A_V_SHIFT 0
1252#define MME_SHADOW_2_E_PADDING_VALUE_A_V_MASK 0xFFFF
1253
1254/* MME_SHADOW_2_E_NUM_ITERATION_MINUS_1 */
1255#define MME_SHADOW_2_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
1256#define MME_SHADOW_2_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
1257
1258/* MME_SHADOW_2_E_BUBBLES_PER_SPLIT */
1259#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_A_SHIFT 0
1260#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
1261#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_B_SHIFT 8
1262#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
1263#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
1264#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
1265#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
1266#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
1267
1268/* MME_SHADOW_3_STATUS */
1269#define MME_SHADOW_3_STATUS_A_SHIFT 0
1270#define MME_SHADOW_3_STATUS_A_MASK 0x1
1271#define MME_SHADOW_3_STATUS_B_SHIFT 1
1272#define MME_SHADOW_3_STATUS_B_MASK 0x2
1273#define MME_SHADOW_3_STATUS_CIN_SHIFT 2
1274#define MME_SHADOW_3_STATUS_CIN_MASK 0x4
1275#define MME_SHADOW_3_STATUS_COUT_SHIFT 3
1276#define MME_SHADOW_3_STATUS_COUT_MASK 0x8
1277#define MME_SHADOW_3_STATUS_TE_SHIFT 4
1278#define MME_SHADOW_3_STATUS_TE_MASK 0x10
1279#define MME_SHADOW_3_STATUS_LD_SHIFT 5
1280#define MME_SHADOW_3_STATUS_LD_MASK 0x20
1281#define MME_SHADOW_3_STATUS_ST_SHIFT 6
1282#define MME_SHADOW_3_STATUS_ST_MASK 0x40
1283
1284/* MME_SHADOW_3_A_BASE_ADDR_HIGH */
1285#define MME_SHADOW_3_A_BASE_ADDR_HIGH_V_SHIFT 0
1286#define MME_SHADOW_3_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1287
1288/* MME_SHADOW_3_B_BASE_ADDR_HIGH */
1289#define MME_SHADOW_3_B_BASE_ADDR_HIGH_V_SHIFT 0
1290#define MME_SHADOW_3_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1291
1292/* MME_SHADOW_3_CIN_BASE_ADDR_HIGH */
1293#define MME_SHADOW_3_CIN_BASE_ADDR_HIGH_V_SHIFT 0
1294#define MME_SHADOW_3_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1295
1296/* MME_SHADOW_3_COUT_BASE_ADDR_HIGH */
1297#define MME_SHADOW_3_COUT_BASE_ADDR_HIGH_V_SHIFT 0
1298#define MME_SHADOW_3_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1299
1300/* MME_SHADOW_3_BIAS_BASE_ADDR_HIGH */
1301#define MME_SHADOW_3_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
1302#define MME_SHADOW_3_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1303
1304/* MME_SHADOW_3_A_BASE_ADDR_LOW */
1305#define MME_SHADOW_3_A_BASE_ADDR_LOW_V_SHIFT 0
1306#define MME_SHADOW_3_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1307
1308/* MME_SHADOW_3_B_BASE_ADDR_LOW */
1309#define MME_SHADOW_3_B_BASE_ADDR_LOW_V_SHIFT 0
1310#define MME_SHADOW_3_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1311
1312/* MME_SHADOW_3_CIN_BASE_ADDR_LOW */
1313#define MME_SHADOW_3_CIN_BASE_ADDR_LOW_V_SHIFT 0
1314#define MME_SHADOW_3_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1315
1316/* MME_SHADOW_3_COUT_BASE_ADDR_LOW */
1317#define MME_SHADOW_3_COUT_BASE_ADDR_LOW_V_SHIFT 0
1318#define MME_SHADOW_3_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1319
1320/* MME_SHADOW_3_BIAS_BASE_ADDR_LOW */
1321#define MME_SHADOW_3_BIAS_BASE_ADDR_LOW_V_SHIFT 0
1322#define MME_SHADOW_3_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1323
1324/* MME_SHADOW_3_HEADER */
1325#define MME_SHADOW_3_HEADER_SIGNAL_MASK_SHIFT 0
1326#define MME_SHADOW_3_HEADER_SIGNAL_MASK_MASK 0x1F
1327#define MME_SHADOW_3_HEADER_SIGNAL_EN_SHIFT 5
1328#define MME_SHADOW_3_HEADER_SIGNAL_EN_MASK 0x20
1329#define MME_SHADOW_3_HEADER_TRANS_A_SHIFT 6
1330#define MME_SHADOW_3_HEADER_TRANS_A_MASK 0x40
1331#define MME_SHADOW_3_HEADER_LOWER_A_SHIFT 7
1332#define MME_SHADOW_3_HEADER_LOWER_A_MASK 0x80
1333#define MME_SHADOW_3_HEADER_ACCUM_MASK_SHIFT 8
1334#define MME_SHADOW_3_HEADER_ACCUM_MASK_MASK 0xF00
1335#define MME_SHADOW_3_HEADER_LOAD_BIAS_SHIFT 12
1336#define MME_SHADOW_3_HEADER_LOAD_BIAS_MASK 0x1000
1337#define MME_SHADOW_3_HEADER_LOAD_CIN_SHIFT 13
1338#define MME_SHADOW_3_HEADER_LOAD_CIN_MASK 0x2000
1339#define MME_SHADOW_3_HEADER_STORE_OUT_SHIFT 15
1340#define MME_SHADOW_3_HEADER_STORE_OUT_MASK 0x8000
1341#define MME_SHADOW_3_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
1342#define MME_SHADOW_3_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
1343#define MME_SHADOW_3_HEADER_ADVANCE_A_SHIFT 17
1344#define MME_SHADOW_3_HEADER_ADVANCE_A_MASK 0x20000
1345#define MME_SHADOW_3_HEADER_ADVANCE_B_SHIFT 18
1346#define MME_SHADOW_3_HEADER_ADVANCE_B_MASK 0x40000
1347#define MME_SHADOW_3_HEADER_ADVANCE_CIN_SHIFT 19
1348#define MME_SHADOW_3_HEADER_ADVANCE_CIN_MASK 0x80000
1349#define MME_SHADOW_3_HEADER_ADVANCE_COUT_SHIFT 20
1350#define MME_SHADOW_3_HEADER_ADVANCE_COUT_MASK 0x100000
1351#define MME_SHADOW_3_HEADER_COMPRESSED_B_SHIFT 21
1352#define MME_SHADOW_3_HEADER_COMPRESSED_B_MASK 0x200000
1353#define MME_SHADOW_3_HEADER_MASK_CONV_END_SHIFT 22
1354#define MME_SHADOW_3_HEADER_MASK_CONV_END_MASK 0x400000
1355#define MME_SHADOW_3_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
1356#define MME_SHADOW_3_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
1357#define MME_SHADOW_3_HEADER_AB_DATA_TYPE_SHIFT 24
1358#define MME_SHADOW_3_HEADER_AB_DATA_TYPE_MASK 0x3000000
1359#define MME_SHADOW_3_HEADER_CIN_DATA_TYPE_SHIFT 26
1360#define MME_SHADOW_3_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
1361#define MME_SHADOW_3_HEADER_COUT_DATA_TYPE_SHIFT 29
1362#define MME_SHADOW_3_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
1363
1364/* MME_SHADOW_3_KERNEL_SIZE_MINUS_1 */
1365#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
1366#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
1367#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
1368#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
1369#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
1370#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
1371#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
1372#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
1373
1374/* MME_SHADOW_3_ASSOCIATED_DIMS */
1375#define MME_SHADOW_3_ASSOCIATED_DIMS_A_0_SHIFT 0
1376#define MME_SHADOW_3_ASSOCIATED_DIMS_A_0_MASK 0x7
1377#define MME_SHADOW_3_ASSOCIATED_DIMS_B_0_SHIFT 3
1378#define MME_SHADOW_3_ASSOCIATED_DIMS_B_0_MASK 0x38
1379#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_0_SHIFT 6
1380#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
1381#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_0_SHIFT 9
1382#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
1383#define MME_SHADOW_3_ASSOCIATED_DIMS_A_1_SHIFT 16
1384#define MME_SHADOW_3_ASSOCIATED_DIMS_A_1_MASK 0x70000
1385#define MME_SHADOW_3_ASSOCIATED_DIMS_B_1_SHIFT 19
1386#define MME_SHADOW_3_ASSOCIATED_DIMS_B_1_MASK 0x380000
1387#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_1_SHIFT 22
1388#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
1389#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_1_SHIFT 25
1390#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
1391
1392/* MME_SHADOW_3_COUT_SCALE */
1393#define MME_SHADOW_3_COUT_SCALE_V_SHIFT 0
1394#define MME_SHADOW_3_COUT_SCALE_V_MASK 0xFFFFFFFF
1395
1396/* MME_SHADOW_3_CIN_SCALE */
1397#define MME_SHADOW_3_CIN_SCALE_V_SHIFT 0
1398#define MME_SHADOW_3_CIN_SCALE_V_MASK 0xFFFFFFFF
1399
1400/* MME_SHADOW_3_GEMMLOWP_ZP */
1401#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
1402#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
1403#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
1404#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
1405#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_B_SHIFT 18
1406#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
1407#define MME_SHADOW_3_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
1408#define MME_SHADOW_3_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
1409#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_SHIFT 28
1410#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
1411#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
1412#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
1413#define MME_SHADOW_3_GEMMLOWP_ZP_RELU_EN_SHIFT 30
1414#define MME_SHADOW_3_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
1415
1416/* MME_SHADOW_3_GEMMLOWP_EXPONENT */
1417#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
1418#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
1419#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
1420#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
1421#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
1422#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
1423#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
1424#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
1425
1426/* MME_SHADOW_3_A_ROI_BASE_OFFSET */
1427#define MME_SHADOW_3_A_ROI_BASE_OFFSET_V_SHIFT 0
1428#define MME_SHADOW_3_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
1429
1430/* MME_SHADOW_3_A_VALID_ELEMENTS */
1431#define MME_SHADOW_3_A_VALID_ELEMENTS_V_SHIFT 0
1432#define MME_SHADOW_3_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
1433
1434/* MME_SHADOW_3_A_LOOP_STRIDE */
1435#define MME_SHADOW_3_A_LOOP_STRIDE_V_SHIFT 0
1436#define MME_SHADOW_3_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
1437
1438/* MME_SHADOW_3_A_ROI_SIZE */
1439#define MME_SHADOW_3_A_ROI_SIZE_V_SHIFT 0
1440#define MME_SHADOW_3_A_ROI_SIZE_V_MASK 0xFFFFFFFF
1441
1442/* MME_SHADOW_3_A_SPATIAL_START_OFFSET */
1443#define MME_SHADOW_3_A_SPATIAL_START_OFFSET_V_SHIFT 0
1444#define MME_SHADOW_3_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
1445
1446/* MME_SHADOW_3_A_SPATIAL_STRIDE */
1447#define MME_SHADOW_3_A_SPATIAL_STRIDE_V_SHIFT 0
1448#define MME_SHADOW_3_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
1449
1450/* MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1 */
1451#define MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
1452#define MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
1453
1454/* MME_SHADOW_3_B_ROI_BASE_OFFSET */
1455#define MME_SHADOW_3_B_ROI_BASE_OFFSET_V_SHIFT 0
1456#define MME_SHADOW_3_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
1457
1458/* MME_SHADOW_3_B_VALID_ELEMENTS */
1459#define MME_SHADOW_3_B_VALID_ELEMENTS_V_SHIFT 0
1460#define MME_SHADOW_3_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
1461
1462/* MME_SHADOW_3_B_LOOP_STRIDE */
1463#define MME_SHADOW_3_B_LOOP_STRIDE_V_SHIFT 0
1464#define MME_SHADOW_3_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
1465
1466/* MME_SHADOW_3_B_ROI_SIZE */
1467#define MME_SHADOW_3_B_ROI_SIZE_V_SHIFT 0
1468#define MME_SHADOW_3_B_ROI_SIZE_V_MASK 0xFFFFFFFF
1469
1470/* MME_SHADOW_3_B_SPATIAL_START_OFFSET */
1471#define MME_SHADOW_3_B_SPATIAL_START_OFFSET_V_SHIFT 0
1472#define MME_SHADOW_3_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
1473
1474/* MME_SHADOW_3_B_SPATIAL_STRIDE */
1475#define MME_SHADOW_3_B_SPATIAL_STRIDE_V_SHIFT 0
1476#define MME_SHADOW_3_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
1477
1478/* MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1 */
1479#define MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
1480#define MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
1481
1482/* MME_SHADOW_3_C_ROI_BASE_OFFSET */
1483#define MME_SHADOW_3_C_ROI_BASE_OFFSET_V_SHIFT 0
1484#define MME_SHADOW_3_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
1485
1486/* MME_SHADOW_3_C_VALID_ELEMENTS */
1487#define MME_SHADOW_3_C_VALID_ELEMENTS_V_SHIFT 0
1488#define MME_SHADOW_3_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
1489
1490/* MME_SHADOW_3_C_LOOP_STRIDE */
1491#define MME_SHADOW_3_C_LOOP_STRIDE_V_SHIFT 0
1492#define MME_SHADOW_3_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
1493
1494/* MME_SHADOW_3_C_ROI_SIZE */
1495#define MME_SHADOW_3_C_ROI_SIZE_V_SHIFT 0
1496#define MME_SHADOW_3_C_ROI_SIZE_V_MASK 0xFFFFFFFF
1497
1498/* MME_SHADOW_3_C_SPATIAL_START_OFFSET */
1499#define MME_SHADOW_3_C_SPATIAL_START_OFFSET_V_SHIFT 0
1500#define MME_SHADOW_3_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
1501
1502/* MME_SHADOW_3_C_SPATIAL_STRIDE */
1503#define MME_SHADOW_3_C_SPATIAL_STRIDE_V_SHIFT 0
1504#define MME_SHADOW_3_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
1505
1506/* MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1 */
1507#define MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
1508#define MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
1509
1510/* MME_SHADOW_3_SYNC_OBJECT_MESSAGE */
1511#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
1512#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
1513#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
1514#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
1515#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
1516#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
1517
1518/* MME_SHADOW_3_E_PADDING_VALUE_A */
1519#define MME_SHADOW_3_E_PADDING_VALUE_A_V_SHIFT 0
1520#define MME_SHADOW_3_E_PADDING_VALUE_A_V_MASK 0xFFFF
1521
1522/* MME_SHADOW_3_E_NUM_ITERATION_MINUS_1 */
1523#define MME_SHADOW_3_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
1524#define MME_SHADOW_3_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
1525
1526/* MME_SHADOW_3_E_BUBBLES_PER_SPLIT */
1527#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_A_SHIFT 0
1528#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
1529#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_B_SHIFT 8
1530#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
1531#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
1532#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
1533#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
1534#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
1535
1536#endif /* ASIC_REG_MME_MASKS_H_ */
1537
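The _SHIFT/_MASK pairs in the header above always come in matched sets: the mask selects a field's bits within the 32-bit register, and the shift places a value into (or extracts it from) that position. A minimal sketch of that usage pattern follows; it is an illustration, not code from this patch. The FIELD_PREP_EX/FIELD_GET_EX helpers and build_so_message() are hypothetical names, while the MME_SHADOW_2_SYNC_OBJECT_MESSAGE_* macros are the ones defined above.

/*
 * Illustrative sketch only (not part of the patch). The helper macros below
 * are hypothetical; only the MME_SHADOW_2_SYNC_OBJECT_MESSAGE_* field macros
 * come from the header above.
 */
#include <stdint.h>

/* Place 'val' into the field described by (mask, shift). */
#define FIELD_PREP_EX(mask, shift, val)  (((uint32_t)(val) << (shift)) & (mask))
/* Extract the field described by (mask, shift) from 'reg'. */
#define FIELD_GET_EX(mask, shift, reg)   (((uint32_t)(reg) & (mask)) >> (shift))

static uint32_t build_so_message(uint16_t write_value, uint16_t so_offset, int op_inc)
{
	/* Compose the three fields of the sync-object message register. */
	return FIELD_PREP_EX(MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK,
			     MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT,
			     write_value) |
	       FIELD_PREP_EX(MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK,
			     MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT,
			     so_offset) |
	       FIELD_PREP_EX(MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK,
			     MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT,
			     op_inc ? 1 : 0);
}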
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
new file mode 100644
index 000000000000..d4bfa58dce19
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
@@ -0,0 +1,465 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MME_QM_MASKS_H_
14#define ASIC_REG_MME_QM_MASKS_H_
15
16/*
17 *****************************************
18 * MME_QM (Prototype: QMAN)
19 *****************************************
20 */
21
22/* MME_QM_GLBL_CFG0 */
23#define MME_QM_GLBL_CFG0_PQF_EN_SHIFT 0
24#define MME_QM_GLBL_CFG0_PQF_EN_MASK 0x1
25#define MME_QM_GLBL_CFG0_CQF_EN_SHIFT 1
26#define MME_QM_GLBL_CFG0_CQF_EN_MASK 0x2
27#define MME_QM_GLBL_CFG0_CP_EN_SHIFT 2
28#define MME_QM_GLBL_CFG0_CP_EN_MASK 0x4
29#define MME_QM_GLBL_CFG0_DMA_EN_SHIFT 3
30#define MME_QM_GLBL_CFG0_DMA_EN_MASK 0x8
31
32/* MME_QM_GLBL_CFG1 */
33#define MME_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
34#define MME_QM_GLBL_CFG1_PQF_STOP_MASK 0x1
35#define MME_QM_GLBL_CFG1_CQF_STOP_SHIFT 1
36#define MME_QM_GLBL_CFG1_CQF_STOP_MASK 0x2
37#define MME_QM_GLBL_CFG1_CP_STOP_SHIFT 2
38#define MME_QM_GLBL_CFG1_CP_STOP_MASK 0x4
39#define MME_QM_GLBL_CFG1_DMA_STOP_SHIFT 3
40#define MME_QM_GLBL_CFG1_DMA_STOP_MASK 0x8
41#define MME_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 8
42#define MME_QM_GLBL_CFG1_PQF_FLUSH_MASK 0x100
43#define MME_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 9
44#define MME_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x200
45#define MME_QM_GLBL_CFG1_CP_FLUSH_SHIFT 10
46#define MME_QM_GLBL_CFG1_CP_FLUSH_MASK 0x400
47#define MME_QM_GLBL_CFG1_DMA_FLUSH_SHIFT 11
48#define MME_QM_GLBL_CFG1_DMA_FLUSH_MASK 0x800
49
50/* MME_QM_GLBL_PROT */
51#define MME_QM_GLBL_PROT_PQF_PROT_SHIFT 0
52#define MME_QM_GLBL_PROT_PQF_PROT_MASK 0x1
53#define MME_QM_GLBL_PROT_CQF_PROT_SHIFT 1
54#define MME_QM_GLBL_PROT_CQF_PROT_MASK 0x2
55#define MME_QM_GLBL_PROT_CP_PROT_SHIFT 2
56#define MME_QM_GLBL_PROT_CP_PROT_MASK 0x4
57#define MME_QM_GLBL_PROT_DMA_PROT_SHIFT 3
58#define MME_QM_GLBL_PROT_DMA_PROT_MASK 0x8
59#define MME_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
60#define MME_QM_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
61#define MME_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
62#define MME_QM_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
63#define MME_QM_GLBL_PROT_CP_ERR_PROT_SHIFT 6
64#define MME_QM_GLBL_PROT_CP_ERR_PROT_MASK 0x40
65#define MME_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
66#define MME_QM_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
67
68/* MME_QM_GLBL_ERR_CFG */
69#define MME_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
70#define MME_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
71#define MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
72#define MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
73#define MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
74#define MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
75#define MME_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
76#define MME_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
77#define MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
78#define MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
79#define MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
80#define MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
81#define MME_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
82#define MME_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
83#define MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
84#define MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
85#define MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
86#define MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
87#define MME_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
88#define MME_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
89#define MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
90#define MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
91#define MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
92#define MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
93
94/* MME_QM_GLBL_ERR_ADDR_LO */
95#define MME_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
96#define MME_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
97
98/* MME_QM_GLBL_ERR_ADDR_HI */
99#define MME_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
100#define MME_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
101
102/* MME_QM_GLBL_ERR_WDATA */
103#define MME_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
104#define MME_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
105
106/* MME_QM_GLBL_SECURE_PROPS */
107#define MME_QM_GLBL_SECURE_PROPS_ASID_SHIFT 0
108#define MME_QM_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
109#define MME_QM_GLBL_SECURE_PROPS_MMBP_SHIFT 10
110#define MME_QM_GLBL_SECURE_PROPS_MMBP_MASK 0x400
111
112/* MME_QM_GLBL_NON_SECURE_PROPS */
113#define MME_QM_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
114#define MME_QM_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
115#define MME_QM_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
116#define MME_QM_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
117
118/* MME_QM_GLBL_STS0 */
119#define MME_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
120#define MME_QM_GLBL_STS0_PQF_IDLE_MASK 0x1
121#define MME_QM_GLBL_STS0_CQF_IDLE_SHIFT 1
122#define MME_QM_GLBL_STS0_CQF_IDLE_MASK 0x2
123#define MME_QM_GLBL_STS0_CP_IDLE_SHIFT 2
124#define MME_QM_GLBL_STS0_CP_IDLE_MASK 0x4
125#define MME_QM_GLBL_STS0_DMA_IDLE_SHIFT 3
126#define MME_QM_GLBL_STS0_DMA_IDLE_MASK 0x8
127#define MME_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 4
128#define MME_QM_GLBL_STS0_PQF_IS_STOP_MASK 0x10
129#define MME_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 5
130#define MME_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x20
131#define MME_QM_GLBL_STS0_CP_IS_STOP_SHIFT 6
132#define MME_QM_GLBL_STS0_CP_IS_STOP_MASK 0x40
133#define MME_QM_GLBL_STS0_DMA_IS_STOP_SHIFT 7
134#define MME_QM_GLBL_STS0_DMA_IS_STOP_MASK 0x80
135
136/* MME_QM_GLBL_STS1 */
137#define MME_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0
138#define MME_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1
139#define MME_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1
140#define MME_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2
141#define MME_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2
142#define MME_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4
143#define MME_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
144#define MME_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
145#define MME_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4
146#define MME_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10
147#define MME_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
148#define MME_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
149#define MME_QM_GLBL_STS1_DMA_RD_ERR_SHIFT 8
150#define MME_QM_GLBL_STS1_DMA_RD_ERR_MASK 0x100
151#define MME_QM_GLBL_STS1_DMA_WR_ERR_SHIFT 9
152#define MME_QM_GLBL_STS1_DMA_WR_ERR_MASK 0x200
153#define MME_QM_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
154#define MME_QM_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
155#define MME_QM_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
156#define MME_QM_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
157
158/* MME_QM_PQ_BASE_LO */
159#define MME_QM_PQ_BASE_LO_VAL_SHIFT 0
160#define MME_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
161
162/* MME_QM_PQ_BASE_HI */
163#define MME_QM_PQ_BASE_HI_VAL_SHIFT 0
164#define MME_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
165
166/* MME_QM_PQ_SIZE */
167#define MME_QM_PQ_SIZE_VAL_SHIFT 0
168#define MME_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF
169
170/* MME_QM_PQ_PI */
171#define MME_QM_PQ_PI_VAL_SHIFT 0
172#define MME_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
173
174/* MME_QM_PQ_CI */
175#define MME_QM_PQ_CI_VAL_SHIFT 0
176#define MME_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
177
178/* MME_QM_PQ_CFG0 */
179#define MME_QM_PQ_CFG0_RESERVED_SHIFT 0
180#define MME_QM_PQ_CFG0_RESERVED_MASK 0x1
181
182/* MME_QM_PQ_CFG1 */
183#define MME_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
184#define MME_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
185#define MME_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
186#define MME_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
187
188/* MME_QM_PQ_ARUSER */
189#define MME_QM_PQ_ARUSER_NOSNOOP_SHIFT 0
190#define MME_QM_PQ_ARUSER_NOSNOOP_MASK 0x1
191#define MME_QM_PQ_ARUSER_WORD_SHIFT 1
192#define MME_QM_PQ_ARUSER_WORD_MASK 0x2
193
194/* MME_QM_PQ_PUSH0 */
195#define MME_QM_PQ_PUSH0_PTR_LO_SHIFT 0
196#define MME_QM_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF
197
198/* MME_QM_PQ_PUSH1 */
199#define MME_QM_PQ_PUSH1_PTR_HI_SHIFT 0
200#define MME_QM_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF
201
202/* MME_QM_PQ_PUSH2 */
203#define MME_QM_PQ_PUSH2_TSIZE_SHIFT 0
204#define MME_QM_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF
205
206/* MME_QM_PQ_PUSH3 */
207#define MME_QM_PQ_PUSH3_RPT_SHIFT 0
208#define MME_QM_PQ_PUSH3_RPT_MASK 0xFFFF
209#define MME_QM_PQ_PUSH3_CTL_SHIFT 16
210#define MME_QM_PQ_PUSH3_CTL_MASK 0xFFFF0000
211
212/* MME_QM_PQ_STS0 */
213#define MME_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
214#define MME_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
215#define MME_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16
216#define MME_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
217
218/* MME_QM_PQ_STS1 */
219#define MME_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
220#define MME_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
221#define MME_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
222#define MME_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
223#define MME_QM_PQ_STS1_PQ_BUSY_SHIFT 31
224#define MME_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000
225
226/* MME_QM_PQ_RD_RATE_LIM_EN */
227#define MME_QM_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0
228#define MME_QM_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1
229
230/* MME_QM_PQ_RD_RATE_LIM_RST_TOKEN */
231#define MME_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
232#define MME_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
233
234/* MME_QM_PQ_RD_RATE_LIM_SAT */
235#define MME_QM_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
236#define MME_QM_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
237
238/* MME_QM_PQ_RD_RATE_LIM_TOUT */
239#define MME_QM_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
240#define MME_QM_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
241
242/* MME_QM_CQ_CFG0 */
243#define MME_QM_CQ_CFG0_RESERVED_SHIFT 0
244#define MME_QM_CQ_CFG0_RESERVED_MASK 0x1
245
246/* MME_QM_CQ_CFG1 */
247#define MME_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0
248#define MME_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
249#define MME_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
250#define MME_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
251
252/* MME_QM_CQ_ARUSER */
253#define MME_QM_CQ_ARUSER_NOSNOOP_SHIFT 0
254#define MME_QM_CQ_ARUSER_NOSNOOP_MASK 0x1
255#define MME_QM_CQ_ARUSER_WORD_SHIFT 1
256#define MME_QM_CQ_ARUSER_WORD_MASK 0x2
257
258/* MME_QM_CQ_PTR_LO */
259#define MME_QM_CQ_PTR_LO_VAL_SHIFT 0
260#define MME_QM_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
261
262/* MME_QM_CQ_PTR_HI */
263#define MME_QM_CQ_PTR_HI_VAL_SHIFT 0
264#define MME_QM_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
265
266/* MME_QM_CQ_TSIZE */
267#define MME_QM_CQ_TSIZE_VAL_SHIFT 0
268#define MME_QM_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
269
270/* MME_QM_CQ_CTL */
271#define MME_QM_CQ_CTL_RPT_SHIFT 0
272#define MME_QM_CQ_CTL_RPT_MASK 0xFFFF
273#define MME_QM_CQ_CTL_CTL_SHIFT 16
274#define MME_QM_CQ_CTL_CTL_MASK 0xFFFF0000
275
276/* MME_QM_CQ_PTR_LO_STS */
277#define MME_QM_CQ_PTR_LO_STS_VAL_SHIFT 0
278#define MME_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
279
280/* MME_QM_CQ_PTR_HI_STS */
281#define MME_QM_CQ_PTR_HI_STS_VAL_SHIFT 0
282#define MME_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
283
284/* MME_QM_CQ_TSIZE_STS */
285#define MME_QM_CQ_TSIZE_STS_VAL_SHIFT 0
286#define MME_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
287
288/* MME_QM_CQ_CTL_STS */
289#define MME_QM_CQ_CTL_STS_RPT_SHIFT 0
290#define MME_QM_CQ_CTL_STS_RPT_MASK 0xFFFF
291#define MME_QM_CQ_CTL_STS_CTL_SHIFT 16
292#define MME_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000
293
294/* MME_QM_CQ_STS0 */
295#define MME_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
296#define MME_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
297#define MME_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16
298#define MME_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
299
300/* MME_QM_CQ_STS1 */
301#define MME_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
302#define MME_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
303#define MME_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
304#define MME_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
305#define MME_QM_CQ_STS1_CQ_BUSY_SHIFT 31
306#define MME_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000
307
308/* MME_QM_CQ_RD_RATE_LIM_EN */
309#define MME_QM_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
310#define MME_QM_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
311
312/* MME_QM_CQ_RD_RATE_LIM_RST_TOKEN */
313#define MME_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
314#define MME_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
315
316/* MME_QM_CQ_RD_RATE_LIM_SAT */
317#define MME_QM_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
318#define MME_QM_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
319
320/* MME_QM_CQ_RD_RATE_LIM_TOUT */
321#define MME_QM_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
322#define MME_QM_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
323
324/* MME_QM_CQ_IFIFO_CNT */
325#define MME_QM_CQ_IFIFO_CNT_VAL_SHIFT 0
326#define MME_QM_CQ_IFIFO_CNT_VAL_MASK 0x3
327
328/* MME_QM_CP_MSG_BASE0_ADDR_LO */
329#define MME_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
330#define MME_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
331
332/* MME_QM_CP_MSG_BASE0_ADDR_HI */
333#define MME_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
334#define MME_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
335
336/* MME_QM_CP_MSG_BASE1_ADDR_LO */
337#define MME_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
338#define MME_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
339
340/* MME_QM_CP_MSG_BASE1_ADDR_HI */
341#define MME_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
342#define MME_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
343
344/* MME_QM_CP_MSG_BASE2_ADDR_LO */
345#define MME_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
346#define MME_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
347
348/* MME_QM_CP_MSG_BASE2_ADDR_HI */
349#define MME_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
350#define MME_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
351
352/* MME_QM_CP_MSG_BASE3_ADDR_LO */
353#define MME_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
354#define MME_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
355
356/* MME_QM_CP_MSG_BASE3_ADDR_HI */
357#define MME_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
358#define MME_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
359
360/* MME_QM_CP_LDMA_TSIZE_OFFSET */
361#define MME_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
362#define MME_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
363
364/* MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET */
365#define MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
366#define MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
367
368/* MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET */
369#define MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
370#define MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
371
372/* MME_QM_CP_LDMA_DST_BASE_LO_OFFSET */
373#define MME_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
374#define MME_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
375
376/* MME_QM_CP_LDMA_DST_BASE_HI_OFFSET */
377#define MME_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
378#define MME_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
379
380/* MME_QM_CP_LDMA_COMMIT_OFFSET */
381#define MME_QM_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
382#define MME_QM_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
383
384/* MME_QM_CP_FENCE0_RDATA */
385#define MME_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
386#define MME_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
387
388/* MME_QM_CP_FENCE1_RDATA */
389#define MME_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
390#define MME_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
391
392/* MME_QM_CP_FENCE2_RDATA */
393#define MME_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
394#define MME_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
395
396/* MME_QM_CP_FENCE3_RDATA */
397#define MME_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
398#define MME_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
399
400/* MME_QM_CP_FENCE0_CNT */
401#define MME_QM_CP_FENCE0_CNT_VAL_SHIFT 0
402#define MME_QM_CP_FENCE0_CNT_VAL_MASK 0xFF
403
404/* MME_QM_CP_FENCE1_CNT */
405#define MME_QM_CP_FENCE1_CNT_VAL_SHIFT 0
406#define MME_QM_CP_FENCE1_CNT_VAL_MASK 0xFF
407
408/* MME_QM_CP_FENCE2_CNT */
409#define MME_QM_CP_FENCE2_CNT_VAL_SHIFT 0
410#define MME_QM_CP_FENCE2_CNT_VAL_MASK 0xFF
411
412/* MME_QM_CP_FENCE3_CNT */
413#define MME_QM_CP_FENCE3_CNT_VAL_SHIFT 0
414#define MME_QM_CP_FENCE3_CNT_VAL_MASK 0xFF
415
416/* MME_QM_CP_STS */
417#define MME_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
418#define MME_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
419#define MME_QM_CP_STS_ERDY_SHIFT 16
420#define MME_QM_CP_STS_ERDY_MASK 0x10000
421#define MME_QM_CP_STS_RRDY_SHIFT 17
422#define MME_QM_CP_STS_RRDY_MASK 0x20000
423#define MME_QM_CP_STS_MRDY_SHIFT 18
424#define MME_QM_CP_STS_MRDY_MASK 0x40000
425#define MME_QM_CP_STS_SW_STOP_SHIFT 19
426#define MME_QM_CP_STS_SW_STOP_MASK 0x80000
427#define MME_QM_CP_STS_FENCE_ID_SHIFT 20
428#define MME_QM_CP_STS_FENCE_ID_MASK 0x300000
429#define MME_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
430#define MME_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
431
432/* MME_QM_CP_CURRENT_INST_LO */
433#define MME_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0
434#define MME_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
435
436/* MME_QM_CP_CURRENT_INST_HI */
437#define MME_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0
438#define MME_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
439
440/* MME_QM_CP_BARRIER_CFG */
441#define MME_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0
442#define MME_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
443
444/* MME_QM_CP_DBG_0 */
445#define MME_QM_CP_DBG_0_VAL_SHIFT 0
446#define MME_QM_CP_DBG_0_VAL_MASK 0xFF
447
448/* MME_QM_PQ_BUF_ADDR */
449#define MME_QM_PQ_BUF_ADDR_VAL_SHIFT 0
450#define MME_QM_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
451
452/* MME_QM_PQ_BUF_RDATA */
453#define MME_QM_PQ_BUF_RDATA_VAL_SHIFT 0
454#define MME_QM_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
455
456/* MME_QM_CQ_BUF_ADDR */
457#define MME_QM_CQ_BUF_ADDR_VAL_SHIFT 0
458#define MME_QM_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
459
460/* MME_QM_CQ_BUF_RDATA */
461#define MME_QM_CQ_BUF_RDATA_VAL_SHIFT 0
462#define MME_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
463
464#endif /* ASIC_REG_MME_QM_MASKS_H_ */
465
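Status masks such as the MME_QM_GLBL_STS0_* definitions above are typically OR-ed together and compared against a raw register value. The sketch below shows such an idle check; it is an illustration, not code from this patch, and it assumes the caller has already read the status register into 'sts0'.

/*
 * Illustrative sketch only (not part of the patch): checking the QMAN idle
 * bits with the MME_QM_GLBL_STS0_* masks defined above. 'sts0' is assumed to
 * hold a value already read from the status register.
 */
#include <stdbool.h>
#include <stdint.h>

static bool mme_qm_engines_idle(uint32_t sts0)
{
	const uint32_t idle_mask = MME_QM_GLBL_STS0_PQF_IDLE_MASK |
				   MME_QM_GLBL_STS0_CQF_IDLE_MASK |
				   MME_QM_GLBL_STS0_CP_IDLE_MASK |
				   MME_QM_GLBL_STS0_DMA_IDLE_MASK;

	/* The queue manager is idle only when all four idle bits are set. */
	return (sts0 & idle_mask) == idle_mask;
}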
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
new file mode 100644
index 000000000000..b5b1c776f6c3
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
@@ -0,0 +1,179 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MME_QM_REGS_H_
14#define ASIC_REG_MME_QM_REGS_H_
15
16/*
17 *****************************************
18 * MME_QM (Prototype: QMAN)
19 *****************************************
20 */
21
22#define mmMME_QM_GLBL_CFG0 0xD8000
23
24#define mmMME_QM_GLBL_CFG1 0xD8004
25
26#define mmMME_QM_GLBL_PROT 0xD8008
27
28#define mmMME_QM_GLBL_ERR_CFG 0xD800C
29
30#define mmMME_QM_GLBL_ERR_ADDR_LO 0xD8010
31
32#define mmMME_QM_GLBL_ERR_ADDR_HI 0xD8014
33
34#define mmMME_QM_GLBL_ERR_WDATA 0xD8018
35
36#define mmMME_QM_GLBL_SECURE_PROPS 0xD801C
37
38#define mmMME_QM_GLBL_NON_SECURE_PROPS 0xD8020
39
40#define mmMME_QM_GLBL_STS0 0xD8024
41
42#define mmMME_QM_GLBL_STS1 0xD8028
43
44#define mmMME_QM_PQ_BASE_LO 0xD8060
45
46#define mmMME_QM_PQ_BASE_HI 0xD8064
47
48#define mmMME_QM_PQ_SIZE 0xD8068
49
50#define mmMME_QM_PQ_PI 0xD806C
51
52#define mmMME_QM_PQ_CI 0xD8070
53
54#define mmMME_QM_PQ_CFG0 0xD8074
55
56#define mmMME_QM_PQ_CFG1 0xD8078
57
58#define mmMME_QM_PQ_ARUSER 0xD807C
59
60#define mmMME_QM_PQ_PUSH0 0xD8080
61
62#define mmMME_QM_PQ_PUSH1 0xD8084
63
64#define mmMME_QM_PQ_PUSH2 0xD8088
65
66#define mmMME_QM_PQ_PUSH3 0xD808C
67
68#define mmMME_QM_PQ_STS0 0xD8090
69
70#define mmMME_QM_PQ_STS1 0xD8094
71
72#define mmMME_QM_PQ_RD_RATE_LIM_EN 0xD80A0
73
74#define mmMME_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xD80A4
75
76#define mmMME_QM_PQ_RD_RATE_LIM_SAT 0xD80A8
77
78#define mmMME_QM_PQ_RD_RATE_LIM_TOUT 0xD80AC
79
80#define mmMME_QM_CQ_CFG0 0xD80B0
81
82#define mmMME_QM_CQ_CFG1 0xD80B4
83
84#define mmMME_QM_CQ_ARUSER 0xD80B8
85
86#define mmMME_QM_CQ_PTR_LO 0xD80C0
87
88#define mmMME_QM_CQ_PTR_HI 0xD80C4
89
90#define mmMME_QM_CQ_TSIZE 0xD80C8
91
92#define mmMME_QM_CQ_CTL 0xD80CC
93
94#define mmMME_QM_CQ_PTR_LO_STS 0xD80D4
95
96#define mmMME_QM_CQ_PTR_HI_STS 0xD80D8
97
98#define mmMME_QM_CQ_TSIZE_STS 0xD80DC
99
100#define mmMME_QM_CQ_CTL_STS 0xD80E0
101
102#define mmMME_QM_CQ_STS0 0xD80E4
103
104#define mmMME_QM_CQ_STS1 0xD80E8
105
106#define mmMME_QM_CQ_RD_RATE_LIM_EN 0xD80F0
107
108#define mmMME_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xD80F4
109
110#define mmMME_QM_CQ_RD_RATE_LIM_SAT 0xD80F8
111
112#define mmMME_QM_CQ_RD_RATE_LIM_TOUT 0xD80FC
113
114#define mmMME_QM_CQ_IFIFO_CNT 0xD8108
115
116#define mmMME_QM_CP_MSG_BASE0_ADDR_LO 0xD8120
117
118#define mmMME_QM_CP_MSG_BASE0_ADDR_HI 0xD8124
119
120#define mmMME_QM_CP_MSG_BASE1_ADDR_LO 0xD8128
121
122#define mmMME_QM_CP_MSG_BASE1_ADDR_HI 0xD812C
123
124#define mmMME_QM_CP_MSG_BASE2_ADDR_LO 0xD8130
125
126#define mmMME_QM_CP_MSG_BASE2_ADDR_HI 0xD8134
127
128#define mmMME_QM_CP_MSG_BASE3_ADDR_LO 0xD8138
129
130#define mmMME_QM_CP_MSG_BASE3_ADDR_HI 0xD813C
131
132#define mmMME_QM_CP_LDMA_TSIZE_OFFSET 0xD8140
133
134#define mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xD8144
135
136#define mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xD8148
137
138#define mmMME_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xD814C
139
140#define mmMME_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xD8150
141
142#define mmMME_QM_CP_LDMA_COMMIT_OFFSET 0xD8154
143
144#define mmMME_QM_CP_FENCE0_RDATA 0xD8158
145
146#define mmMME_QM_CP_FENCE1_RDATA 0xD815C
147
148#define mmMME_QM_CP_FENCE2_RDATA 0xD8160
149
150#define mmMME_QM_CP_FENCE3_RDATA 0xD8164
151
152#define mmMME_QM_CP_FENCE0_CNT 0xD8168
153
154#define mmMME_QM_CP_FENCE1_CNT 0xD816C
155
156#define mmMME_QM_CP_FENCE2_CNT 0xD8170
157
158#define mmMME_QM_CP_FENCE3_CNT 0xD8174
159
160#define mmMME_QM_CP_STS 0xD8178
161
162#define mmMME_QM_CP_CURRENT_INST_LO 0xD817C
163
164#define mmMME_QM_CP_CURRENT_INST_HI 0xD8180
165
166#define mmMME_QM_CP_BARRIER_CFG 0xD8184
167
168#define mmMME_QM_CP_DBG_0 0xD8188
169
170#define mmMME_QM_PQ_BUF_ADDR 0xD8300
171
172#define mmMME_QM_PQ_BUF_RDATA 0xD8304
173
174#define mmMME_QM_CQ_BUF_ADDR 0xD8308
175
176#define mmMME_QM_CQ_BUF_RDATA 0xD830C
177
178#endif /* ASIC_REG_MME_QM_REGS_H_ */
179
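The mmMME_QM_* constants in this header are byte offsets from the device's configuration-space base, so pairing it with the masks header gives everything needed for a register access. The sketch below assumes a hypothetical ioremap'ed 'cfg_base' pointer and the standard readl()/writel() accessors; it only illustrates how the offsets are used and is not part of the patch.

/*
 * Illustrative sketch only (not part of the patch). 'cfg_base' is a
 * hypothetical ioremap'ed pointer to the device configuration space; the
 * register offset comes from the header added above.
 */
#include <linux/io.h>
#include <linux/types.h>
#include "mme_qm_regs.h"	/* the header added above */

static u32 mme_qm_read_pq_pi(void __iomem *cfg_base)
{
	/* Producer index of the MME queue manager's primary queue. */
	return readl(cfg_base + mmMME_QM_PQ_PI);
}

static void mme_qm_write_pq_pi(void __iomem *cfg_base, u32 pi)
{
	writel(pi, cfg_base + mmMME_QM_PQ_PI);
}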
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
new file mode 100644
index 000000000000..9436b1e2705a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
@@ -0,0 +1,1153 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MME_REGS_H_
14#define ASIC_REG_MME_REGS_H_
15
16/*
17 *****************************************
18 * MME (Prototype: MME)
19 *****************************************
20 */
21
22#define mmMME_ARCH_STATUS 0xD0000
23
24#define mmMME_ARCH_A_BASE_ADDR_HIGH 0xD0008
25
26#define mmMME_ARCH_B_BASE_ADDR_HIGH 0xD000C
27
28#define mmMME_ARCH_CIN_BASE_ADDR_HIGH 0xD0010
29
30#define mmMME_ARCH_COUT_BASE_ADDR_HIGH 0xD0014
31
32#define mmMME_ARCH_BIAS_BASE_ADDR_HIGH 0xD0018
33
34#define mmMME_ARCH_A_BASE_ADDR_LOW 0xD001C
35
36#define mmMME_ARCH_B_BASE_ADDR_LOW 0xD0020
37
38#define mmMME_ARCH_CIN_BASE_ADDR_LOW 0xD0024
39
40#define mmMME_ARCH_COUT_BASE_ADDR_LOW 0xD0028
41
42#define mmMME_ARCH_BIAS_BASE_ADDR_LOW 0xD002C
43
44#define mmMME_ARCH_HEADER 0xD0030
45
46#define mmMME_ARCH_KERNEL_SIZE_MINUS_1 0xD0034
47
48#define mmMME_ARCH_ASSOCIATED_DIMS_0 0xD0038
49
50#define mmMME_ARCH_ASSOCIATED_DIMS_1 0xD003C
51
52#define mmMME_ARCH_COUT_SCALE 0xD0040
53
54#define mmMME_ARCH_CIN_SCALE 0xD0044
55
56#define mmMME_ARCH_GEMMLOWP_ZP 0xD0048
57
58#define mmMME_ARCH_GEMMLOWP_EXPONENT 0xD004C
59
60#define mmMME_ARCH_A_ROI_BASE_OFFSET_0 0xD0050
61
62#define mmMME_ARCH_A_ROI_BASE_OFFSET_1 0xD0054
63
64#define mmMME_ARCH_A_ROI_BASE_OFFSET_2 0xD0058
65
66#define mmMME_ARCH_A_ROI_BASE_OFFSET_3 0xD005C
67
68#define mmMME_ARCH_A_ROI_BASE_OFFSET_4 0xD0060
69
70#define mmMME_ARCH_A_VALID_ELEMENTS_0 0xD0064
71
72#define mmMME_ARCH_A_VALID_ELEMENTS_1 0xD0068
73
74#define mmMME_ARCH_A_VALID_ELEMENTS_2 0xD006C
75
76#define mmMME_ARCH_A_VALID_ELEMENTS_3 0xD0070
77
78#define mmMME_ARCH_A_VALID_ELEMENTS_4 0xD0074
79
80#define mmMME_ARCH_A_LOOP_STRIDE_0 0xD0078
81
82#define mmMME_ARCH_A_LOOP_STRIDE_1 0xD007C
83
84#define mmMME_ARCH_A_LOOP_STRIDE_2 0xD0080
85
86#define mmMME_ARCH_A_LOOP_STRIDE_3 0xD0084
87
88#define mmMME_ARCH_A_LOOP_STRIDE_4 0xD0088
89
90#define mmMME_ARCH_A_ROI_SIZE_0 0xD008C
91
92#define mmMME_ARCH_A_ROI_SIZE_1 0xD0090
93
94#define mmMME_ARCH_A_ROI_SIZE_2 0xD0094
95
96#define mmMME_ARCH_A_ROI_SIZE_3 0xD0098
97
98#define mmMME_ARCH_A_SPATIAL_START_OFFSET_0 0xD009C
99
100#define mmMME_ARCH_A_SPATIAL_START_OFFSET_1 0xD00A0
101
102#define mmMME_ARCH_A_SPATIAL_START_OFFSET_2 0xD00A4
103
104#define mmMME_ARCH_A_SPATIAL_START_OFFSET_3 0xD00A8
105
106#define mmMME_ARCH_A_SPATIAL_STRIDE_0 0xD00AC
107
108#define mmMME_ARCH_A_SPATIAL_STRIDE_1 0xD00B0
109
110#define mmMME_ARCH_A_SPATIAL_STRIDE_2 0xD00B4
111
112#define mmMME_ARCH_A_SPATIAL_STRIDE_3 0xD00B8
113
114#define mmMME_ARCH_A_SPATIAL_SIZE_MINUS_1 0xD00BC
115
116#define mmMME_ARCH_B_ROI_BASE_OFFSET_0 0xD00C0
117
118#define mmMME_ARCH_B_ROI_BASE_OFFSET_1 0xD00C4
119
120#define mmMME_ARCH_B_ROI_BASE_OFFSET_2 0xD00C8
121
122#define mmMME_ARCH_B_ROI_BASE_OFFSET_3 0xD00CC
123
124#define mmMME_ARCH_B_ROI_BASE_OFFSET_4 0xD00D0
125
126#define mmMME_ARCH_B_VALID_ELEMENTS_0 0xD00D4
127
128#define mmMME_ARCH_B_VALID_ELEMENTS_1 0xD00D8
129
130#define mmMME_ARCH_B_VALID_ELEMENTS_2 0xD00DC
131
132#define mmMME_ARCH_B_VALID_ELEMENTS_3 0xD00E0
133
134#define mmMME_ARCH_B_VALID_ELEMENTS_4 0xD00E4
135
136#define mmMME_ARCH_B_LOOP_STRIDE_0 0xD00E8
137
138#define mmMME_ARCH_B_LOOP_STRIDE_1 0xD00EC
139
140#define mmMME_ARCH_B_LOOP_STRIDE_2 0xD00F0
141
142#define mmMME_ARCH_B_LOOP_STRIDE_3 0xD00F4
143
144#define mmMME_ARCH_B_LOOP_STRIDE_4 0xD00F8
145
146#define mmMME_ARCH_B_ROI_SIZE_0 0xD00FC
147
148#define mmMME_ARCH_B_ROI_SIZE_1 0xD0100
149
150#define mmMME_ARCH_B_ROI_SIZE_2 0xD0104
151
152#define mmMME_ARCH_B_ROI_SIZE_3 0xD0108
153
154#define mmMME_ARCH_B_SPATIAL_START_OFFSET_0 0xD010C
155
156#define mmMME_ARCH_B_SPATIAL_START_OFFSET_1 0xD0110
157
158#define mmMME_ARCH_B_SPATIAL_START_OFFSET_2 0xD0114
159
160#define mmMME_ARCH_B_SPATIAL_START_OFFSET_3 0xD0118
161
162#define mmMME_ARCH_B_SPATIAL_STRIDE_0 0xD011C
163
164#define mmMME_ARCH_B_SPATIAL_STRIDE_1 0xD0120
165
166#define mmMME_ARCH_B_SPATIAL_STRIDE_2 0xD0124
167
168#define mmMME_ARCH_B_SPATIAL_STRIDE_3 0xD0128
169
170#define mmMME_ARCH_B_SPATIAL_SIZE_MINUS_1 0xD012C
171
172#define mmMME_ARCH_C_ROI_BASE_OFFSET_0 0xD0130
173
174#define mmMME_ARCH_C_ROI_BASE_OFFSET_1 0xD0134
175
176#define mmMME_ARCH_C_ROI_BASE_OFFSET_2 0xD0138
177
178#define mmMME_ARCH_C_ROI_BASE_OFFSET_3 0xD013C
179
180#define mmMME_ARCH_C_ROI_BASE_OFFSET_4 0xD0140
181
182#define mmMME_ARCH_C_VALID_ELEMENTS_0 0xD0144
183
184#define mmMME_ARCH_C_VALID_ELEMENTS_1 0xD0148
185
186#define mmMME_ARCH_C_VALID_ELEMENTS_2 0xD014C
187
188#define mmMME_ARCH_C_VALID_ELEMENTS_3 0xD0150
189
190#define mmMME_ARCH_C_VALID_ELEMENTS_4 0xD0154
191
192#define mmMME_ARCH_C_LOOP_STRIDE_0 0xD0158
193
194#define mmMME_ARCH_C_LOOP_STRIDE_1 0xD015C
195
196#define mmMME_ARCH_C_LOOP_STRIDE_2 0xD0160
197
198#define mmMME_ARCH_C_LOOP_STRIDE_3 0xD0164
199
200#define mmMME_ARCH_C_LOOP_STRIDE_4 0xD0168
201
202#define mmMME_ARCH_C_ROI_SIZE_0 0xD016C
203
204#define mmMME_ARCH_C_ROI_SIZE_1 0xD0170
205
206#define mmMME_ARCH_C_ROI_SIZE_2 0xD0174
207
208#define mmMME_ARCH_C_ROI_SIZE_3 0xD0178
209
210#define mmMME_ARCH_C_SPATIAL_START_OFFSET_0 0xD017C
211
212#define mmMME_ARCH_C_SPATIAL_START_OFFSET_1 0xD0180
213
214#define mmMME_ARCH_C_SPATIAL_START_OFFSET_2 0xD0184
215
216#define mmMME_ARCH_C_SPATIAL_START_OFFSET_3 0xD0188
217
218#define mmMME_ARCH_C_SPATIAL_STRIDE_0 0xD018C
219
220#define mmMME_ARCH_C_SPATIAL_STRIDE_1 0xD0190
221
222#define mmMME_ARCH_C_SPATIAL_STRIDE_2 0xD0194
223
224#define mmMME_ARCH_C_SPATIAL_STRIDE_3 0xD0198
225
226#define mmMME_ARCH_C_SPATIAL_SIZE_MINUS_1 0xD019C
227
228#define mmMME_ARCH_SYNC_OBJECT_MESSAGE 0xD01A0
229
230#define mmMME_ARCH_E_PADDING_VALUE_A 0xD01A4
231
232#define mmMME_ARCH_E_NUM_ITERATION_MINUS_1 0xD01A8
233
234#define mmMME_ARCH_E_BUBBLES_PER_SPLIT 0xD01AC
235
236#define mmMME_CMD 0xD0200
237
238#define mmMME_DUMMY 0xD0204
239
240#define mmMME_RESET 0xD0208
241
242#define mmMME_STALL 0xD020C
243
244#define mmMME_SM_BASE_ADDRESS_LOW 0xD0210
245
246#define mmMME_SM_BASE_ADDRESS_HIGH 0xD0214
247
248#define mmMME_DBGMEM_ADD 0xD0218
249
250#define mmMME_DBGMEM_DATA_WR 0xD021C
251
252#define mmMME_DBGMEM_DATA_RD 0xD0220
253
254#define mmMME_DBGMEM_CTRL 0xD0224
255
256#define mmMME_DBGMEM_RC 0xD0228
257
258#define mmMME_LOG_SHADOW 0xD022C
259
260#define mmMME_STORE_MAX_CREDIT 0xD0300
261
262#define mmMME_AGU 0xD0304
263
264#define mmMME_SBA 0xD0308
265
266#define mmMME_SBB 0xD030C
267
268#define mmMME_SBC 0xD0310
269
270#define mmMME_WBC 0xD0314
271
272#define mmMME_SBA_CONTROL_DATA 0xD0318
273
274#define mmMME_SBB_CONTROL_DATA 0xD031C
275
276#define mmMME_SBC_CONTROL_DATA 0xD0320
277
278#define mmMME_WBC_CONTROL_DATA 0xD0324
279
280#define mmMME_TE 0xD0328
281
282#define mmMME_TE2DEC 0xD032C
283
284#define mmMME_REI_STATUS 0xD0330
285
286#define mmMME_REI_MASK 0xD0334
287
288#define mmMME_SEI_STATUS 0xD0338
289
290#define mmMME_SEI_MASK 0xD033C
291
292#define mmMME_SPI_STATUS 0xD0340
293
294#define mmMME_SPI_MASK 0xD0344
295
296#define mmMME_SHADOW_0_STATUS 0xD0400
297
298#define mmMME_SHADOW_0_A_BASE_ADDR_HIGH 0xD0408
299
300#define mmMME_SHADOW_0_B_BASE_ADDR_HIGH 0xD040C
301
302#define mmMME_SHADOW_0_CIN_BASE_ADDR_HIGH 0xD0410
303
304#define mmMME_SHADOW_0_COUT_BASE_ADDR_HIGH 0xD0414
305
306#define mmMME_SHADOW_0_BIAS_BASE_ADDR_HIGH 0xD0418
307
308#define mmMME_SHADOW_0_A_BASE_ADDR_LOW 0xD041C
309
310#define mmMME_SHADOW_0_B_BASE_ADDR_LOW 0xD0420
311
312#define mmMME_SHADOW_0_CIN_BASE_ADDR_LOW 0xD0424
313
314#define mmMME_SHADOW_0_COUT_BASE_ADDR_LOW 0xD0428
315
316#define mmMME_SHADOW_0_BIAS_BASE_ADDR_LOW 0xD042C
317
318#define mmMME_SHADOW_0_HEADER 0xD0430
319
320#define mmMME_SHADOW_0_KERNEL_SIZE_MINUS_1 0xD0434
321
322#define mmMME_SHADOW_0_ASSOCIATED_DIMS_0 0xD0438
323
324#define mmMME_SHADOW_0_ASSOCIATED_DIMS_1 0xD043C
325
326#define mmMME_SHADOW_0_COUT_SCALE 0xD0440
327
328#define mmMME_SHADOW_0_CIN_SCALE 0xD0444
329
330#define mmMME_SHADOW_0_GEMMLOWP_ZP 0xD0448
331
332#define mmMME_SHADOW_0_GEMMLOWP_EXPONENT 0xD044C
333
334#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_0 0xD0450
335
336#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_1 0xD0454
337
338#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_2 0xD0458
339
340#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_3 0xD045C
341
342#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_4 0xD0460
343
344#define mmMME_SHADOW_0_A_VALID_ELEMENTS_0 0xD0464
345
346#define mmMME_SHADOW_0_A_VALID_ELEMENTS_1 0xD0468
347
348#define mmMME_SHADOW_0_A_VALID_ELEMENTS_2 0xD046C
349
350#define mmMME_SHADOW_0_A_VALID_ELEMENTS_3 0xD0470
351
352#define mmMME_SHADOW_0_A_VALID_ELEMENTS_4 0xD0474
353
354#define mmMME_SHADOW_0_A_LOOP_STRIDE_0 0xD0478
355
356#define mmMME_SHADOW_0_A_LOOP_STRIDE_1 0xD047C
357
358#define mmMME_SHADOW_0_A_LOOP_STRIDE_2 0xD0480
359
360#define mmMME_SHADOW_0_A_LOOP_STRIDE_3 0xD0484
361
362#define mmMME_SHADOW_0_A_LOOP_STRIDE_4 0xD0488
363
364#define mmMME_SHADOW_0_A_ROI_SIZE_0 0xD048C
365
366#define mmMME_SHADOW_0_A_ROI_SIZE_1 0xD0490
367
368#define mmMME_SHADOW_0_A_ROI_SIZE_2 0xD0494
369
370#define mmMME_SHADOW_0_A_ROI_SIZE_3 0xD0498
371
372#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_0 0xD049C
373
374#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_1 0xD04A0
375
376#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_2 0xD04A4
377
378#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_3 0xD04A8
379
380#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_0 0xD04AC
381
382#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_1 0xD04B0
383
384#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_2 0xD04B4
385
386#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_3 0xD04B8
387
388#define mmMME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1 0xD04BC
389
390#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_0 0xD04C0
391
392#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_1 0xD04C4
393
394#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_2 0xD04C8
395
396#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_3 0xD04CC
397
398#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_4 0xD04D0
399
400#define mmMME_SHADOW_0_B_VALID_ELEMENTS_0 0xD04D4
401
402#define mmMME_SHADOW_0_B_VALID_ELEMENTS_1 0xD04D8
403
404#define mmMME_SHADOW_0_B_VALID_ELEMENTS_2 0xD04DC
405
406#define mmMME_SHADOW_0_B_VALID_ELEMENTS_3 0xD04E0
407
408#define mmMME_SHADOW_0_B_VALID_ELEMENTS_4 0xD04E4
409
410#define mmMME_SHADOW_0_B_LOOP_STRIDE_0 0xD04E8
411
412#define mmMME_SHADOW_0_B_LOOP_STRIDE_1 0xD04EC
413
414#define mmMME_SHADOW_0_B_LOOP_STRIDE_2 0xD04F0
415
416#define mmMME_SHADOW_0_B_LOOP_STRIDE_3 0xD04F4
417
418#define mmMME_SHADOW_0_B_LOOP_STRIDE_4 0xD04F8
419
420#define mmMME_SHADOW_0_B_ROI_SIZE_0 0xD04FC
421
422#define mmMME_SHADOW_0_B_ROI_SIZE_1 0xD0500
423
424#define mmMME_SHADOW_0_B_ROI_SIZE_2 0xD0504
425
426#define mmMME_SHADOW_0_B_ROI_SIZE_3 0xD0508
427
428#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_0 0xD050C
429
430#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_1 0xD0510
431
432#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_2 0xD0514
433
434#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_3 0xD0518
435
436#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_0 0xD051C
437
438#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_1 0xD0520
439
440#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_2 0xD0524
441
442#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_3 0xD0528
443
444#define mmMME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1 0xD052C
445
446#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_0 0xD0530
447
448#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_1 0xD0534
449
450#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_2 0xD0538
451
452#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_3 0xD053C
453
454#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_4 0xD0540
455
456#define mmMME_SHADOW_0_C_VALID_ELEMENTS_0 0xD0544
457
458#define mmMME_SHADOW_0_C_VALID_ELEMENTS_1 0xD0548
459
460#define mmMME_SHADOW_0_C_VALID_ELEMENTS_2 0xD054C
461
462#define mmMME_SHADOW_0_C_VALID_ELEMENTS_3 0xD0550
463
464#define mmMME_SHADOW_0_C_VALID_ELEMENTS_4 0xD0554
465
466#define mmMME_SHADOW_0_C_LOOP_STRIDE_0 0xD0558
467
468#define mmMME_SHADOW_0_C_LOOP_STRIDE_1 0xD055C
469
470#define mmMME_SHADOW_0_C_LOOP_STRIDE_2 0xD0560
471
472#define mmMME_SHADOW_0_C_LOOP_STRIDE_3 0xD0564
473
474#define mmMME_SHADOW_0_C_LOOP_STRIDE_4 0xD0568
475
476#define mmMME_SHADOW_0_C_ROI_SIZE_0 0xD056C
477
478#define mmMME_SHADOW_0_C_ROI_SIZE_1 0xD0570
479
480#define mmMME_SHADOW_0_C_ROI_SIZE_2 0xD0574
481
482#define mmMME_SHADOW_0_C_ROI_SIZE_3 0xD0578
483
484#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_0 0xD057C
485
486#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_1 0xD0580
487
488#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_2 0xD0584
489
490#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_3 0xD0588
491
492#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_0 0xD058C
493
494#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_1 0xD0590
495
496#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_2 0xD0594
497
498#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_3 0xD0598
499
500#define mmMME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1 0xD059C
501
502#define mmMME_SHADOW_0_SYNC_OBJECT_MESSAGE 0xD05A0
503
504#define mmMME_SHADOW_0_E_PADDING_VALUE_A 0xD05A4
505
506#define mmMME_SHADOW_0_E_NUM_ITERATION_MINUS_1 0xD05A8
507
508#define mmMME_SHADOW_0_E_BUBBLES_PER_SPLIT 0xD05AC
509
510#define mmMME_SHADOW_1_STATUS 0xD0600
511
512#define mmMME_SHADOW_1_A_BASE_ADDR_HIGH 0xD0608
513
514#define mmMME_SHADOW_1_B_BASE_ADDR_HIGH 0xD060C
515
516#define mmMME_SHADOW_1_CIN_BASE_ADDR_HIGH 0xD0610
517
518#define mmMME_SHADOW_1_COUT_BASE_ADDR_HIGH 0xD0614
519
520#define mmMME_SHADOW_1_BIAS_BASE_ADDR_HIGH 0xD0618
521
522#define mmMME_SHADOW_1_A_BASE_ADDR_LOW 0xD061C
523
524#define mmMME_SHADOW_1_B_BASE_ADDR_LOW 0xD0620
525
526#define mmMME_SHADOW_1_CIN_BASE_ADDR_LOW 0xD0624
527
528#define mmMME_SHADOW_1_COUT_BASE_ADDR_LOW 0xD0628
529
530#define mmMME_SHADOW_1_BIAS_BASE_ADDR_LOW 0xD062C
531
532#define mmMME_SHADOW_1_HEADER 0xD0630
533
534#define mmMME_SHADOW_1_KERNEL_SIZE_MINUS_1 0xD0634
535
536#define mmMME_SHADOW_1_ASSOCIATED_DIMS_0 0xD0638
537
538#define mmMME_SHADOW_1_ASSOCIATED_DIMS_1 0xD063C
539
540#define mmMME_SHADOW_1_COUT_SCALE 0xD0640
541
542#define mmMME_SHADOW_1_CIN_SCALE 0xD0644
543
544#define mmMME_SHADOW_1_GEMMLOWP_ZP 0xD0648
545
546#define mmMME_SHADOW_1_GEMMLOWP_EXPONENT 0xD064C
547
548#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_0 0xD0650
549
550#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_1 0xD0654
551
552#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_2 0xD0658
553
554#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_3 0xD065C
555
556#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_4 0xD0660
557
558#define mmMME_SHADOW_1_A_VALID_ELEMENTS_0 0xD0664
559
560#define mmMME_SHADOW_1_A_VALID_ELEMENTS_1 0xD0668
561
562#define mmMME_SHADOW_1_A_VALID_ELEMENTS_2 0xD066C
563
564#define mmMME_SHADOW_1_A_VALID_ELEMENTS_3 0xD0670
565
566#define mmMME_SHADOW_1_A_VALID_ELEMENTS_4 0xD0674
567
568#define mmMME_SHADOW_1_A_LOOP_STRIDE_0 0xD0678
569
570#define mmMME_SHADOW_1_A_LOOP_STRIDE_1 0xD067C
571
572#define mmMME_SHADOW_1_A_LOOP_STRIDE_2 0xD0680
573
574#define mmMME_SHADOW_1_A_LOOP_STRIDE_3 0xD0684
575
576#define mmMME_SHADOW_1_A_LOOP_STRIDE_4 0xD0688
577
578#define mmMME_SHADOW_1_A_ROI_SIZE_0 0xD068C
579
580#define mmMME_SHADOW_1_A_ROI_SIZE_1 0xD0690
581
582#define mmMME_SHADOW_1_A_ROI_SIZE_2 0xD0694
583
584#define mmMME_SHADOW_1_A_ROI_SIZE_3 0xD0698
585
586#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_0 0xD069C
587
588#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_1 0xD06A0
589
590#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_2 0xD06A4
591
592#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_3 0xD06A8
593
594#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_0 0xD06AC
595
596#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_1 0xD06B0
597
598#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_2 0xD06B4
599
600#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_3 0xD06B8
601
602#define mmMME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1 0xD06BC
603
604#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_0 0xD06C0
605
606#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_1 0xD06C4
607
608#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_2 0xD06C8
609
610#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_3 0xD06CC
611
612#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_4 0xD06D0
613
614#define mmMME_SHADOW_1_B_VALID_ELEMENTS_0 0xD06D4
615
616#define mmMME_SHADOW_1_B_VALID_ELEMENTS_1 0xD06D8
617
618#define mmMME_SHADOW_1_B_VALID_ELEMENTS_2 0xD06DC
619
620#define mmMME_SHADOW_1_B_VALID_ELEMENTS_3 0xD06E0
621
622#define mmMME_SHADOW_1_B_VALID_ELEMENTS_4 0xD06E4
623
624#define mmMME_SHADOW_1_B_LOOP_STRIDE_0 0xD06E8
625
626#define mmMME_SHADOW_1_B_LOOP_STRIDE_1 0xD06EC
627
628#define mmMME_SHADOW_1_B_LOOP_STRIDE_2 0xD06F0
629
630#define mmMME_SHADOW_1_B_LOOP_STRIDE_3 0xD06F4
631
632#define mmMME_SHADOW_1_B_LOOP_STRIDE_4 0xD06F8
633
634#define mmMME_SHADOW_1_B_ROI_SIZE_0 0xD06FC
635
636#define mmMME_SHADOW_1_B_ROI_SIZE_1 0xD0700
637
638#define mmMME_SHADOW_1_B_ROI_SIZE_2 0xD0704
639
640#define mmMME_SHADOW_1_B_ROI_SIZE_3 0xD0708
641
642#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_0 0xD070C
643
644#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_1 0xD0710
645
646#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_2 0xD0714
647
648#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_3 0xD0718
649
650#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_0 0xD071C
651
652#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_1 0xD0720
653
654#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_2 0xD0724
655
656#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_3 0xD0728
657
658#define mmMME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1 0xD072C
659
660#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_0 0xD0730
661
662#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_1 0xD0734
663
664#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_2 0xD0738
665
666#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_3 0xD073C
667
668#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_4 0xD0740
669
670#define mmMME_SHADOW_1_C_VALID_ELEMENTS_0 0xD0744
671
672#define mmMME_SHADOW_1_C_VALID_ELEMENTS_1 0xD0748
673
674#define mmMME_SHADOW_1_C_VALID_ELEMENTS_2 0xD074C
675
676#define mmMME_SHADOW_1_C_VALID_ELEMENTS_3 0xD0750
677
678#define mmMME_SHADOW_1_C_VALID_ELEMENTS_4 0xD0754
679
680#define mmMME_SHADOW_1_C_LOOP_STRIDE_0 0xD0758
681
682#define mmMME_SHADOW_1_C_LOOP_STRIDE_1 0xD075C
683
684#define mmMME_SHADOW_1_C_LOOP_STRIDE_2 0xD0760
685
686#define mmMME_SHADOW_1_C_LOOP_STRIDE_3 0xD0764
687
688#define mmMME_SHADOW_1_C_LOOP_STRIDE_4 0xD0768
689
690#define mmMME_SHADOW_1_C_ROI_SIZE_0 0xD076C
691
692#define mmMME_SHADOW_1_C_ROI_SIZE_1 0xD0770
693
694#define mmMME_SHADOW_1_C_ROI_SIZE_2 0xD0774
695
696#define mmMME_SHADOW_1_C_ROI_SIZE_3 0xD0778
697
698#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_0 0xD077C
699
700#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_1 0xD0780
701
702#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_2 0xD0784
703
704#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_3 0xD0788
705
706#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_0 0xD078C
707
708#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_1 0xD0790
709
710#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_2 0xD0794
711
712#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_3 0xD0798
713
714#define mmMME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1 0xD079C
715
716#define mmMME_SHADOW_1_SYNC_OBJECT_MESSAGE 0xD07A0
717
718#define mmMME_SHADOW_1_E_PADDING_VALUE_A 0xD07A4
719
720#define mmMME_SHADOW_1_E_NUM_ITERATION_MINUS_1 0xD07A8
721
722#define mmMME_SHADOW_1_E_BUBBLES_PER_SPLIT 0xD07AC
723
724#define mmMME_SHADOW_2_STATUS 0xD0800
725
726#define mmMME_SHADOW_2_A_BASE_ADDR_HIGH 0xD0808
727
728#define mmMME_SHADOW_2_B_BASE_ADDR_HIGH 0xD080C
729
730#define mmMME_SHADOW_2_CIN_BASE_ADDR_HIGH 0xD0810
731
732#define mmMME_SHADOW_2_COUT_BASE_ADDR_HIGH 0xD0814
733
734#define mmMME_SHADOW_2_BIAS_BASE_ADDR_HIGH 0xD0818
735
736#define mmMME_SHADOW_2_A_BASE_ADDR_LOW 0xD081C
737
738#define mmMME_SHADOW_2_B_BASE_ADDR_LOW 0xD0820
739
740#define mmMME_SHADOW_2_CIN_BASE_ADDR_LOW 0xD0824
741
742#define mmMME_SHADOW_2_COUT_BASE_ADDR_LOW 0xD0828
743
744#define mmMME_SHADOW_2_BIAS_BASE_ADDR_LOW 0xD082C
745
746#define mmMME_SHADOW_2_HEADER 0xD0830
747
748#define mmMME_SHADOW_2_KERNEL_SIZE_MINUS_1 0xD0834
749
750#define mmMME_SHADOW_2_ASSOCIATED_DIMS_0 0xD0838
751
752#define mmMME_SHADOW_2_ASSOCIATED_DIMS_1 0xD083C
753
754#define mmMME_SHADOW_2_COUT_SCALE 0xD0840
755
756#define mmMME_SHADOW_2_CIN_SCALE 0xD0844
757
758#define mmMME_SHADOW_2_GEMMLOWP_ZP 0xD0848
759
760#define mmMME_SHADOW_2_GEMMLOWP_EXPONENT 0xD084C
761
762#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_0 0xD0850
763
764#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_1 0xD0854
765
766#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_2 0xD0858
767
768#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_3 0xD085C
769
770#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_4 0xD0860
771
772#define mmMME_SHADOW_2_A_VALID_ELEMENTS_0 0xD0864
773
774#define mmMME_SHADOW_2_A_VALID_ELEMENTS_1 0xD0868
775
776#define mmMME_SHADOW_2_A_VALID_ELEMENTS_2 0xD086C
777
778#define mmMME_SHADOW_2_A_VALID_ELEMENTS_3 0xD0870
779
780#define mmMME_SHADOW_2_A_VALID_ELEMENTS_4 0xD0874
781
782#define mmMME_SHADOW_2_A_LOOP_STRIDE_0 0xD0878
783
784#define mmMME_SHADOW_2_A_LOOP_STRIDE_1 0xD087C
785
786#define mmMME_SHADOW_2_A_LOOP_STRIDE_2 0xD0880
787
788#define mmMME_SHADOW_2_A_LOOP_STRIDE_3 0xD0884
789
790#define mmMME_SHADOW_2_A_LOOP_STRIDE_4 0xD0888
791
792#define mmMME_SHADOW_2_A_ROI_SIZE_0 0xD088C
793
794#define mmMME_SHADOW_2_A_ROI_SIZE_1 0xD0890
795
796#define mmMME_SHADOW_2_A_ROI_SIZE_2 0xD0894
797
798#define mmMME_SHADOW_2_A_ROI_SIZE_3 0xD0898
799
800#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_0 0xD089C
801
802#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_1 0xD08A0
803
804#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_2 0xD08A4
805
806#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_3 0xD08A8
807
808#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_0 0xD08AC
809
810#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_1 0xD08B0
811
812#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_2 0xD08B4
813
814#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_3 0xD08B8
815
816#define mmMME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1 0xD08BC
817
818#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_0 0xD08C0
819
820#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_1 0xD08C4
821
822#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_2 0xD08C8
823
824#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_3 0xD08CC
825
826#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_4 0xD08D0
827
828#define mmMME_SHADOW_2_B_VALID_ELEMENTS_0 0xD08D4
829
830#define mmMME_SHADOW_2_B_VALID_ELEMENTS_1 0xD08D8
831
832#define mmMME_SHADOW_2_B_VALID_ELEMENTS_2 0xD08DC
833
834#define mmMME_SHADOW_2_B_VALID_ELEMENTS_3 0xD08E0
835
836#define mmMME_SHADOW_2_B_VALID_ELEMENTS_4 0xD08E4
837
838#define mmMME_SHADOW_2_B_LOOP_STRIDE_0 0xD08E8
839
840#define mmMME_SHADOW_2_B_LOOP_STRIDE_1 0xD08EC
841
842#define mmMME_SHADOW_2_B_LOOP_STRIDE_2 0xD08F0
843
844#define mmMME_SHADOW_2_B_LOOP_STRIDE_3 0xD08F4
845
846#define mmMME_SHADOW_2_B_LOOP_STRIDE_4 0xD08F8
847
848#define mmMME_SHADOW_2_B_ROI_SIZE_0 0xD08FC
849
850#define mmMME_SHADOW_2_B_ROI_SIZE_1 0xD0900
851
852#define mmMME_SHADOW_2_B_ROI_SIZE_2 0xD0904
853
854#define mmMME_SHADOW_2_B_ROI_SIZE_3 0xD0908
855
856#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_0 0xD090C
857
858#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_1 0xD0910
859
860#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_2 0xD0914
861
862#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_3 0xD0918
863
864#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_0 0xD091C
865
866#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_1 0xD0920
867
868#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_2 0xD0924
869
870#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_3 0xD0928
871
872#define mmMME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1 0xD092C
873
874#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_0 0xD0930
875
876#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_1 0xD0934
877
878#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_2 0xD0938
879
880#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_3 0xD093C
881
882#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_4 0xD0940
883
884#define mmMME_SHADOW_2_C_VALID_ELEMENTS_0 0xD0944
885
886#define mmMME_SHADOW_2_C_VALID_ELEMENTS_1 0xD0948
887
888#define mmMME_SHADOW_2_C_VALID_ELEMENTS_2 0xD094C
889
890#define mmMME_SHADOW_2_C_VALID_ELEMENTS_3 0xD0950
891
892#define mmMME_SHADOW_2_C_VALID_ELEMENTS_4 0xD0954
893
894#define mmMME_SHADOW_2_C_LOOP_STRIDE_0 0xD0958
895
896#define mmMME_SHADOW_2_C_LOOP_STRIDE_1 0xD095C
897
898#define mmMME_SHADOW_2_C_LOOP_STRIDE_2 0xD0960
899
900#define mmMME_SHADOW_2_C_LOOP_STRIDE_3 0xD0964
901
902#define mmMME_SHADOW_2_C_LOOP_STRIDE_4 0xD0968
903
904#define mmMME_SHADOW_2_C_ROI_SIZE_0 0xD096C
905
906#define mmMME_SHADOW_2_C_ROI_SIZE_1 0xD0970
907
908#define mmMME_SHADOW_2_C_ROI_SIZE_2 0xD0974
909
910#define mmMME_SHADOW_2_C_ROI_SIZE_3 0xD0978
911
912#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_0 0xD097C
913
914#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_1 0xD0980
915
916#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_2 0xD0984
917
918#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_3 0xD0988
919
920#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_0 0xD098C
921
922#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_1 0xD0990
923
924#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_2 0xD0994
925
926#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_3 0xD0998
927
928#define mmMME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1 0xD099C
929
930#define mmMME_SHADOW_2_SYNC_OBJECT_MESSAGE 0xD09A0
931
932#define mmMME_SHADOW_2_E_PADDING_VALUE_A 0xD09A4
933
934#define mmMME_SHADOW_2_E_NUM_ITERATION_MINUS_1 0xD09A8
935
936#define mmMME_SHADOW_2_E_BUBBLES_PER_SPLIT 0xD09AC
937
938#define mmMME_SHADOW_3_STATUS 0xD0A00
939
940#define mmMME_SHADOW_3_A_BASE_ADDR_HIGH 0xD0A08
941
942#define mmMME_SHADOW_3_B_BASE_ADDR_HIGH 0xD0A0C
943
944#define mmMME_SHADOW_3_CIN_BASE_ADDR_HIGH 0xD0A10
945
946#define mmMME_SHADOW_3_COUT_BASE_ADDR_HIGH 0xD0A14
947
948#define mmMME_SHADOW_3_BIAS_BASE_ADDR_HIGH 0xD0A18
949
950#define mmMME_SHADOW_3_A_BASE_ADDR_LOW 0xD0A1C
951
952#define mmMME_SHADOW_3_B_BASE_ADDR_LOW 0xD0A20
953
954#define mmMME_SHADOW_3_CIN_BASE_ADDR_LOW 0xD0A24
955
956#define mmMME_SHADOW_3_COUT_BASE_ADDR_LOW 0xD0A28
957
958#define mmMME_SHADOW_3_BIAS_BASE_ADDR_LOW 0xD0A2C
959
960#define mmMME_SHADOW_3_HEADER 0xD0A30
961
962#define mmMME_SHADOW_3_KERNEL_SIZE_MINUS_1 0xD0A34
963
964#define mmMME_SHADOW_3_ASSOCIATED_DIMS_0 0xD0A38
965
966#define mmMME_SHADOW_3_ASSOCIATED_DIMS_1 0xD0A3C
967
968#define mmMME_SHADOW_3_COUT_SCALE 0xD0A40
969
970#define mmMME_SHADOW_3_CIN_SCALE 0xD0A44
971
972#define mmMME_SHADOW_3_GEMMLOWP_ZP 0xD0A48
973
974#define mmMME_SHADOW_3_GEMMLOWP_EXPONENT 0xD0A4C
975
976#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_0 0xD0A50
977
978#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_1 0xD0A54
979
980#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_2 0xD0A58
981
982#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_3 0xD0A5C
983
984#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_4 0xD0A60
985
986#define mmMME_SHADOW_3_A_VALID_ELEMENTS_0 0xD0A64
987
988#define mmMME_SHADOW_3_A_VALID_ELEMENTS_1 0xD0A68
989
990#define mmMME_SHADOW_3_A_VALID_ELEMENTS_2 0xD0A6C
991
992#define mmMME_SHADOW_3_A_VALID_ELEMENTS_3 0xD0A70
993
994#define mmMME_SHADOW_3_A_VALID_ELEMENTS_4 0xD0A74
995
996#define mmMME_SHADOW_3_A_LOOP_STRIDE_0 0xD0A78
997
998#define mmMME_SHADOW_3_A_LOOP_STRIDE_1 0xD0A7C
999
1000#define mmMME_SHADOW_3_A_LOOP_STRIDE_2 0xD0A80
1001
1002#define mmMME_SHADOW_3_A_LOOP_STRIDE_3 0xD0A84
1003
1004#define mmMME_SHADOW_3_A_LOOP_STRIDE_4 0xD0A88
1005
1006#define mmMME_SHADOW_3_A_ROI_SIZE_0 0xD0A8C
1007
1008#define mmMME_SHADOW_3_A_ROI_SIZE_1 0xD0A90
1009
1010#define mmMME_SHADOW_3_A_ROI_SIZE_2 0xD0A94
1011
1012#define mmMME_SHADOW_3_A_ROI_SIZE_3 0xD0A98
1013
1014#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_0 0xD0A9C
1015
1016#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_1 0xD0AA0
1017
1018#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_2 0xD0AA4
1019
1020#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_3 0xD0AA8
1021
1022#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_0 0xD0AAC
1023
1024#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_1 0xD0AB0
1025
1026#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_2 0xD0AB4
1027
1028#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_3 0xD0AB8
1029
1030#define mmMME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1 0xD0ABC
1031
1032#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_0 0xD0AC0
1033
1034#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_1 0xD0AC4
1035
1036#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_2 0xD0AC8
1037
1038#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_3 0xD0ACC
1039
1040#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_4 0xD0AD0
1041
1042#define mmMME_SHADOW_3_B_VALID_ELEMENTS_0 0xD0AD4
1043
1044#define mmMME_SHADOW_3_B_VALID_ELEMENTS_1 0xD0AD8
1045
1046#define mmMME_SHADOW_3_B_VALID_ELEMENTS_2 0xD0ADC
1047
1048#define mmMME_SHADOW_3_B_VALID_ELEMENTS_3 0xD0AE0
1049
1050#define mmMME_SHADOW_3_B_VALID_ELEMENTS_4 0xD0AE4
1051
1052#define mmMME_SHADOW_3_B_LOOP_STRIDE_0 0xD0AE8
1053
1054#define mmMME_SHADOW_3_B_LOOP_STRIDE_1 0xD0AEC
1055
1056#define mmMME_SHADOW_3_B_LOOP_STRIDE_2 0xD0AF0
1057
1058#define mmMME_SHADOW_3_B_LOOP_STRIDE_3 0xD0AF4
1059
1060#define mmMME_SHADOW_3_B_LOOP_STRIDE_4 0xD0AF8
1061
1062#define mmMME_SHADOW_3_B_ROI_SIZE_0 0xD0AFC
1063
1064#define mmMME_SHADOW_3_B_ROI_SIZE_1 0xD0B00
1065
1066#define mmMME_SHADOW_3_B_ROI_SIZE_2 0xD0B04
1067
1068#define mmMME_SHADOW_3_B_ROI_SIZE_3 0xD0B08
1069
1070#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_0 0xD0B0C
1071
1072#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_1 0xD0B10
1073
1074#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_2 0xD0B14
1075
1076#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_3 0xD0B18
1077
1078#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_0 0xD0B1C
1079
1080#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_1 0xD0B20
1081
1082#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_2 0xD0B24
1083
1084#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_3 0xD0B28
1085
1086#define mmMME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1 0xD0B2C
1087
1088#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_0 0xD0B30
1089
1090#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_1 0xD0B34
1091
1092#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_2 0xD0B38
1093
1094#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_3 0xD0B3C
1095
1096#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_4 0xD0B40
1097
1098#define mmMME_SHADOW_3_C_VALID_ELEMENTS_0 0xD0B44
1099
1100#define mmMME_SHADOW_3_C_VALID_ELEMENTS_1 0xD0B48
1101
1102#define mmMME_SHADOW_3_C_VALID_ELEMENTS_2 0xD0B4C
1103
1104#define mmMME_SHADOW_3_C_VALID_ELEMENTS_3 0xD0B50
1105
1106#define mmMME_SHADOW_3_C_VALID_ELEMENTS_4 0xD0B54
1107
1108#define mmMME_SHADOW_3_C_LOOP_STRIDE_0 0xD0B58
1109
1110#define mmMME_SHADOW_3_C_LOOP_STRIDE_1 0xD0B5C
1111
1112#define mmMME_SHADOW_3_C_LOOP_STRIDE_2 0xD0B60
1113
1114#define mmMME_SHADOW_3_C_LOOP_STRIDE_3 0xD0B64
1115
1116#define mmMME_SHADOW_3_C_LOOP_STRIDE_4 0xD0B68
1117
1118#define mmMME_SHADOW_3_C_ROI_SIZE_0 0xD0B6C
1119
1120#define mmMME_SHADOW_3_C_ROI_SIZE_1 0xD0B70
1121
1122#define mmMME_SHADOW_3_C_ROI_SIZE_2 0xD0B74
1123
1124#define mmMME_SHADOW_3_C_ROI_SIZE_3 0xD0B78
1125
1126#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_0 0xD0B7C
1127
1128#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_1 0xD0B80
1129
1130#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_2 0xD0B84
1131
1132#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_3 0xD0B88
1133
1134#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_0 0xD0B8C
1135
1136#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_1 0xD0B90
1137
1138#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_2 0xD0B94
1139
1140#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_3 0xD0B98
1141
1142#define mmMME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1 0xD0B9C
1143
1144#define mmMME_SHADOW_3_SYNC_OBJECT_MESSAGE 0xD0BA0
1145
1146#define mmMME_SHADOW_3_E_PADDING_VALUE_A 0xD0BA4
1147
1148#define mmMME_SHADOW_3_E_NUM_ITERATION_MINUS_1 0xD0BA8
1149
1150#define mmMME_SHADOW_3_E_BUBBLES_PER_SPLIT 0xD0BAC
1151
1152#endif /* ASIC_REG_MME_REGS_H_ */
1153
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
new file mode 100644
index 000000000000..3a78078d3c4c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
@@ -0,0 +1,143 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MMU_MASKS_H_
14#define ASIC_REG_MMU_MASKS_H_
15
16/*
17 *****************************************
18 * MMU (Prototype: MMU)
19 *****************************************
20 */
21
22/* MMU_INPUT_FIFO_THRESHOLD */
23#define MMU_INPUT_FIFO_THRESHOLD_PCI_SHIFT 0
24#define MMU_INPUT_FIFO_THRESHOLD_PCI_MASK 0x7
25#define MMU_INPUT_FIFO_THRESHOLD_PSOC_SHIFT 4
26#define MMU_INPUT_FIFO_THRESHOLD_PSOC_MASK 0x70
27#define MMU_INPUT_FIFO_THRESHOLD_DMA_SHIFT 8
28#define MMU_INPUT_FIFO_THRESHOLD_DMA_MASK 0x700
29#define MMU_INPUT_FIFO_THRESHOLD_CPU_SHIFT 12
30#define MMU_INPUT_FIFO_THRESHOLD_CPU_MASK 0x7000
31#define MMU_INPUT_FIFO_THRESHOLD_MME_SHIFT 16
32#define MMU_INPUT_FIFO_THRESHOLD_MME_MASK 0x70000
33#define MMU_INPUT_FIFO_THRESHOLD_TPC_SHIFT 20
34#define MMU_INPUT_FIFO_THRESHOLD_TPC_MASK 0x700000
35#define MMU_INPUT_FIFO_THRESHOLD_OTHER_SHIFT 24
36#define MMU_INPUT_FIFO_THRESHOLD_OTHER_MASK 0x7000000
37
38/* MMU_MMU_ENABLE */
39#define MMU_MMU_ENABLE_R_SHIFT 0
40#define MMU_MMU_ENABLE_R_MASK 0x1
41
42/* MMU_FORCE_ORDERING */
43#define MMU_FORCE_ORDERING_DMA_WEAK_ORDERING_SHIFT 0
44#define MMU_FORCE_ORDERING_DMA_WEAK_ORDERING_MASK 0x1
45#define MMU_FORCE_ORDERING_PSOC_WEAK_ORDERING_SHIFT 1
46#define MMU_FORCE_ORDERING_PSOC_WEAK_ORDERING_MASK 0x2
47#define MMU_FORCE_ORDERING_PCI_WEAK_ORDERING_SHIFT 2
48#define MMU_FORCE_ORDERING_PCI_WEAK_ORDERING_MASK 0x4
49#define MMU_FORCE_ORDERING_CPU_WEAK_ORDERING_SHIFT 3
50#define MMU_FORCE_ORDERING_CPU_WEAK_ORDERING_MASK 0x8
51#define MMU_FORCE_ORDERING_MME_WEAK_ORDERING_SHIFT 4
52#define MMU_FORCE_ORDERING_MME_WEAK_ORDERING_MASK 0x10
53#define MMU_FORCE_ORDERING_TPC_WEAK_ORDERING_SHIFT 5
54#define MMU_FORCE_ORDERING_TPC_WEAK_ORDERING_MASK 0x20
55#define MMU_FORCE_ORDERING_DEFAULT_WEAK_ORDERING_SHIFT 6
56#define MMU_FORCE_ORDERING_DEFAULT_WEAK_ORDERING_MASK 0x40
57#define MMU_FORCE_ORDERING_DMA_STRONG_ORDERING_SHIFT 8
58#define MMU_FORCE_ORDERING_DMA_STRONG_ORDERING_MASK 0x100
59#define MMU_FORCE_ORDERING_PSOC_STRONG_ORDERING_SHIFT 9
60#define MMU_FORCE_ORDERING_PSOC_STRONG_ORDERING_MASK 0x200
61#define MMU_FORCE_ORDERING_PCI_STRONG_ORDERING_SHIFT 10
62#define MMU_FORCE_ORDERING_PCI_STRONG_ORDERING_MASK 0x400
63#define MMU_FORCE_ORDERING_CPU_STRONG_ORDERING_SHIFT 11
64#define MMU_FORCE_ORDERING_CPU_STRONG_ORDERING_MASK 0x800
65#define MMU_FORCE_ORDERING_MME_STRONG_ORDERING_SHIFT 12
66#define MMU_FORCE_ORDERING_MME_STRONG_ORDERING_MASK 0x1000
67#define MMU_FORCE_ORDERING_TPC_STRONG_ORDERING_SHIFT 13
68#define MMU_FORCE_ORDERING_TPC_STRONG_ORDERING_MASK 0x2000
69#define MMU_FORCE_ORDERING_DEFAULT_STRONG_ORDERING_SHIFT 14
70#define MMU_FORCE_ORDERING_DEFAULT_STRONG_ORDERING_MASK 0x4000
71
72/* MMU_FEATURE_ENABLE */
73#define MMU_FEATURE_ENABLE_VA_ORDERING_EN_SHIFT 0
74#define MMU_FEATURE_ENABLE_VA_ORDERING_EN_MASK 0x1
75#define MMU_FEATURE_ENABLE_CLEAN_LINK_LIST_SHIFT 1
76#define MMU_FEATURE_ENABLE_CLEAN_LINK_LIST_MASK 0x2
77#define MMU_FEATURE_ENABLE_HOP_OFFSET_EN_SHIFT 2
78#define MMU_FEATURE_ENABLE_HOP_OFFSET_EN_MASK 0x4
79#define MMU_FEATURE_ENABLE_OBI_ORDERING_EN_SHIFT 3
80#define MMU_FEATURE_ENABLE_OBI_ORDERING_EN_MASK 0x8
81#define MMU_FEATURE_ENABLE_STRONG_ORDERING_READ_EN_SHIFT 4
82#define MMU_FEATURE_ENABLE_STRONG_ORDERING_READ_EN_MASK 0x10
83#define MMU_FEATURE_ENABLE_TRACE_ENABLE_SHIFT 5
84#define MMU_FEATURE_ENABLE_TRACE_ENABLE_MASK 0x20
85
86/* MMU_VA_ORDERING_MASK_31_7 */
87#define MMU_VA_ORDERING_MASK_31_7_R_SHIFT 0
88#define MMU_VA_ORDERING_MASK_31_7_R_MASK 0x1FFFFFF
89
90/* MMU_VA_ORDERING_MASK_49_32 */
91#define MMU_VA_ORDERING_MASK_49_32_R_SHIFT 0
92#define MMU_VA_ORDERING_MASK_49_32_R_MASK 0x3FFFF
93
94/* MMU_LOG2_DDR_SIZE */
95#define MMU_LOG2_DDR_SIZE_R_SHIFT 0
96#define MMU_LOG2_DDR_SIZE_R_MASK 0xFF
97
98/* MMU_SCRAMBLER */
99#define MMU_SCRAMBLER_ADDR_BIT_SHIFT 0
100#define MMU_SCRAMBLER_ADDR_BIT_MASK 0x3F
101#define MMU_SCRAMBLER_SINGLE_DDR_EN_SHIFT 6
102#define MMU_SCRAMBLER_SINGLE_DDR_EN_MASK 0x40
103#define MMU_SCRAMBLER_SINGLE_DDR_ID_SHIFT 7
104#define MMU_SCRAMBLER_SINGLE_DDR_ID_MASK 0x80
105
106/* MMU_MEM_INIT_BUSY */
107#define MMU_MEM_INIT_BUSY_DATA_SHIFT 0
108#define MMU_MEM_INIT_BUSY_DATA_MASK 0x3
109#define MMU_MEM_INIT_BUSY_OBI0_SHIFT 2
110#define MMU_MEM_INIT_BUSY_OBI0_MASK 0x4
111#define MMU_MEM_INIT_BUSY_OBI1_SHIFT 3
112#define MMU_MEM_INIT_BUSY_OBI1_MASK 0x8
113
114/* MMU_SPI_MASK */
115#define MMU_SPI_MASK_R_SHIFT 0
116#define MMU_SPI_MASK_R_MASK 0xFF
117
118/* MMU_SPI_CAUSE */
119#define MMU_SPI_CAUSE_R_SHIFT 0
120#define MMU_SPI_CAUSE_R_MASK 0xFF
121
122/* MMU_PAGE_ERROR_CAPTURE */
123#define MMU_PAGE_ERROR_CAPTURE_VA_49_32_SHIFT 0
124#define MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF
125#define MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_SHIFT 18
126#define MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000
127
128/* MMU_PAGE_ERROR_CAPTURE_VA */
129#define MMU_PAGE_ERROR_CAPTURE_VA_VA_31_0_SHIFT 0
130#define MMU_PAGE_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF
131
132/* MMU_ACCESS_ERROR_CAPTURE */
133#define MMU_ACCESS_ERROR_CAPTURE_VA_49_32_SHIFT 0
134#define MMU_ACCESS_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF
135#define MMU_ACCESS_ERROR_CAPTURE_ENTRY_VALID_SHIFT 18
136#define MMU_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000
137
138/* MMU_ACCESS_ERROR_CAPTURE_VA */
139#define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_SHIFT 0
140#define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF
141
142#endif /* ASIC_REG_MMU_MASKS_H_ */
143
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
new file mode 100644
index 000000000000..bec6c014135c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
@@ -0,0 +1,53 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_MMU_REGS_H_
14#define ASIC_REG_MMU_REGS_H_
15
16/*
17 *****************************************
18 * MMU (Prototype: MMU)
19 *****************************************
20 */
21
22#define mmMMU_INPUT_FIFO_THRESHOLD 0x480000
23
24#define mmMMU_MMU_ENABLE 0x48000C
25
26#define mmMMU_FORCE_ORDERING 0x480010
27
28#define mmMMU_FEATURE_ENABLE 0x480014
29
30#define mmMMU_VA_ORDERING_MASK_31_7 0x480018
31
32#define mmMMU_VA_ORDERING_MASK_49_32 0x48001C
33
34#define mmMMU_LOG2_DDR_SIZE 0x480020
35
36#define mmMMU_SCRAMBLER 0x480024
37
38#define mmMMU_MEM_INIT_BUSY 0x480028
39
40#define mmMMU_SPI_MASK 0x48002C
41
42#define mmMMU_SPI_CAUSE 0x480030
43
44#define mmMMU_PAGE_ERROR_CAPTURE 0x480034
45
46#define mmMMU_PAGE_ERROR_CAPTURE_VA 0x480038
47
48#define mmMMU_ACCESS_ERROR_CAPTURE 0x48003C
49
50#define mmMMU_ACCESS_ERROR_CAPTURE_VA 0x480040
51
52#endif /* ASIC_REG_MMU_REGS_H_ */
53
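[Editorial sketch, not part of the patch] The mmu_masks.h shift/mask pairs and the mmu_regs.h offsets above follow the generated in-place MASK plus SHIFT convention, so a field is extracted by masking first and shifting down afterwards. As a minimal illustrative sketch (read_cfg_reg here is a hypothetical 32-bit MMIO read helper, not the driver's real accessor), bits 49:32 of a faulting virtual address captured by the MMU could be decoded like this:

#include <stdint.h>

/* Hedged sketch: decode the high part of the captured page-fault VA. */
static inline uint32_t mmu_fault_va_hi(uint32_t (*read_cfg_reg)(uint32_t offset))
{
	uint32_t capture = read_cfg_reg(mmMMU_PAGE_ERROR_CAPTURE);

	/* In-place mask, then shift the field down to bit 0. */
	return (capture & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK) >>
			MMU_PAGE_ERROR_CAPTURE_VA_49_32_SHIFT;
}

The low 32 bits of the address would come straight from mmMMU_PAGE_ERROR_CAPTURE_VA, whose single field spans the whole register.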
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
new file mode 100644
index 000000000000..209e41402a11
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
@@ -0,0 +1,209 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_PCI_NRTR_MASKS_H_
14#define ASIC_REG_PCI_NRTR_MASKS_H_
15
16/*
17 *****************************************
18 * PCI_NRTR (Prototype: IF_NRTR)
19 *****************************************
20 */
21
22/* PCI_NRTR_HBW_MAX_CRED */
23#define PCI_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0
24#define PCI_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F
25#define PCI_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8
26#define PCI_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00
27#define PCI_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16
28#define PCI_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000
29#define PCI_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24
30#define PCI_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000
31
32/* PCI_NRTR_LBW_MAX_CRED */
33#define PCI_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0
34#define PCI_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F
35#define PCI_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8
36#define PCI_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00
37#define PCI_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16
38#define PCI_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000
39#define PCI_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24
40#define PCI_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000
41
42/* PCI_NRTR_DBG_E_ARB */
43#define PCI_NRTR_DBG_E_ARB_W_SHIFT 0
44#define PCI_NRTR_DBG_E_ARB_W_MASK 0x7
45#define PCI_NRTR_DBG_E_ARB_S_SHIFT 8
46#define PCI_NRTR_DBG_E_ARB_S_MASK 0x700
47#define PCI_NRTR_DBG_E_ARB_N_SHIFT 16
48#define PCI_NRTR_DBG_E_ARB_N_MASK 0x70000
49#define PCI_NRTR_DBG_E_ARB_L_SHIFT 24
50#define PCI_NRTR_DBG_E_ARB_L_MASK 0x7000000
51
52/* PCI_NRTR_DBG_W_ARB */
53#define PCI_NRTR_DBG_W_ARB_E_SHIFT 0
54#define PCI_NRTR_DBG_W_ARB_E_MASK 0x7
55#define PCI_NRTR_DBG_W_ARB_S_SHIFT 8
56#define PCI_NRTR_DBG_W_ARB_S_MASK 0x700
57#define PCI_NRTR_DBG_W_ARB_N_SHIFT 16
58#define PCI_NRTR_DBG_W_ARB_N_MASK 0x70000
59#define PCI_NRTR_DBG_W_ARB_L_SHIFT 24
60#define PCI_NRTR_DBG_W_ARB_L_MASK 0x7000000
61
62/* PCI_NRTR_DBG_N_ARB */
63#define PCI_NRTR_DBG_N_ARB_W_SHIFT 0
64#define PCI_NRTR_DBG_N_ARB_W_MASK 0x7
65#define PCI_NRTR_DBG_N_ARB_E_SHIFT 8
66#define PCI_NRTR_DBG_N_ARB_E_MASK 0x700
67#define PCI_NRTR_DBG_N_ARB_S_SHIFT 16
68#define PCI_NRTR_DBG_N_ARB_S_MASK 0x70000
69#define PCI_NRTR_DBG_N_ARB_L_SHIFT 24
70#define PCI_NRTR_DBG_N_ARB_L_MASK 0x7000000
71
72/* PCI_NRTR_DBG_S_ARB */
73#define PCI_NRTR_DBG_S_ARB_W_SHIFT 0
74#define PCI_NRTR_DBG_S_ARB_W_MASK 0x7
75#define PCI_NRTR_DBG_S_ARB_E_SHIFT 8
76#define PCI_NRTR_DBG_S_ARB_E_MASK 0x700
77#define PCI_NRTR_DBG_S_ARB_N_SHIFT 16
78#define PCI_NRTR_DBG_S_ARB_N_MASK 0x70000
79#define PCI_NRTR_DBG_S_ARB_L_SHIFT 24
80#define PCI_NRTR_DBG_S_ARB_L_MASK 0x7000000
81
82/* PCI_NRTR_DBG_L_ARB */
83#define PCI_NRTR_DBG_L_ARB_W_SHIFT 0
84#define PCI_NRTR_DBG_L_ARB_W_MASK 0x7
85#define PCI_NRTR_DBG_L_ARB_E_SHIFT 8
86#define PCI_NRTR_DBG_L_ARB_E_MASK 0x700
87#define PCI_NRTR_DBG_L_ARB_S_SHIFT 16
88#define PCI_NRTR_DBG_L_ARB_S_MASK 0x70000
89#define PCI_NRTR_DBG_L_ARB_N_SHIFT 24
90#define PCI_NRTR_DBG_L_ARB_N_MASK 0x7000000
91
92/* PCI_NRTR_DBG_E_ARB_MAX */
93#define PCI_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0
94#define PCI_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F
95
96/* PCI_NRTR_DBG_W_ARB_MAX */
97#define PCI_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0
98#define PCI_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F
99
100/* PCI_NRTR_DBG_N_ARB_MAX */
101#define PCI_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0
102#define PCI_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F
103
104/* PCI_NRTR_DBG_S_ARB_MAX */
105#define PCI_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0
106#define PCI_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F
107
108/* PCI_NRTR_DBG_L_ARB_MAX */
109#define PCI_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0
110#define PCI_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F
111
112/* PCI_NRTR_SPLIT_COEF */
113#define PCI_NRTR_SPLIT_COEF_VAL_SHIFT 0
114#define PCI_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF
115
116/* PCI_NRTR_SPLIT_CFG */
117#define PCI_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0
118#define PCI_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1
119#define PCI_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1
120#define PCI_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2
121#define PCI_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2
122#define PCI_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC
123#define PCI_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4
124#define PCI_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10
125#define PCI_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5
126#define PCI_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20
127#define PCI_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6
128#define PCI_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0
129
130/* PCI_NRTR_SPLIT_RD_SAT */
131#define PCI_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0
132#define PCI_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF
133
134/* PCI_NRTR_SPLIT_RD_RST_TOKEN */
135#define PCI_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0
136#define PCI_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF
137
138/* PCI_NRTR_SPLIT_RD_TIMEOUT */
139#define PCI_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0
140#define PCI_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF
141
142/* PCI_NRTR_SPLIT_WR_SAT */
143#define PCI_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0
144#define PCI_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF
145
146/* PCI_NRTR_WPLIT_WR_TST_TOLEN */
147#define PCI_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0
148#define PCI_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF
149
150/* PCI_NRTR_SPLIT_WR_TIMEOUT */
151#define PCI_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0
152#define PCI_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF
153
154/* PCI_NRTR_HBW_RANGE_HIT */
155#define PCI_NRTR_HBW_RANGE_HIT_IND_SHIFT 0
156#define PCI_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF
157
158/* PCI_NRTR_HBW_RANGE_MASK_L */
159#define PCI_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0
160#define PCI_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF
161
162/* PCI_NRTR_HBW_RANGE_MASK_H */
163#define PCI_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0
164#define PCI_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF
165
166/* PCI_NRTR_HBW_RANGE_BASE_L */
167#define PCI_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0
168#define PCI_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF
169
170/* PCI_NRTR_HBW_RANGE_BASE_H */
171#define PCI_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0
172#define PCI_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF
173
174/* PCI_NRTR_LBW_RANGE_HIT */
175#define PCI_NRTR_LBW_RANGE_HIT_IND_SHIFT 0
176#define PCI_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF
177
178/* PCI_NRTR_LBW_RANGE_MASK */
179#define PCI_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0
180#define PCI_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF
181
182/* PCI_NRTR_LBW_RANGE_BASE */
183#define PCI_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0
184#define PCI_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF
185
186/* PCI_NRTR_RGLTR */
187#define PCI_NRTR_RGLTR_WR_EN_SHIFT 0
188#define PCI_NRTR_RGLTR_WR_EN_MASK 0x1
189#define PCI_NRTR_RGLTR_RD_EN_SHIFT 4
190#define PCI_NRTR_RGLTR_RD_EN_MASK 0x10
191
192/* PCI_NRTR_RGLTR_WR_RESULT */
193#define PCI_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0
194#define PCI_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF
195
196/* PCI_NRTR_RGLTR_RD_RESULT */
197#define PCI_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0
198#define PCI_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF
199
200/* PCI_NRTR_SCRAMB_EN */
201#define PCI_NRTR_SCRAMB_EN_VAL_SHIFT 0
202#define PCI_NRTR_SCRAMB_EN_VAL_MASK 0x1
203
204/* PCI_NRTR_NON_LIN_SCRAMB */
205#define PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0
206#define PCI_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
207
208#endif /* ASIC_REG_PCI_NRTR_MASKS_H_ */
209
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
new file mode 100644
index 000000000000..447e5d4e7dc8
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
@@ -0,0 +1,227 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_PCI_NRTR_REGS_H_
14#define ASIC_REG_PCI_NRTR_REGS_H_
15
16/*
17 *****************************************
18 * PCI_NRTR (Prototype: IF_NRTR)
19 *****************************************
20 */
21
22#define mmPCI_NRTR_HBW_MAX_CRED 0x100
23
24#define mmPCI_NRTR_LBW_MAX_CRED 0x120
25
26#define mmPCI_NRTR_DBG_E_ARB 0x300
27
28#define mmPCI_NRTR_DBG_W_ARB 0x304
29
30#define mmPCI_NRTR_DBG_N_ARB 0x308
31
32#define mmPCI_NRTR_DBG_S_ARB 0x30C
33
34#define mmPCI_NRTR_DBG_L_ARB 0x310
35
36#define mmPCI_NRTR_DBG_E_ARB_MAX 0x320
37
38#define mmPCI_NRTR_DBG_W_ARB_MAX 0x324
39
40#define mmPCI_NRTR_DBG_N_ARB_MAX 0x328
41
42#define mmPCI_NRTR_DBG_S_ARB_MAX 0x32C
43
44#define mmPCI_NRTR_DBG_L_ARB_MAX 0x330
45
46#define mmPCI_NRTR_SPLIT_COEF_0 0x400
47
48#define mmPCI_NRTR_SPLIT_COEF_1 0x404
49
50#define mmPCI_NRTR_SPLIT_COEF_2 0x408
51
52#define mmPCI_NRTR_SPLIT_COEF_3 0x40C
53
54#define mmPCI_NRTR_SPLIT_COEF_4 0x410
55
56#define mmPCI_NRTR_SPLIT_COEF_5 0x414
57
58#define mmPCI_NRTR_SPLIT_COEF_6 0x418
59
60#define mmPCI_NRTR_SPLIT_COEF_7 0x41C
61
62#define mmPCI_NRTR_SPLIT_COEF_8 0x420
63
64#define mmPCI_NRTR_SPLIT_COEF_9 0x424
65
66#define mmPCI_NRTR_SPLIT_CFG 0x440
67
68#define mmPCI_NRTR_SPLIT_RD_SAT 0x444
69
70#define mmPCI_NRTR_SPLIT_RD_RST_TOKEN 0x448
71
72#define mmPCI_NRTR_SPLIT_RD_TIMEOUT_0 0x44C
73
74#define mmPCI_NRTR_SPLIT_RD_TIMEOUT_1 0x450
75
76#define mmPCI_NRTR_SPLIT_WR_SAT 0x454
77
78#define mmPCI_NRTR_WPLIT_WR_TST_TOLEN 0x458
79
80#define mmPCI_NRTR_SPLIT_WR_TIMEOUT_0 0x45C
81
82#define mmPCI_NRTR_SPLIT_WR_TIMEOUT_1 0x460
83
84#define mmPCI_NRTR_HBW_RANGE_HIT 0x470
85
86#define mmPCI_NRTR_HBW_RANGE_MASK_L_0 0x480
87
88#define mmPCI_NRTR_HBW_RANGE_MASK_L_1 0x484
89
90#define mmPCI_NRTR_HBW_RANGE_MASK_L_2 0x488
91
92#define mmPCI_NRTR_HBW_RANGE_MASK_L_3 0x48C
93
94#define mmPCI_NRTR_HBW_RANGE_MASK_L_4 0x490
95
96#define mmPCI_NRTR_HBW_RANGE_MASK_L_5 0x494
97
98#define mmPCI_NRTR_HBW_RANGE_MASK_L_6 0x498
99
100#define mmPCI_NRTR_HBW_RANGE_MASK_L_7 0x49C
101
102#define mmPCI_NRTR_HBW_RANGE_MASK_H_0 0x4A0
103
104#define mmPCI_NRTR_HBW_RANGE_MASK_H_1 0x4A4
105
106#define mmPCI_NRTR_HBW_RANGE_MASK_H_2 0x4A8
107
108#define mmPCI_NRTR_HBW_RANGE_MASK_H_3 0x4AC
109
110#define mmPCI_NRTR_HBW_RANGE_MASK_H_4 0x4B0
111
112#define mmPCI_NRTR_HBW_RANGE_MASK_H_5 0x4B4
113
114#define mmPCI_NRTR_HBW_RANGE_MASK_H_6 0x4B8
115
116#define mmPCI_NRTR_HBW_RANGE_MASK_H_7 0x4BC
117
118#define mmPCI_NRTR_HBW_RANGE_BASE_L_0 0x4C0
119
120#define mmPCI_NRTR_HBW_RANGE_BASE_L_1 0x4C4
121
122#define mmPCI_NRTR_HBW_RANGE_BASE_L_2 0x4C8
123
124#define mmPCI_NRTR_HBW_RANGE_BASE_L_3 0x4CC
125
126#define mmPCI_NRTR_HBW_RANGE_BASE_L_4 0x4D0
127
128#define mmPCI_NRTR_HBW_RANGE_BASE_L_5 0x4D4
129
130#define mmPCI_NRTR_HBW_RANGE_BASE_L_6 0x4D8
131
132#define mmPCI_NRTR_HBW_RANGE_BASE_L_7 0x4DC
133
134#define mmPCI_NRTR_HBW_RANGE_BASE_H_0 0x4E0
135
136#define mmPCI_NRTR_HBW_RANGE_BASE_H_1 0x4E4
137
138#define mmPCI_NRTR_HBW_RANGE_BASE_H_2 0x4E8
139
140#define mmPCI_NRTR_HBW_RANGE_BASE_H_3 0x4EC
141
142#define mmPCI_NRTR_HBW_RANGE_BASE_H_4 0x4F0
143
144#define mmPCI_NRTR_HBW_RANGE_BASE_H_5 0x4F4
145
146#define mmPCI_NRTR_HBW_RANGE_BASE_H_6 0x4F8
147
148#define mmPCI_NRTR_HBW_RANGE_BASE_H_7 0x4FC
149
150#define mmPCI_NRTR_LBW_RANGE_HIT 0x500
151
152#define mmPCI_NRTR_LBW_RANGE_MASK_0 0x510
153
154#define mmPCI_NRTR_LBW_RANGE_MASK_1 0x514
155
156#define mmPCI_NRTR_LBW_RANGE_MASK_2 0x518
157
158#define mmPCI_NRTR_LBW_RANGE_MASK_3 0x51C
159
160#define mmPCI_NRTR_LBW_RANGE_MASK_4 0x520
161
162#define mmPCI_NRTR_LBW_RANGE_MASK_5 0x524
163
164#define mmPCI_NRTR_LBW_RANGE_MASK_6 0x528
165
166#define mmPCI_NRTR_LBW_RANGE_MASK_7 0x52C
167
168#define mmPCI_NRTR_LBW_RANGE_MASK_8 0x530
169
170#define mmPCI_NRTR_LBW_RANGE_MASK_9 0x534
171
172#define mmPCI_NRTR_LBW_RANGE_MASK_10 0x538
173
174#define mmPCI_NRTR_LBW_RANGE_MASK_11 0x53C
175
176#define mmPCI_NRTR_LBW_RANGE_MASK_12 0x540
177
178#define mmPCI_NRTR_LBW_RANGE_MASK_13 0x544
179
180#define mmPCI_NRTR_LBW_RANGE_MASK_14 0x548
181
182#define mmPCI_NRTR_LBW_RANGE_MASK_15 0x54C
183
184#define mmPCI_NRTR_LBW_RANGE_BASE_0 0x550
185
186#define mmPCI_NRTR_LBW_RANGE_BASE_1 0x554
187
188#define mmPCI_NRTR_LBW_RANGE_BASE_2 0x558
189
190#define mmPCI_NRTR_LBW_RANGE_BASE_3 0x55C
191
192#define mmPCI_NRTR_LBW_RANGE_BASE_4 0x560
193
194#define mmPCI_NRTR_LBW_RANGE_BASE_5 0x564
195
196#define mmPCI_NRTR_LBW_RANGE_BASE_6 0x568
197
198#define mmPCI_NRTR_LBW_RANGE_BASE_7 0x56C
199
200#define mmPCI_NRTR_LBW_RANGE_BASE_8 0x570
201
202#define mmPCI_NRTR_LBW_RANGE_BASE_9 0x574
203
204#define mmPCI_NRTR_LBW_RANGE_BASE_10 0x578
205
206#define mmPCI_NRTR_LBW_RANGE_BASE_11 0x57C
207
208#define mmPCI_NRTR_LBW_RANGE_BASE_12 0x580
209
210#define mmPCI_NRTR_LBW_RANGE_BASE_13 0x584
211
212#define mmPCI_NRTR_LBW_RANGE_BASE_14 0x588
213
214#define mmPCI_NRTR_LBW_RANGE_BASE_15 0x58C
215
216#define mmPCI_NRTR_RGLTR 0x590
217
218#define mmPCI_NRTR_RGLTR_WR_RESULT 0x594
219
220#define mmPCI_NRTR_RGLTR_RD_RESULT 0x598
221
222#define mmPCI_NRTR_SCRAMB_EN 0x600
223
224#define mmPCI_NRTR_NON_LIN_SCRAMB 0x604
225
226#endif /* ASIC_REG_PCI_NRTR_REGS_H_ */
227
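[Editorial sketch, not part of the patch] Updating a multi-bit field with these generated macros is the usual read-modify-write sequence; the sketch below is illustrative only and assumes hypothetical read_cfg_reg/write_cfg_reg 32-bit MMIO helpers rather than the driver's actual register accessors.

#include <stdint.h>

/* Hedged sketch: program the DEFAULT_MESH field of PCI_NRTR_SPLIT_CFG. */
static void pci_nrtr_set_default_mesh(uint32_t (*read_cfg_reg)(uint32_t),
				      void (*write_cfg_reg)(uint32_t, uint32_t),
				      uint32_t mesh)
{
	uint32_t cfg = read_cfg_reg(mmPCI_NRTR_SPLIT_CFG);

	/* Clear the field, then insert the new value within its mask. */
	cfg &= ~PCI_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK;
	cfg |= (mesh << PCI_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT) &
			PCI_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK;

	write_cfg_reg(mmPCI_NRTR_SPLIT_CFG, cfg);
}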
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
new file mode 100644
index 000000000000..daaf5d9079dc
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
@@ -0,0 +1,243 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_PCIE_AUX_REGS_H_
14#define ASIC_REG_PCIE_AUX_REGS_H_
15
16/*
17 *****************************************
18 * PCIE_AUX (Prototype: PCIE_AUX)
19 *****************************************
20 */
21
22#define mmPCIE_AUX_APB_TIMEOUT 0xC07004
23
24#define mmPCIE_AUX_PHY_INIT 0xC07100
25
26#define mmPCIE_AUX_LTR_MAX_LATENCY 0xC07138
27
28#define mmPCIE_AUX_BAR0_START_L 0xC07160
29
30#define mmPCIE_AUX_BAR0_START_H 0xC07164
31
32#define mmPCIE_AUX_BAR1_START 0xC07168
33
34#define mmPCIE_AUX_BAR2_START_L 0xC0716C
35
36#define mmPCIE_AUX_BAR2_START_H 0xC07170
37
38#define mmPCIE_AUX_BAR3_START 0xC07174
39
40#define mmPCIE_AUX_BAR4_START_L 0xC07178
41
42#define mmPCIE_AUX_BAR4_START_H 0xC0717C
43
44#define mmPCIE_AUX_BAR5_START 0xC07180
45
46#define mmPCIE_AUX_BAR0_LIMIT_L 0xC07184
47
48#define mmPCIE_AUX_BAR0_LIMIT_H 0xC07188
49
50#define mmPCIE_AUX_BAR1_LIMIT 0xC0718C
51
52#define mmPCIE_AUX_BAR2_LIMIT_L 0xC07190
53
54#define mmPCIE_AUX_BAR2_LIMIT_H 0xC07194
55
56#define mmPCIE_AUX_BAR3_LIMIT 0xC07198
57
58#define mmPCIE_AUX_BAR4_LIMIT_L 0xC0719C
59
60#define mmPCIE_AUX_BAR4_LIMIT_H 0xC07200
61
62#define mmPCIE_AUX_BAR5_LIMIT 0xC07204
63
64#define mmPCIE_AUX_BUS_MASTER_EN 0xC07208
65
66#define mmPCIE_AUX_MEM_SPACE_EN 0xC0720C
67
68#define mmPCIE_AUX_MAX_RD_REQ_SIZE 0xC07210
69
70#define mmPCIE_AUX_MAX_PAYLOAD_SIZE 0xC07214
71
72#define mmPCIE_AUX_EXT_TAG_EN 0xC07218
73
74#define mmPCIE_AUX_RCB 0xC0721C
75
76#define mmPCIE_AUX_PM_NO_SOFT_RST 0xC07220
77
78#define mmPCIE_AUX_PBUS_NUM 0xC07224
79
80#define mmPCIE_AUX_PBUS_DEV_NUM 0xC07228
81
82#define mmPCIE_AUX_NO_SNOOP_EN 0xC0722C
83
84#define mmPCIE_AUX_RELAX_ORDER_EN 0xC07230
85
86#define mmPCIE_AUX_HP_SLOT_CTRL_ACCESS 0xC07234
87
88#define mmPCIE_AUX_DLL_STATE_CHGED_EN 0xC07238
89
90#define mmPCIE_AUX_CMP_CPLED_INT_EN 0xC0723C
91
92#define mmPCIE_AUX_HP_INT_EN 0xC07340
93
94#define mmPCIE_AUX_PRE_DET_CHGEN_EN 0xC07344
95
96#define mmPCIE_AUX_MRL_SENSOR_CHGED_EN 0xC07348
97
98#define mmPCIE_AUX_PWR_FAULT_DET_EN 0xC0734C
99
100#define mmPCIE_AUX_ATTEN_BUTTON_PRESSED_EN 0xC07350
101
102#define mmPCIE_AUX_PF_FLR_ACTIVE 0xC07360
103
104#define mmPCIE_AUX_PF_FLR_DONE 0xC07364
105
106#define mmPCIE_AUX_FLR_INT 0xC07390
107
108#define mmPCIE_AUX_LTR_M_EN 0xC073B0
109
110#define mmPCIE_AUX_LTSSM_EN 0xC07428
111
112#define mmPCIE_AUX_SYS_INTR 0xC07440
113
114#define mmPCIE_AUX_INT_DISABLE 0xC07444
115
116#define mmPCIE_AUX_SMLH_LINK_UP 0xC07448
117
118#define mmPCIE_AUX_PM_CURR_STATE 0xC07450
119
120#define mmPCIE_AUX_RDLH_LINK_UP 0xC07458
121
122#define mmPCIE_AUX_BRDG_SLV_XFER_PENDING 0xC0745C
123
124#define mmPCIE_AUX_BRDG_DBI_XFER_PENDING 0xC07460
125
126#define mmPCIE_AUX_AUTO_SP_DIS 0xC07478
127
128#define mmPCIE_AUX_DBI 0xC07490
129
130#define mmPCIE_AUX_DBI_32 0xC07494
131
132#define mmPCIE_AUX_DIAG_STATUS_BUS_0 0xC074A4
133
134#define mmPCIE_AUX_DIAG_STATUS_BUS_1 0xC074A8
135
136#define mmPCIE_AUX_DIAG_STATUS_BUS_2 0xC074AC
137
138#define mmPCIE_AUX_DIAG_STATUS_BUS_3 0xC074B0
139
140#define mmPCIE_AUX_DIAG_STATUS_BUS_4 0xC074B4
141
142#define mmPCIE_AUX_DIAG_STATUS_BUS_5 0xC074B8
143
144#define mmPCIE_AUX_DIAG_STATUS_BUS_6 0xC074BC
145
146#define mmPCIE_AUX_DIAG_STATUS_BUS_7 0xC074C0
147
148#define mmPCIE_AUX_DIAG_STATUS_BUS_8 0xC074C4
149
150#define mmPCIE_AUX_DIAG_STATUS_BUS_9 0xC074C8
151
152#define mmPCIE_AUX_DIAG_STATUS_BUS_10 0xC074CC
153
154#define mmPCIE_AUX_DIAG_STATUS_BUS_11 0xC074D0
155
156#define mmPCIE_AUX_DIAG_STATUS_BUS_12 0xC074D4
157
158#define mmPCIE_AUX_DIAG_STATUS_BUS_13 0xC074D8
159
160#define mmPCIE_AUX_DIAG_STATUS_BUS_14 0xC074DC
161
162#define mmPCIE_AUX_DIAG_STATUS_BUS_15 0xC074E0
163
164#define mmPCIE_AUX_DIAG_STATUS_BUS_16 0xC074E4
165
166#define mmPCIE_AUX_DIAG_STATUS_BUS_17 0xC074E8
167
168#define mmPCIE_AUX_DIAG_STATUS_BUS_18 0xC074EC
169
170#define mmPCIE_AUX_DIAG_STATUS_BUS_19 0xC074F0
171
172#define mmPCIE_AUX_DIAG_STATUS_BUS_20 0xC074F4
173
174#define mmPCIE_AUX_DIAG_STATUS_BUS_21 0xC074F8
175
176#define mmPCIE_AUX_DIAG_STATUS_BUS_22 0xC074FC
177
178#define mmPCIE_AUX_DIAG_STATUS_BUS_23 0xC07500
179
180#define mmPCIE_AUX_DIAG_STATUS_BUS_24 0xC07504
181
182#define mmPCIE_AUX_DIAG_STATUS_BUS_25 0xC07508
183
184#define mmPCIE_AUX_DIAG_STATUS_BUS_26 0xC0750C
185
186#define mmPCIE_AUX_DIAG_STATUS_BUS_27 0xC07510
187
188#define mmPCIE_AUX_DIAG_STATUS_BUS_28 0xC07514
189
190#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_0 0xC07640
191
192#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_1 0xC07644
193
194#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_2 0xC07648
195
196#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_3 0xC0764C
197
198#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_4 0xC07650
199
200#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_5 0xC07654
201
202#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_6 0xC07658
203
204#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_7 0xC0765C
205
206#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_0 0xC07744
207
208#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_1 0xC07748
209
210#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_2 0xC0774C
211
212#define mmPCIE_AUX_APP_RAS_DES_TBA_CTRL 0xC07774
213
214#define mmPCIE_AUX_PM_DSTATE 0xC07840
215
216#define mmPCIE_AUX_PM_PME_EN 0xC07844
217
218#define mmPCIE_AUX_PM_LINKST_IN_L0S 0xC07848
219
220#define mmPCIE_AUX_PM_LINKST_IN_L1 0xC0784C
221
222#define mmPCIE_AUX_PM_LINKST_IN_L2 0xC07850
223
224#define mmPCIE_AUX_PM_LINKST_L2_EXIT 0xC07854
225
226#define mmPCIE_AUX_PM_STATUS 0xC07858
227
228#define mmPCIE_AUX_APP_READY_ENTER_L23 0xC0785C
229
230#define mmPCIE_AUX_APP_XFER_PENDING 0xC07860
231
232#define mmPCIE_AUX_APP_REQ_L1 0xC07930
233
234#define mmPCIE_AUX_AUX_PM_EN 0xC07934
235
236#define mmPCIE_AUX_APPS_PM_XMT_PME 0xC07938
237
238#define mmPCIE_AUX_OUTBAND_PWRUP_CMD 0xC07940
239
240#define mmPCIE_AUX_PERST 0xC079B8
241
242#endif /* ASIC_REG_PCIE_AUX_REGS_H_ */
243
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
new file mode 100644
index 000000000000..8eda4de58788
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
@@ -0,0 +1,105 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_PSOC_EMMC_PLL_REGS_H_
14#define ASIC_REG_PSOC_EMMC_PLL_REGS_H_
15
16/*
17 *****************************************
18 * PSOC_EMMC_PLL (Prototype: PLL)
19 *****************************************
20 */
21
22#define mmPSOC_EMMC_PLL_NR 0xC70100
23
24#define mmPSOC_EMMC_PLL_NF 0xC70104
25
26#define mmPSOC_EMMC_PLL_OD 0xC70108
27
28#define mmPSOC_EMMC_PLL_NB 0xC7010C
29
30#define mmPSOC_EMMC_PLL_CFG 0xC70110
31
32#define mmPSOC_EMMC_PLL_LOSE_MASK 0xC70120
33
34#define mmPSOC_EMMC_PLL_LOCK_INTR 0xC70128
35
36#define mmPSOC_EMMC_PLL_LOCK_BYPASS 0xC7012C
37
38#define mmPSOC_EMMC_PLL_DATA_CHNG 0xC70130
39
40#define mmPSOC_EMMC_PLL_RST 0xC70134
41
42#define mmPSOC_EMMC_PLL_SLIP_WD_CNTR 0xC70150
43
44#define mmPSOC_EMMC_PLL_DIV_FACTOR_0 0xC70200
45
46#define mmPSOC_EMMC_PLL_DIV_FACTOR_1 0xC70204
47
48#define mmPSOC_EMMC_PLL_DIV_FACTOR_2 0xC70208
49
50#define mmPSOC_EMMC_PLL_DIV_FACTOR_3 0xC7020C
51
52#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_0 0xC70220
53
54#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_1 0xC70224
55
56#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_2 0xC70228
57
58#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_3 0xC7022C
59
60#define mmPSOC_EMMC_PLL_DIV_SEL_0 0xC70280
61
62#define mmPSOC_EMMC_PLL_DIV_SEL_1 0xC70284
63
64#define mmPSOC_EMMC_PLL_DIV_SEL_2 0xC70288
65
66#define mmPSOC_EMMC_PLL_DIV_SEL_3 0xC7028C
67
68#define mmPSOC_EMMC_PLL_DIV_EN_0 0xC702A0
69
70#define mmPSOC_EMMC_PLL_DIV_EN_1 0xC702A4
71
72#define mmPSOC_EMMC_PLL_DIV_EN_2 0xC702A8
73
74#define mmPSOC_EMMC_PLL_DIV_EN_3 0xC702AC
75
76#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_0 0xC702C0
77
78#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_1 0xC702C4
79
80#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_2 0xC702C8
81
82#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_3 0xC702CC
83
84#define mmPSOC_EMMC_PLL_CLK_GATER 0xC70300
85
86#define mmPSOC_EMMC_PLL_CLK_RLX_0 0xC70310
87
88#define mmPSOC_EMMC_PLL_CLK_RLX_1 0xC70314
89
90#define mmPSOC_EMMC_PLL_CLK_RLX_2 0xC70318
91
92#define mmPSOC_EMMC_PLL_CLK_RLX_3 0xC7031C
93
94#define mmPSOC_EMMC_PLL_REF_CNTR_PERIOD 0xC70400
95
96#define mmPSOC_EMMC_PLL_REF_LOW_THRESHOLD 0xC70410
97
98#define mmPSOC_EMMC_PLL_REF_HIGH_THRESHOLD 0xC70420
99
100#define mmPSOC_EMMC_PLL_PLL_NOT_STABLE 0xC70430
101
102#define mmPSOC_EMMC_PLL_FREQ_CALC_EN 0xC70440
103
104#endif /* ASIC_REG_PSOC_EMMC_PLL_REGS_H_ */
105
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
new file mode 100644
index 000000000000..d4bf0e1db4df
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
@@ -0,0 +1,447 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
14#define ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
15
16/*
17 *****************************************
18 * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF)
19 *****************************************
20 */
21
22/* PSOC_GLOBAL_CONF_NON_RST_FLOPS */
23#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_SHIFT 0
24#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_MASK 0xFFFFFFFF
25
26/* PSOC_GLOBAL_CONF_PCI_FW_FSM */
27#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_SHIFT 0
28#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_MASK 0x1
29
30/* PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START */
31#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT 0
32#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_MASK 0x1
33
34/* PSOC_GLOBAL_CONF_BTM_FSM */
35#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_SHIFT 0
36#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK 0xF
37
38/* PSOC_GLOBAL_CONF_SW_BTM_FSM */
39#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT 0
40#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_MASK 0xF
41
42/* PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM */
43#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_SHIFT 0
44#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_MASK 0xF
45
46/* PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT */
47#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_SHIFT 0
48#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_MASK 0xFFFFFFFF
49
50/* PSOC_GLOBAL_CONF_SPI_MEM_EN */
51#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_SHIFT 0
52#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_MASK 0x1
53
54/* PSOC_GLOBAL_CONF_PRSTN */
55#define PSOC_GLOBAL_CONF_PRSTN_VAL_SHIFT 0
56#define PSOC_GLOBAL_CONF_PRSTN_VAL_MASK 0x1
57
58/* PSOC_GLOBAL_CONF_PCIE_EN */
59#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_SHIFT 0
60#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_MASK 0x1
61
62/* PSOC_GLOBAL_CONF_SPI_IMG_STS */
63#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_SHIFT 0
64#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_MASK 0x1
65#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_SHIFT 1
66#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_MASK 0x2
67#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_SHIFT 2
68#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_MASK 0x4
69#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_SHIFT 3
70#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_MASK 0x8
71
72/* PSOC_GLOBAL_CONF_BOOT_SEQ_FSM */
73#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_SHIFT 0
74#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_MASK 0x1
75#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_SHIFT 1
76#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_MASK 0x2
77#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_SHIFT 2
78#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_MASK 0x4
79#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_SHIFT 3
80#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_MASK 0x8
81#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_SHIFT 4
82#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_MASK 0x10
83#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_SHIFT 5
84#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_MASK 0x20
85#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_SHIFT 6
86#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_MASK 0x40
87#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_SHIFT 7
88#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_MASK 0x80
89#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_SHIFT 8
90#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_MASK 0x100
91
92/* PSOC_GLOBAL_CONF_SCRATCHPAD */
93#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_SHIFT 0
94#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_MASK 0xFFFFFFFF
95
96/* PSOC_GLOBAL_CONF_SEMAPHORE */
97#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_SHIFT 0
98#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_MASK 0xFFFFFFFF
99
100/* PSOC_GLOBAL_CONF_WARM_REBOOT */
101#define PSOC_GLOBAL_CONF_WARM_REBOOT_CNTR_SHIFT 0
102#define PSOC_GLOBAL_CONF_WARM_REBOOT_CNTR_MASK 0xFFFFFFFF
103
104/* PSOC_GLOBAL_CONF_UBOOT_MAGIC */
105#define PSOC_GLOBAL_CONF_UBOOT_MAGIC_VAL_SHIFT 0
106#define PSOC_GLOBAL_CONF_UBOOT_MAGIC_VAL_MASK 0xFFFFFFFF
107
108/* PSOC_GLOBAL_CONF_SPL_SOURCE */
109#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_SHIFT 0
110#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_MASK 0x7
111
112/* PSOC_GLOBAL_CONF_I2C_MSTR1_DBG */
113#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_SHIFT 0
114#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_MASK 0x1
115#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_SHIFT 1
116#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_MASK 0x2
117#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_SHIFT 2
118#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_MASK 0x4
119#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_SHIFT 3
120#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_MASK 0x8
121#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_SHIFT 4
122#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_MASK 0x10
123#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_SHIFT 5
124#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_MASK 0x20
125#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_SHIFT 6
126#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_MASK 0x40
127#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_SHIFT 7
128#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_MASK 0x80
129#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_SHIFT 8
130#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_MASK 0x100
131#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_SHIFT 9
132#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_MASK 0x200
133#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_SHIFT 10
134#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_MASK 0x7C00
135#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_SHIFT 15
136#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_MASK 0x78000
137#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_SHIFT 19
138#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_MASK 0x80000
139
140/* PSOC_GLOBAL_CONF_I2C_SLV */
141#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_SHIFT 0
142#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_MASK 0x1
143
144/* PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK */
145#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_SHIFT 0
146#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_MASK 0x1
147
148/* PSOC_GLOBAL_CONF_APP_STATUS */
149#define PSOC_GLOBAL_CONF_APP_STATUS_IND_SHIFT 0
150#define PSOC_GLOBAL_CONF_APP_STATUS_IND_MASK 0xFFFFFFFF
151
152/* PSOC_GLOBAL_CONF_BTL_STS */
153#define PSOC_GLOBAL_CONF_BTL_STS_DONE_SHIFT 0
154#define PSOC_GLOBAL_CONF_BTL_STS_DONE_MASK 0x1
155#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_SHIFT 4
156#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_MASK 0x10
157#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_SHIFT 8
158#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_MASK 0xF00
159
160/* PSOC_GLOBAL_CONF_TIMEOUT_INTR */
161#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_SHIFT 0
162#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_MASK 0x1
163#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_SHIFT 1
164#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_MASK 0x2
165#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_SHIFT 2
166#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_MASK 0x4
167#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_SHIFT 3
168#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_MASK 0x8
169#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_SHIFT 4
170#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_MASK 0x10
171#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_SHIFT 5
172#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_MASK 0x20
173#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_SHIFT 6
174#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_MASK 0x40
175#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_SHIFT 7
176#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_MASK 0x80
177
178/* PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR */
179#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_SHIFT 0
180#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_MASK 0x1
181
182/* PSOC_GLOBAL_CONF_PERIPH_INTR */
183#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_SHIFT 0
184#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_MASK 0x1
185#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_SHIFT 1
186#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_MASK 0x2
187#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_SHIFT 2
188#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_MASK 0x4
189#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_SHIFT 3
190#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_MASK 0x8
191#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_SHIFT 4
192#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_MASK 0x10
193#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_SHIFT 5
194#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_MASK 0x20
195#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_SHIFT 6
196#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_MASK 0x40
197#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_SHIFT 7
198#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_MASK 0x80
199#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_SHIFT 12
200#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_MASK 0x1000
201#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_SHIFT 13
202#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_MASK 0x2000
203#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_SHIFT 16
204#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_MASK 0x10000
205
206/* PSOC_GLOBAL_CONF_COMB_PERIPH_INTR */
207#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_SHIFT 0
208#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_MASK 0x1
209
210/* PSOC_GLOBAL_CONF_AXI_ERR_INTR */
211#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_SHIFT 0
212#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_MASK 0x1
213
214/* PSOC_GLOBAL_CONF_TARGETID */
215#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_SHIFT 1
216#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_MASK 0xFFE
217#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_SHIFT 12
218#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_MASK 0xFFFF000
219#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_SHIFT 28
220#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_MASK 0xF0000000
221
222/* PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE */
223#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_SHIFT 0
224#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_MASK 0x1
225
226/* PSOC_GLOBAL_CONF_MII_ADDR */
227#define PSOC_GLOBAL_CONF_MII_ADDR_VAL_SHIFT 0
228#define PSOC_GLOBAL_CONF_MII_ADDR_VAL_MASK 0xFF
229
230/* PSOC_GLOBAL_CONF_MII_SPEED */
231#define PSOC_GLOBAL_CONF_MII_SPEED_VAL_SHIFT 0
232#define PSOC_GLOBAL_CONF_MII_SPEED_VAL_MASK 0x3
233
234/* PSOC_GLOBAL_CONF_BOOT_STRAP_PINS */
235#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_SHIFT 0
236#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_MASK 0x1
237#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_SHIFT 1
238#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_MASK 0x2
239#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_SHIFT 2
240#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_MASK 0x4
241#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_SHIFT 3
242#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_MASK 0x8
243#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_SHIFT 4
244#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_MASK 0x10
245#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_SHIFT 5
246#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_MASK 0xFE0
247#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BOOT_STG2_SRC_SHIFT 12
248#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BOOT_STG2_SRC_MASK 0x3000
249#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_BPS_SHIFT 14
250#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_BPS_MASK 0x1FC000
251#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_SHIFT 21
252#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK 0x200000
253#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_CFG_SHIFT 22
254#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_CFG_MASK 0x1C00000
255#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_MEM_REPAIR_BPS_SHIFT 25
256#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_MEM_REPAIR_BPS_MASK 0x2000000
257#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SPARE_SHIFT 26
258#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SPARE_MASK 0x1C000000
259
260/* PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL */
261#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_SHIFT 0
262#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_MASK 0x1
263#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_SHIFT 1
264#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_MASK 0x2
265
266/* PSOC_GLOBAL_CONF_MEM_REPAIR_STS */
267#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_SHIFT 0
268#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_MASK 0x1
269
270/* PSOC_GLOBAL_CONF_OUTSTANT_TRANS */
271#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_SHIFT 0
272#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_MASK 0x1
273#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_SHIFT 1
274#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_MASK 0x2
275
276/* PSOC_GLOBAL_CONF_MASK_REQ */
277#define PSOC_GLOBAL_CONF_MASK_REQ_IND_SHIFT 0
278#define PSOC_GLOBAL_CONF_MASK_REQ_IND_MASK 0x1
279
280/* PSOC_GLOBAL_CONF_PRSTN_RST_CFG */
281#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_SHIFT 0
282#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_MASK 0x1
283#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_IF_SHIFT 1
284#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_IF_MASK 0x2
285#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PLL_SHIFT 2
286#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PLL_MASK 0x1FC
287#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_TPC_SHIFT 9
288#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_TPC_MASK 0x200
289#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MME_SHIFT 10
290#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MME_MASK 0x400
291#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MC_SHIFT 11
292#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MC_MASK 0x800
293#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_CPU_SHIFT 12
294#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_CPU_MASK 0x1000
295#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_IC_IF_SHIFT 13
296#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_IC_IF_MASK 0x2000
297#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PSOC_SHIFT 14
298#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PSOC_MASK 0x4000
299#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_SRAM_SHIFT 15
300#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_SRAM_MASK 0x1F8000
301#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_SHIFT 21
302#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_MASK 0x200000
303#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_IF_SHIFT 22
304#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_IF_MASK 0x400000
305
306/* PSOC_GLOBAL_CONF_SW_ALL_RST_CFG */
307#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_SHIFT 0
308#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_MASK 0x1
309#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_IF_SHIFT 1
310#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_IF_MASK 0x2
311#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PLL_SHIFT 2
312#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PLL_MASK 0x1FC
313#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT 9
314#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_MASK 0x200
315#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT 10
316#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_MASK 0x400
317#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_SHIFT 11
318#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_MASK 0x800
319#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_SHIFT 12
320#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_MASK 0x1000
321#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_SHIFT 13
322#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_MASK 0x2000
323#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_SHIFT 14
324#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_MASK 0x4000
325#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_SHIFT 15
326#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_MASK 0x1F8000
327#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT 21
328#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_MASK 0x200000
329#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_SHIFT 22
330#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_MASK 0x400000
331
332/* PSOC_GLOBAL_CONF_WD_RST_CFG */
333#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_SHIFT 0
334#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_MASK 0x1
335#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_IF_SHIFT 1
336#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_IF_MASK 0x2
337#define PSOC_GLOBAL_CONF_WD_RST_CFG_PLL_SHIFT 2
338#define PSOC_GLOBAL_CONF_WD_RST_CFG_PLL_MASK 0x1FC
339#define PSOC_GLOBAL_CONF_WD_RST_CFG_TPC_SHIFT 9
340#define PSOC_GLOBAL_CONF_WD_RST_CFG_TPC_MASK 0x200
341#define PSOC_GLOBAL_CONF_WD_RST_CFG_MME_SHIFT 10
342#define PSOC_GLOBAL_CONF_WD_RST_CFG_MME_MASK 0x400
343#define PSOC_GLOBAL_CONF_WD_RST_CFG_MC_SHIFT 11
344#define PSOC_GLOBAL_CONF_WD_RST_CFG_MC_MASK 0x800
345#define PSOC_GLOBAL_CONF_WD_RST_CFG_CPU_SHIFT 12
346#define PSOC_GLOBAL_CONF_WD_RST_CFG_CPU_MASK 0x1000
347#define PSOC_GLOBAL_CONF_WD_RST_CFG_IC_IF_SHIFT 13
348#define PSOC_GLOBAL_CONF_WD_RST_CFG_IC_IF_MASK 0x2000
349#define PSOC_GLOBAL_CONF_WD_RST_CFG_PSOC_SHIFT 14
350#define PSOC_GLOBAL_CONF_WD_RST_CFG_PSOC_MASK 0x4000
351#define PSOC_GLOBAL_CONF_WD_RST_CFG_SRAM_SHIFT 15
352#define PSOC_GLOBAL_CONF_WD_RST_CFG_SRAM_MASK 0x1F8000
353#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_SHIFT 21
354#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_MASK 0x200000
355#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_IF_SHIFT 22
356#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_IF_MASK 0x400000
357
358/* PSOC_GLOBAL_CONF_MNL_RST_CFG */
359#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_SHIFT 0
360#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_MASK 0x1
361#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_IF_SHIFT 1
362#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_IF_MASK 0x2
363#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PLL_SHIFT 2
364#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PLL_MASK 0x1FC
365#define PSOC_GLOBAL_CONF_MNL_RST_CFG_TPC_SHIFT 9
366#define PSOC_GLOBAL_CONF_MNL_RST_CFG_TPC_MASK 0x200
367#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MME_SHIFT 10
368#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MME_MASK 0x400
369#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MC_SHIFT 11
370#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MC_MASK 0x800
371#define PSOC_GLOBAL_CONF_MNL_RST_CFG_CPU_SHIFT 12
372#define PSOC_GLOBAL_CONF_MNL_RST_CFG_CPU_MASK 0x1000
373#define PSOC_GLOBAL_CONF_MNL_RST_CFG_IC_IF_SHIFT 13
374#define PSOC_GLOBAL_CONF_MNL_RST_CFG_IC_IF_MASK 0x2000
375#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PSOC_SHIFT 14
376#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PSOC_MASK 0x4000
377#define PSOC_GLOBAL_CONF_MNL_RST_CFG_SRAM_SHIFT 15
378#define PSOC_GLOBAL_CONF_MNL_RST_CFG_SRAM_MASK 0x1F8000
379#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_SHIFT 21
380#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_MASK 0x200000
381#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_IF_SHIFT 22
382#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_IF_MASK 0x400000
383
384/* PSOC_GLOBAL_CONF_UNIT_RST_N */
385#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_SHIFT 0
386#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_MASK 0x1
387#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_IF_SHIFT 1
388#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_IF_MASK 0x2
389#define PSOC_GLOBAL_CONF_UNIT_RST_N_PLL_SHIFT 2
390#define PSOC_GLOBAL_CONF_UNIT_RST_N_PLL_MASK 0x1FC
391#define PSOC_GLOBAL_CONF_UNIT_RST_N_TPC_SHIFT 9
392#define PSOC_GLOBAL_CONF_UNIT_RST_N_TPC_MASK 0x200
393#define PSOC_GLOBAL_CONF_UNIT_RST_N_MME_SHIFT 10
394#define PSOC_GLOBAL_CONF_UNIT_RST_N_MME_MASK 0x400
395#define PSOC_GLOBAL_CONF_UNIT_RST_N_MC_SHIFT 11
396#define PSOC_GLOBAL_CONF_UNIT_RST_N_MC_MASK 0x800
397#define PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_SHIFT 12
398#define PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_MASK 0x1000
399#define PSOC_GLOBAL_CONF_UNIT_RST_N_IC_IF_SHIFT 13
400#define PSOC_GLOBAL_CONF_UNIT_RST_N_IC_IF_MASK 0x2000
401#define PSOC_GLOBAL_CONF_UNIT_RST_N_PSOC_SHIFT 14
402#define PSOC_GLOBAL_CONF_UNIT_RST_N_PSOC_MASK 0x4000
403#define PSOC_GLOBAL_CONF_UNIT_RST_N_SRAM_SHIFT 15
404#define PSOC_GLOBAL_CONF_UNIT_RST_N_SRAM_MASK 0x1F8000
405#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_SHIFT 21
406#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_MASK 0x200000
407#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_IF_SHIFT 22
408#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_IF_MASK 0x400000
409
410/* PSOC_GLOBAL_CONF_PRSTN_MASK */
411#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_SHIFT 0
412#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_MASK 0x1
413
414/* PSOC_GLOBAL_CONF_WD_MASK */
415#define PSOC_GLOBAL_CONF_WD_MASK_IND_SHIFT 0
416#define PSOC_GLOBAL_CONF_WD_MASK_IND_MASK 0x1
417
418/* PSOC_GLOBAL_CONF_RST_SRC */
419#define PSOC_GLOBAL_CONF_RST_SRC_VAL_SHIFT 0
420#define PSOC_GLOBAL_CONF_RST_SRC_VAL_MASK 0xF
421
422/* PSOC_GLOBAL_CONF_PAD_1V8_CFG */
423#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_SHIFT 0
424#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_MASK 0x7F
425
426/* PSOC_GLOBAL_CONF_PAD_3V3_CFG */
427#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_SHIFT 0
428#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_MASK 0x7F
429
430/* PSOC_GLOBAL_CONF_PAD_1V8_INPUT */
431#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_SHIFT 0
432#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_MASK 0x7
433
434/* PSOC_GLOBAL_CONF_BNK3V3_MS */
435#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_SHIFT 0
436#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_MASK 0x3
437
438/* PSOC_GLOBAL_CONF_PAD_DEFAULT */
439#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_SHIFT 0
440#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_MASK 0xF
441
442/* PSOC_GLOBAL_CONF_PAD_SEL */
443#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_SHIFT 0
444#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_MASK 0x3
445
446#endif /* ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ */
447
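As a quick orientation, the *_SHIFT/*_MASK pairs defined above are consumed in the usual mask-then-shift way. A minimal sketch, assuming only linux/types.h and the macros from this header (the helper name is illustrative and not something this patch adds):

#include <linux/types.h>

/* Sketch: extract the silicon-revision field from a raw TARGETID value */
static inline u32 psoc_targetid_trevision(u32 targetid)
{
	return (targetid & PSOC_GLOBAL_CONF_TARGETID_TREVISION_MASK) >>
			PSOC_GLOBAL_CONF_TARGETID_TREVISION_SHIFT;
}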
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
new file mode 100644
index 000000000000..cfbdd2c9c5c7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
@@ -0,0 +1,745 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_
14#define ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_
15
16/*
17 *****************************************
18 * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF)
19 *****************************************
20 */
21
22#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0 0xC4B000
23
24#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_1 0xC4B004
25
26#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_2 0xC4B008
27
28#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_3 0xC4B00C
29
30#define mmPSOC_GLOBAL_CONF_PCI_FW_FSM 0xC4B020
31
32#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START 0xC4B024
33
34#define mmPSOC_GLOBAL_CONF_BTM_FSM 0xC4B028
35
36#define mmPSOC_GLOBAL_CONF_SW_BTM_FSM 0xC4B030
37
38#define mmPSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM 0xC4B034
39
40#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT 0xC4B038
41
42#define mmPSOC_GLOBAL_CONF_SPI_MEM_EN 0xC4B040
43
44#define mmPSOC_GLOBAL_CONF_PRSTN 0xC4B044
45
46#define mmPSOC_GLOBAL_CONF_PCIE_EN 0xC4B048
47
48#define mmPSOC_GLOBAL_CONF_SPI_IMG_STS 0xC4B050
49
50#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_FSM 0xC4B054
51
52#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_0 0xC4B100
53
54#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_1 0xC4B104
55
56#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_2 0xC4B108
57
58#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_3 0xC4B10C
59
60#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_4 0xC4B110
61
62#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_5 0xC4B114
63
64#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_6 0xC4B118
65
66#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_7 0xC4B11C
67
68#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_8 0xC4B120
69
70#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_9 0xC4B124
71
72#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_10 0xC4B128
73
74#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_11 0xC4B12C
75
76#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_12 0xC4B130
77
78#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_13 0xC4B134
79
80#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_14 0xC4B138
81
82#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_15 0xC4B13C
83
84#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_16 0xC4B140
85
86#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_17 0xC4B144
87
88#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_18 0xC4B148
89
90#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_19 0xC4B14C
91
92#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_20 0xC4B150
93
94#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_21 0xC4B154
95
96#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 0xC4B158
97
98#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 0xC4B15C
99
100#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 0xC4B160
101
102#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_25 0xC4B164
103
104#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_26 0xC4B168
105
106#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_27 0xC4B16C
107
108#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_28 0xC4B170
109
110#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_29 0xC4B174
111
112#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_30 0xC4B178
113
114#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_31 0xC4B17C
115
116#define mmPSOC_GLOBAL_CONF_SEMAPHORE_0 0xC4B200
117
118#define mmPSOC_GLOBAL_CONF_SEMAPHORE_1 0xC4B204
119
120#define mmPSOC_GLOBAL_CONF_SEMAPHORE_2 0xC4B208
121
122#define mmPSOC_GLOBAL_CONF_SEMAPHORE_3 0xC4B20C
123
124#define mmPSOC_GLOBAL_CONF_SEMAPHORE_4 0xC4B210
125
126#define mmPSOC_GLOBAL_CONF_SEMAPHORE_5 0xC4B214
127
128#define mmPSOC_GLOBAL_CONF_SEMAPHORE_6 0xC4B218
129
130#define mmPSOC_GLOBAL_CONF_SEMAPHORE_7 0xC4B21C
131
132#define mmPSOC_GLOBAL_CONF_SEMAPHORE_8 0xC4B220
133
134#define mmPSOC_GLOBAL_CONF_SEMAPHORE_9 0xC4B224
135
136#define mmPSOC_GLOBAL_CONF_SEMAPHORE_10 0xC4B228
137
138#define mmPSOC_GLOBAL_CONF_SEMAPHORE_11 0xC4B22C
139
140#define mmPSOC_GLOBAL_CONF_SEMAPHORE_12 0xC4B230
141
142#define mmPSOC_GLOBAL_CONF_SEMAPHORE_13 0xC4B234
143
144#define mmPSOC_GLOBAL_CONF_SEMAPHORE_14 0xC4B238
145
146#define mmPSOC_GLOBAL_CONF_SEMAPHORE_15 0xC4B23C
147
148#define mmPSOC_GLOBAL_CONF_SEMAPHORE_16 0xC4B240
149
150#define mmPSOC_GLOBAL_CONF_SEMAPHORE_17 0xC4B244
151
152#define mmPSOC_GLOBAL_CONF_SEMAPHORE_18 0xC4B248
153
154#define mmPSOC_GLOBAL_CONF_SEMAPHORE_19 0xC4B24C
155
156#define mmPSOC_GLOBAL_CONF_SEMAPHORE_20 0xC4B250
157
158#define mmPSOC_GLOBAL_CONF_SEMAPHORE_21 0xC4B254
159
160#define mmPSOC_GLOBAL_CONF_SEMAPHORE_22 0xC4B258
161
162#define mmPSOC_GLOBAL_CONF_SEMAPHORE_23 0xC4B25C
163
164#define mmPSOC_GLOBAL_CONF_SEMAPHORE_24 0xC4B260
165
166#define mmPSOC_GLOBAL_CONF_SEMAPHORE_25 0xC4B264
167
168#define mmPSOC_GLOBAL_CONF_SEMAPHORE_26 0xC4B268
169
170#define mmPSOC_GLOBAL_CONF_SEMAPHORE_27 0xC4B26C
171
172#define mmPSOC_GLOBAL_CONF_SEMAPHORE_28 0xC4B270
173
174#define mmPSOC_GLOBAL_CONF_SEMAPHORE_29 0xC4B274
175
176#define mmPSOC_GLOBAL_CONF_SEMAPHORE_30 0xC4B278
177
178#define mmPSOC_GLOBAL_CONF_SEMAPHORE_31 0xC4B27C
179
180#define mmPSOC_GLOBAL_CONF_WARM_REBOOT 0xC4B300
181
182#define mmPSOC_GLOBAL_CONF_UBOOT_MAGIC 0xC4B304
183
184#define mmPSOC_GLOBAL_CONF_SPL_SOURCE 0xC4B308
185
186#define mmPSOC_GLOBAL_CONF_I2C_MSTR1_DBG 0xC4B30C
187
188#define mmPSOC_GLOBAL_CONF_I2C_SLV 0xC4B310
189
190#define mmPSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK 0xC4B314
191
192#define mmPSOC_GLOBAL_CONF_APP_STATUS 0xC4B320
193
194#define mmPSOC_GLOBAL_CONF_BTL_STS 0xC4B340
195
196#define mmPSOC_GLOBAL_CONF_TIMEOUT_INTR 0xC4B350
197
198#define mmPSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR 0xC4B354
199
200#define mmPSOC_GLOBAL_CONF_PERIPH_INTR 0xC4B358
201
202#define mmPSOC_GLOBAL_CONF_COMB_PERIPH_INTR 0xC4B35C
203
204#define mmPSOC_GLOBAL_CONF_AXI_ERR_INTR 0xC4B360
205
206#define mmPSOC_GLOBAL_CONF_TARGETID 0xC4B400
207
208#define mmPSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE 0xC4B420
209
210#define mmPSOC_GLOBAL_CONF_MII_ADDR 0xC4B424
211
212#define mmPSOC_GLOBAL_CONF_MII_SPEED 0xC4B428
213
214#define mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS 0xC4B430
215
216#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_CTRL 0xC4B450
217
218#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_STS 0xC4B454
219
220#define mmPSOC_GLOBAL_CONF_OUTSTANT_TRANS 0xC4B458
221
222#define mmPSOC_GLOBAL_CONF_MASK_REQ 0xC4B45C
223
224#define mmPSOC_GLOBAL_CONF_PRSTN_RST_CFG 0xC4B470
225
226#define mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG 0xC4B474
227
228#define mmPSOC_GLOBAL_CONF_WD_RST_CFG 0xC4B478
229
230#define mmPSOC_GLOBAL_CONF_MNL_RST_CFG 0xC4B47C
231
232#define mmPSOC_GLOBAL_CONF_UNIT_RST_N 0xC4B480
233
234#define mmPSOC_GLOBAL_CONF_PRSTN_MASK 0xC4B484
235
236#define mmPSOC_GLOBAL_CONF_WD_MASK 0xC4B488
237
238#define mmPSOC_GLOBAL_CONF_RST_SRC 0xC4B490
239
240#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_0 0xC4B500
241
242#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_1 0xC4B504
243
244#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_2 0xC4B508
245
246#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_3 0xC4B50C
247
248#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_4 0xC4B510
249
250#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_5 0xC4B514
251
252#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_6 0xC4B518
253
254#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_7 0xC4B51C
255
256#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_8 0xC4B520
257
258#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_9 0xC4B524
259
260#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_10 0xC4B528
261
262#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_11 0xC4B52C
263
264#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_12 0xC4B530
265
266#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_13 0xC4B534
267
268#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_14 0xC4B538
269
270#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_15 0xC4B53C
271
272#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_16 0xC4B540
273
274#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_17 0xC4B544
275
276#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_18 0xC4B548
277
278#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_19 0xC4B54C
279
280#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_20 0xC4B550
281
282#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_21 0xC4B554
283
284#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_22 0xC4B558
285
286#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_23 0xC4B55C
287
288#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_24 0xC4B560
289
290#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_25 0xC4B564
291
292#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_26 0xC4B568
293
294#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_27 0xC4B56C
295
296#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_28 0xC4B570
297
298#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_29 0xC4B574
299
300#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_30 0xC4B578
301
302#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_31 0xC4B57C
303
304#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_32 0xC4B580
305
306#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_33 0xC4B584
307
308#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_34 0xC4B588
309
310#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_35 0xC4B58C
311
312#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_36 0xC4B590
313
314#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_37 0xC4B594
315
316#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_38 0xC4B598
317
318#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_39 0xC4B59C
319
320#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_40 0xC4B5A0
321
322#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_41 0xC4B5A4
323
324#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_42 0xC4B5A8
325
326#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_43 0xC4B5AC
327
328#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_44 0xC4B5B0
329
330#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_45 0xC4B5B4
331
332#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_46 0xC4B5B8
333
334#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_47 0xC4B5BC
335
336#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_48 0xC4B5C0
337
338#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_49 0xC4B5C4
339
340#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_50 0xC4B5C8
341
342#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_51 0xC4B5CC
343
344#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_52 0xC4B5D0
345
346#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_53 0xC4B5D4
347
348#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_54 0xC4B5D8
349
350#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_55 0xC4B5DC
351
352#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_56 0xC4B5E0
353
354#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_57 0xC4B5E4
355
356#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_58 0xC4B5E8
357
358#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_59 0xC4B5EC
359
360#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_60 0xC4B5F0
361
362#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_61 0xC4B5F4
363
364#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_62 0xC4B5F8
365
366#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_63 0xC4B5FC
367
368#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_64 0xC4B600
369
370#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_65 0xC4B604
371
372#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_66 0xC4B608
373
374#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_67 0xC4B60C
375
376#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_68 0xC4B610
377
378#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_0 0xC4B640
379
380#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_1 0xC4B644
381
382#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_2 0xC4B648
383
384#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_3 0xC4B64C
385
386#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_4 0xC4B650
387
388#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_5 0xC4B654
389
390#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_6 0xC4B658
391
392#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_7 0xC4B65C
393
394#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_8 0xC4B660
395
396#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_9 0xC4B664
397
398#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_10 0xC4B668
399
400#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_11 0xC4B66C
401
402#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_0 0xC4B680
403
404#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_1 0xC4B684
405
406#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_2 0xC4B688
407
408#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_3 0xC4B68C
409
410#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_4 0xC4B690
411
412#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_5 0xC4B694
413
414#define mmPSOC_GLOBAL_CONF_BNK3V3_MS 0xC4B6E0
415
416#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_0 0xC4B700
417
418#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_1 0xC4B704
419
420#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_2 0xC4B708
421
422#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_3 0xC4B70C
423
424#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_4 0xC4B710
425
426#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_5 0xC4B714
427
428#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_6 0xC4B718
429
430#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_7 0xC4B71C
431
432#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_8 0xC4B720
433
434#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_9 0xC4B724
435
436#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_10 0xC4B728
437
438#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_11 0xC4B72C
439
440#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_12 0xC4B730
441
442#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_13 0xC4B734
443
444#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_14 0xC4B738
445
446#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_15 0xC4B73C
447
448#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_16 0xC4B740
449
450#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_17 0xC4B744
451
452#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_18 0xC4B748
453
454#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_19 0xC4B74C
455
456#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_20 0xC4B750
457
458#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_21 0xC4B754
459
460#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_22 0xC4B758
461
462#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_23 0xC4B75C
463
464#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_24 0xC4B760
465
466#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_25 0xC4B764
467
468#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_26 0xC4B768
469
470#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_27 0xC4B76C
471
472#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_28 0xC4B770
473
474#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_29 0xC4B774
475
476#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_30 0xC4B778
477
478#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_31 0xC4B77C
479
480#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_32 0xC4B780
481
482#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_33 0xC4B784
483
484#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_34 0xC4B788
485
486#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_35 0xC4B78C
487
488#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_36 0xC4B790
489
490#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_37 0xC4B794
491
492#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_38 0xC4B798
493
494#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_39 0xC4B79C
495
496#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_40 0xC4B7A0
497
498#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_41 0xC4B7A4
499
500#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_42 0xC4B7A8
501
502#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_43 0xC4B7AC
503
504#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_44 0xC4B7B0
505
506#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_45 0xC4B7B4
507
508#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_46 0xC4B7B8
509
510#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_47 0xC4B7BC
511
512#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_48 0xC4B7C0
513
514#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_49 0xC4B7C4
515
516#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_50 0xC4B7C8
517
518#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_51 0xC4B7CC
519
520#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_52 0xC4B7D0
521
522#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_53 0xC4B7D4
523
524#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_54 0xC4B7D8
525
526#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_55 0xC4B7DC
527
528#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_56 0xC4B7E0
529
530#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_57 0xC4B7E4
531
532#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_58 0xC4B7E8
533
534#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_59 0xC4B7EC
535
536#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_60 0xC4B7F0
537
538#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_61 0xC4B7F4
539
540#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_62 0xC4B7F8
541
542#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_63 0xC4B7FC
543
544#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_64 0xC4B800
545
546#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_65 0xC4B804
547
548#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_66 0xC4B808
549
550#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_67 0xC4B80C
551
552#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_68 0xC4B810
553
554#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_69 0xC4B814
555
556#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_70 0xC4B818
557
558#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_71 0xC4B81C
559
560#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_72 0xC4B820
561
562#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_73 0xC4B824
563
564#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_74 0xC4B828
565
566#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_75 0xC4B82C
567
568#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_76 0xC4B830
569
570#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_77 0xC4B834
571
572#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_78 0xC4B838
573
574#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_79 0xC4B83C
575
576#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_80 0xC4B840
577
578#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_81 0xC4B844
579
580#define mmPSOC_GLOBAL_CONF_PAD_SEL_0 0xC4B900
581
582#define mmPSOC_GLOBAL_CONF_PAD_SEL_1 0xC4B904
583
584#define mmPSOC_GLOBAL_CONF_PAD_SEL_2 0xC4B908
585
586#define mmPSOC_GLOBAL_CONF_PAD_SEL_3 0xC4B90C
587
588#define mmPSOC_GLOBAL_CONF_PAD_SEL_4 0xC4B910
589
590#define mmPSOC_GLOBAL_CONF_PAD_SEL_5 0xC4B914
591
592#define mmPSOC_GLOBAL_CONF_PAD_SEL_6 0xC4B918
593
594#define mmPSOC_GLOBAL_CONF_PAD_SEL_7 0xC4B91C
595
596#define mmPSOC_GLOBAL_CONF_PAD_SEL_8 0xC4B920
597
598#define mmPSOC_GLOBAL_CONF_PAD_SEL_9 0xC4B924
599
600#define mmPSOC_GLOBAL_CONF_PAD_SEL_10 0xC4B928
601
602#define mmPSOC_GLOBAL_CONF_PAD_SEL_11 0xC4B92C
603
604#define mmPSOC_GLOBAL_CONF_PAD_SEL_12 0xC4B930
605
606#define mmPSOC_GLOBAL_CONF_PAD_SEL_13 0xC4B934
607
608#define mmPSOC_GLOBAL_CONF_PAD_SEL_14 0xC4B938
609
610#define mmPSOC_GLOBAL_CONF_PAD_SEL_15 0xC4B93C
611
612#define mmPSOC_GLOBAL_CONF_PAD_SEL_16 0xC4B940
613
614#define mmPSOC_GLOBAL_CONF_PAD_SEL_17 0xC4B944
615
616#define mmPSOC_GLOBAL_CONF_PAD_SEL_18 0xC4B948
617
618#define mmPSOC_GLOBAL_CONF_PAD_SEL_19 0xC4B94C
619
620#define mmPSOC_GLOBAL_CONF_PAD_SEL_20 0xC4B950
621
622#define mmPSOC_GLOBAL_CONF_PAD_SEL_21 0xC4B954
623
624#define mmPSOC_GLOBAL_CONF_PAD_SEL_22 0xC4B958
625
626#define mmPSOC_GLOBAL_CONF_PAD_SEL_23 0xC4B95C
627
628#define mmPSOC_GLOBAL_CONF_PAD_SEL_24 0xC4B960
629
630#define mmPSOC_GLOBAL_CONF_PAD_SEL_25 0xC4B964
631
632#define mmPSOC_GLOBAL_CONF_PAD_SEL_26 0xC4B968
633
634#define mmPSOC_GLOBAL_CONF_PAD_SEL_27 0xC4B96C
635
636#define mmPSOC_GLOBAL_CONF_PAD_SEL_28 0xC4B970
637
638#define mmPSOC_GLOBAL_CONF_PAD_SEL_29 0xC4B974
639
640#define mmPSOC_GLOBAL_CONF_PAD_SEL_30 0xC4B978
641
642#define mmPSOC_GLOBAL_CONF_PAD_SEL_31 0xC4B97C
643
644#define mmPSOC_GLOBAL_CONF_PAD_SEL_32 0xC4B980
645
646#define mmPSOC_GLOBAL_CONF_PAD_SEL_33 0xC4B984
647
648#define mmPSOC_GLOBAL_CONF_PAD_SEL_34 0xC4B988
649
650#define mmPSOC_GLOBAL_CONF_PAD_SEL_35 0xC4B98C
651
652#define mmPSOC_GLOBAL_CONF_PAD_SEL_36 0xC4B990
653
654#define mmPSOC_GLOBAL_CONF_PAD_SEL_37 0xC4B994
655
656#define mmPSOC_GLOBAL_CONF_PAD_SEL_38 0xC4B998
657
658#define mmPSOC_GLOBAL_CONF_PAD_SEL_39 0xC4B99C
659
660#define mmPSOC_GLOBAL_CONF_PAD_SEL_40 0xC4B9A0
661
662#define mmPSOC_GLOBAL_CONF_PAD_SEL_41 0xC4B9A4
663
664#define mmPSOC_GLOBAL_CONF_PAD_SEL_42 0xC4B9A8
665
666#define mmPSOC_GLOBAL_CONF_PAD_SEL_43 0xC4B9AC
667
668#define mmPSOC_GLOBAL_CONF_PAD_SEL_44 0xC4B9B0
669
670#define mmPSOC_GLOBAL_CONF_PAD_SEL_45 0xC4B9B4
671
672#define mmPSOC_GLOBAL_CONF_PAD_SEL_46 0xC4B9B8
673
674#define mmPSOC_GLOBAL_CONF_PAD_SEL_47 0xC4B9BC
675
676#define mmPSOC_GLOBAL_CONF_PAD_SEL_48 0xC4B9C0
677
678#define mmPSOC_GLOBAL_CONF_PAD_SEL_49 0xC4B9C4
679
680#define mmPSOC_GLOBAL_CONF_PAD_SEL_50 0xC4B9C8
681
682#define mmPSOC_GLOBAL_CONF_PAD_SEL_51 0xC4B9CC
683
684#define mmPSOC_GLOBAL_CONF_PAD_SEL_52 0xC4B9D0
685
686#define mmPSOC_GLOBAL_CONF_PAD_SEL_53 0xC4B9D4
687
688#define mmPSOC_GLOBAL_CONF_PAD_SEL_54 0xC4B9D8
689
690#define mmPSOC_GLOBAL_CONF_PAD_SEL_55 0xC4B9DC
691
692#define mmPSOC_GLOBAL_CONF_PAD_SEL_56 0xC4B9E0
693
694#define mmPSOC_GLOBAL_CONF_PAD_SEL_57 0xC4B9E4
695
696#define mmPSOC_GLOBAL_CONF_PAD_SEL_58 0xC4B9E8
697
698#define mmPSOC_GLOBAL_CONF_PAD_SEL_59 0xC4B9EC
699
700#define mmPSOC_GLOBAL_CONF_PAD_SEL_60 0xC4B9F0
701
702#define mmPSOC_GLOBAL_CONF_PAD_SEL_61 0xC4B9F4
703
704#define mmPSOC_GLOBAL_CONF_PAD_SEL_62 0xC4B9F8
705
706#define mmPSOC_GLOBAL_CONF_PAD_SEL_63 0xC4B9FC
707
708#define mmPSOC_GLOBAL_CONF_PAD_SEL_64 0xC4BA00
709
710#define mmPSOC_GLOBAL_CONF_PAD_SEL_65 0xC4BA04
711
712#define mmPSOC_GLOBAL_CONF_PAD_SEL_66 0xC4BA08
713
714#define mmPSOC_GLOBAL_CONF_PAD_SEL_67 0xC4BA0C
715
716#define mmPSOC_GLOBAL_CONF_PAD_SEL_68 0xC4BA10
717
718#define mmPSOC_GLOBAL_CONF_PAD_SEL_69 0xC4BA14
719
720#define mmPSOC_GLOBAL_CONF_PAD_SEL_70 0xC4BA18
721
722#define mmPSOC_GLOBAL_CONF_PAD_SEL_71 0xC4BA1C
723
724#define mmPSOC_GLOBAL_CONF_PAD_SEL_72 0xC4BA20
725
726#define mmPSOC_GLOBAL_CONF_PAD_SEL_73 0xC4BA24
727
728#define mmPSOC_GLOBAL_CONF_PAD_SEL_74 0xC4BA28
729
730#define mmPSOC_GLOBAL_CONF_PAD_SEL_75 0xC4BA2C
731
732#define mmPSOC_GLOBAL_CONF_PAD_SEL_76 0xC4BA30
733
734#define mmPSOC_GLOBAL_CONF_PAD_SEL_77 0xC4BA34
735
736#define mmPSOC_GLOBAL_CONF_PAD_SEL_78 0xC4BA38
737
738#define mmPSOC_GLOBAL_CONF_PAD_SEL_79 0xC4BA3C
739
740#define mmPSOC_GLOBAL_CONF_PAD_SEL_80 0xC4BA40
741
742#define mmPSOC_GLOBAL_CONF_PAD_SEL_81 0xC4BA44
743
744#endif /* ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ */
745
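The mm* constants above are MMIO register offsets. A minimal sketch of how such an offset is typically consumed, assuming an already ioremap()'ed register window passed in as 'regs' (both the helper names and the assumption that the offsets apply directly to that window are illustrative, not taken from this patch or the driver's own accessors):

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: access a scratchpad register through an ioremap()'ed window */
static inline u32 psoc_read_scratchpad0(void __iomem *regs)
{
	return readl(regs + mmPSOC_GLOBAL_CONF_SCRATCHPAD_0);
}

static inline void psoc_write_scratchpad0(void __iomem *regs, u32 val)
{
	writel(val, regs + mmPSOC_GLOBAL_CONF_SCRATCHPAD_0);
}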
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
new file mode 100644
index 000000000000..6723d8f76f30
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
@@ -0,0 +1,105 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_PSOC_MME_PLL_REGS_H_
14#define ASIC_REG_PSOC_MME_PLL_REGS_H_
15
16/*
17 *****************************************
18 * PSOC_MME_PLL (Prototype: PLL)
19 *****************************************
20 */
21
22#define mmPSOC_MME_PLL_NR 0xC71100
23
24#define mmPSOC_MME_PLL_NF 0xC71104
25
26#define mmPSOC_MME_PLL_OD 0xC71108
27
28#define mmPSOC_MME_PLL_NB 0xC7110C
29
30#define mmPSOC_MME_PLL_CFG 0xC71110
31
32#define mmPSOC_MME_PLL_LOSE_MASK 0xC71120
33
34#define mmPSOC_MME_PLL_LOCK_INTR 0xC71128
35
36#define mmPSOC_MME_PLL_LOCK_BYPASS 0xC7112C
37
38#define mmPSOC_MME_PLL_DATA_CHNG 0xC71130
39
40#define mmPSOC_MME_PLL_RST 0xC71134
41
42#define mmPSOC_MME_PLL_SLIP_WD_CNTR 0xC71150
43
44#define mmPSOC_MME_PLL_DIV_FACTOR_0 0xC71200
45
46#define mmPSOC_MME_PLL_DIV_FACTOR_1 0xC71204
47
48#define mmPSOC_MME_PLL_DIV_FACTOR_2 0xC71208
49
50#define mmPSOC_MME_PLL_DIV_FACTOR_3 0xC7120C
51
52#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_0 0xC71220
53
54#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_1 0xC71224
55
56#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_2 0xC71228
57
58#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_3 0xC7122C
59
60#define mmPSOC_MME_PLL_DIV_SEL_0 0xC71280
61
62#define mmPSOC_MME_PLL_DIV_SEL_1 0xC71284
63
64#define mmPSOC_MME_PLL_DIV_SEL_2 0xC71288
65
66#define mmPSOC_MME_PLL_DIV_SEL_3 0xC7128C
67
68#define mmPSOC_MME_PLL_DIV_EN_0 0xC712A0
69
70#define mmPSOC_MME_PLL_DIV_EN_1 0xC712A4
71
72#define mmPSOC_MME_PLL_DIV_EN_2 0xC712A8
73
74#define mmPSOC_MME_PLL_DIV_EN_3 0xC712AC
75
76#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_0 0xC712C0
77
78#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_1 0xC712C4
79
80#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_2 0xC712C8
81
82#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_3 0xC712CC
83
84#define mmPSOC_MME_PLL_CLK_GATER 0xC71300
85
86#define mmPSOC_MME_PLL_CLK_RLX_0 0xC71310
87
88#define mmPSOC_MME_PLL_CLK_RLX_1 0xC71314
89
90#define mmPSOC_MME_PLL_CLK_RLX_2 0xC71318
91
92#define mmPSOC_MME_PLL_CLK_RLX_3 0xC7131C
93
94#define mmPSOC_MME_PLL_REF_CNTR_PERIOD 0xC71400
95
96#define mmPSOC_MME_PLL_REF_LOW_THRESHOLD 0xC71410
97
98#define mmPSOC_MME_PLL_REF_HIGH_THRESHOLD 0xC71420
99
100#define mmPSOC_MME_PLL_PLL_NOT_STABLE 0xC71430
101
102#define mmPSOC_MME_PLL_FREQ_CALC_EN 0xC71440
103
104#endif /* ASIC_REG_PSOC_MME_PLL_REGS_H_ */
105
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
new file mode 100644
index 000000000000..abcded0531c9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
@@ -0,0 +1,105 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_PSOC_PCI_PLL_REGS_H_
14#define ASIC_REG_PSOC_PCI_PLL_REGS_H_
15
16/*
17 *****************************************
18 * PSOC_PCI_PLL (Prototype: PLL)
19 *****************************************
20 */
21
22#define mmPSOC_PCI_PLL_NR 0xC72100
23
24#define mmPSOC_PCI_PLL_NF 0xC72104
25
26#define mmPSOC_PCI_PLL_OD 0xC72108
27
28#define mmPSOC_PCI_PLL_NB 0xC7210C
29
30#define mmPSOC_PCI_PLL_CFG 0xC72110
31
32#define mmPSOC_PCI_PLL_LOSE_MASK 0xC72120
33
34#define mmPSOC_PCI_PLL_LOCK_INTR 0xC72128
35
36#define mmPSOC_PCI_PLL_LOCK_BYPASS 0xC7212C
37
38#define mmPSOC_PCI_PLL_DATA_CHNG 0xC72130
39
40#define mmPSOC_PCI_PLL_RST 0xC72134
41
42#define mmPSOC_PCI_PLL_SLIP_WD_CNTR 0xC72150
43
44#define mmPSOC_PCI_PLL_DIV_FACTOR_0 0xC72200
45
46#define mmPSOC_PCI_PLL_DIV_FACTOR_1 0xC72204
47
48#define mmPSOC_PCI_PLL_DIV_FACTOR_2 0xC72208
49
50#define mmPSOC_PCI_PLL_DIV_FACTOR_3 0xC7220C
51
52#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_0 0xC72220
53
54#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_1 0xC72224
55
56#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_2 0xC72228
57
58#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_3 0xC7222C
59
60#define mmPSOC_PCI_PLL_DIV_SEL_0 0xC72280
61
62#define mmPSOC_PCI_PLL_DIV_SEL_1 0xC72284
63
64#define mmPSOC_PCI_PLL_DIV_SEL_2 0xC72288
65
66#define mmPSOC_PCI_PLL_DIV_SEL_3 0xC7228C
67
68#define mmPSOC_PCI_PLL_DIV_EN_0 0xC722A0
69
70#define mmPSOC_PCI_PLL_DIV_EN_1 0xC722A4
71
72#define mmPSOC_PCI_PLL_DIV_EN_2 0xC722A8
73
74#define mmPSOC_PCI_PLL_DIV_EN_3 0xC722AC
75
76#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_0 0xC722C0
77
78#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_1 0xC722C4
79
80#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_2 0xC722C8
81
82#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_3 0xC722CC
83
84#define mmPSOC_PCI_PLL_CLK_GATER 0xC72300
85
86#define mmPSOC_PCI_PLL_CLK_RLX_0 0xC72310
87
88#define mmPSOC_PCI_PLL_CLK_RLX_1 0xC72314
89
90#define mmPSOC_PCI_PLL_CLK_RLX_2 0xC72318
91
92#define mmPSOC_PCI_PLL_CLK_RLX_3 0xC7231C
93
94#define mmPSOC_PCI_PLL_REF_CNTR_PERIOD 0xC72400
95
96#define mmPSOC_PCI_PLL_REF_LOW_THRESHOLD 0xC72410
97
98#define mmPSOC_PCI_PLL_REF_HIGH_THRESHOLD 0xC72420
99
100#define mmPSOC_PCI_PLL_PLL_NOT_STABLE 0xC72430
101
102#define mmPSOC_PCI_PLL_FREQ_CALC_EN 0xC72440
103
104#endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */
105
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
new file mode 100644
index 000000000000..5925c7477c25
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
@@ -0,0 +1,143 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_PSOC_SPI_REGS_H_
14#define ASIC_REG_PSOC_SPI_REGS_H_
15
16/*
17 *****************************************
18 * PSOC_SPI (Prototype: SPI)
19 *****************************************
20 */
21
22#define mmPSOC_SPI_CTRLR0 0xC43000
23
24#define mmPSOC_SPI_CTRLR1 0xC43004
25
26#define mmPSOC_SPI_SSIENR 0xC43008
27
28#define mmPSOC_SPI_MWCR 0xC4300C
29
30#define mmPSOC_SPI_SER 0xC43010
31
32#define mmPSOC_SPI_BAUDR 0xC43014
33
34#define mmPSOC_SPI_TXFTLR 0xC43018
35
36#define mmPSOC_SPI_RXFTLR 0xC4301C
37
38#define mmPSOC_SPI_TXFLR 0xC43020
39
40#define mmPSOC_SPI_RXFLR 0xC43024
41
42#define mmPSOC_SPI_SR 0xC43028
43
44#define mmPSOC_SPI_IMR 0xC4302C
45
46#define mmPSOC_SPI_ISR 0xC43030
47
48#define mmPSOC_SPI_RISR 0xC43034
49
50#define mmPSOC_SPI_TXOICR 0xC43038
51
52#define mmPSOC_SPI_RXOICR 0xC4303C
53
54#define mmPSOC_SPI_RXUICR 0xC43040
55
56#define mmPSOC_SPI_MSTICR 0xC43044
57
58#define mmPSOC_SPI_ICR 0xC43048
59
60#define mmPSOC_SPI_IDR 0xC43058
61
62#define mmPSOC_SPI_SSI_VERSION_ID 0xC4305C
63
64#define mmPSOC_SPI_DR0 0xC43060
65
66#define mmPSOC_SPI_DR1 0xC43064
67
68#define mmPSOC_SPI_DR2 0xC43068
69
70#define mmPSOC_SPI_DR3 0xC4306C
71
72#define mmPSOC_SPI_DR4 0xC43070
73
74#define mmPSOC_SPI_DR5 0xC43074
75
76#define mmPSOC_SPI_DR6 0xC43078
77
78#define mmPSOC_SPI_DR7 0xC4307C
79
80#define mmPSOC_SPI_DR8 0xC43080
81
82#define mmPSOC_SPI_DR9 0xC43084
83
84#define mmPSOC_SPI_DR10 0xC43088
85
86#define mmPSOC_SPI_DR11 0xC4308C
87
88#define mmPSOC_SPI_DR12 0xC43090
89
90#define mmPSOC_SPI_DR13 0xC43094
91
92#define mmPSOC_SPI_DR14 0xC43098
93
94#define mmPSOC_SPI_DR15 0xC4309C
95
96#define mmPSOC_SPI_DR16 0xC430A0
97
98#define mmPSOC_SPI_DR17 0xC430A4
99
100#define mmPSOC_SPI_DR18 0xC430A8
101
102#define mmPSOC_SPI_DR19 0xC430AC
103
104#define mmPSOC_SPI_DR20 0xC430B0
105
106#define mmPSOC_SPI_DR21 0xC430B4
107
108#define mmPSOC_SPI_DR22 0xC430B8
109
110#define mmPSOC_SPI_DR23 0xC430BC
111
112#define mmPSOC_SPI_DR24 0xC430C0
113
114#define mmPSOC_SPI_DR25 0xC430C4
115
116#define mmPSOC_SPI_DR26 0xC430C8
117
118#define mmPSOC_SPI_DR27 0xC430CC
119
120#define mmPSOC_SPI_DR28 0xC430D0
121
122#define mmPSOC_SPI_DR29 0xC430D4
123
124#define mmPSOC_SPI_DR30 0xC430D8
125
126#define mmPSOC_SPI_DR31 0xC430DC
127
128#define mmPSOC_SPI_DR32 0xC430E0
129
130#define mmPSOC_SPI_DR33 0xC430E4
131
132#define mmPSOC_SPI_DR34 0xC430E8
133
134#define mmPSOC_SPI_DR35 0xC430EC
135
136#define mmPSOC_SPI_RX_SAMPLE_DLY 0xC430F0
137
138#define mmPSOC_SPI_RSVD_1 0xC430F8
139
140#define mmPSOC_SPI_RSVD_2 0xC430FC
141
142#endif /* ASIC_REG_PSOC_SPI_REGS_H_ */
143
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
new file mode 100644
index 000000000000..d56c9fa0e7ba
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
@@ -0,0 +1,83 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_
14#define ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_
15
16/*
17 *****************************************
18 * SRAM_Y0_X0_RTR (Prototype: IC_RTR)
19 *****************************************
20 */
21
22#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_E_ARB 0x201100
23
24#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_W_ARB 0x201104
25
26#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB 0x201110
27
28#define mmSRAM_Y0_X0_RTR_HBW_E_ARB_MAX 0x201120
29
30#define mmSRAM_Y0_X0_RTR_HBW_W_ARB_MAX 0x201124
31
32#define mmSRAM_Y0_X0_RTR_HBW_L_ARB_MAX 0x201130
33
34#define mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB 0x201140
35
36#define mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB 0x201144
37
38#define mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB 0x201148
39
40#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB 0x201160
41
42#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB 0x201164
43
44#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_L_ARB 0x201168
45
46#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_E_ARB 0x201200
47
48#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_W_ARB 0x201204
49
50#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_L_ARB 0x201210
51
52#define mmSRAM_Y0_X0_RTR_LBW_E_ARB_MAX 0x201220
53
54#define mmSRAM_Y0_X0_RTR_LBW_W_ARB_MAX 0x201224
55
56#define mmSRAM_Y0_X0_RTR_LBW_L_ARB_MAX 0x201230
57
58#define mmSRAM_Y0_X0_RTR_LBW_DATA_E_ARB 0x201240
59
60#define mmSRAM_Y0_X0_RTR_LBW_DATA_W_ARB 0x201244
61
62#define mmSRAM_Y0_X0_RTR_LBW_DATA_L_ARB 0x201248
63
64#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_E_ARB 0x201260
65
66#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_W_ARB 0x201264
67
68#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_L_ARB 0x201268
69
70#define mmSRAM_Y0_X0_RTR_DBG_E_ARB 0x201300
71
72#define mmSRAM_Y0_X0_RTR_DBG_W_ARB 0x201304
73
74#define mmSRAM_Y0_X0_RTR_DBG_L_ARB 0x201310
75
76#define mmSRAM_Y0_X0_RTR_DBG_E_ARB_MAX 0x201320
77
78#define mmSRAM_Y0_X0_RTR_DBG_W_ARB_MAX 0x201324
79
80#define mmSRAM_Y0_X0_RTR_DBG_L_ARB_MAX 0x201330
81
82#endif /* ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_ */
83
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
new file mode 100644
index 000000000000..5624544303ca
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
@@ -0,0 +1,83 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_
14#define ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_
15
16/*
17 *****************************************
18 * SRAM_Y0_X1_RTR (Prototype: IC_RTR)
19 *****************************************
20 */
21
22#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_E_ARB 0x205100
23
24#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_W_ARB 0x205104
25
26#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB 0x205110
27
28#define mmSRAM_Y0_X1_RTR_HBW_E_ARB_MAX 0x205120
29
30#define mmSRAM_Y0_X1_RTR_HBW_W_ARB_MAX 0x205124
31
32#define mmSRAM_Y0_X1_RTR_HBW_L_ARB_MAX 0x205130
33
34#define mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB 0x205140
35
36#define mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB 0x205144
37
38#define mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB 0x205148
39
40#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB 0x205160
41
42#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB 0x205164
43
44#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_L_ARB 0x205168
45
46#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_E_ARB 0x205200
47
48#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_W_ARB 0x205204
49
50#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_L_ARB 0x205210
51
52#define mmSRAM_Y0_X1_RTR_LBW_E_ARB_MAX 0x205220
53
54#define mmSRAM_Y0_X1_RTR_LBW_W_ARB_MAX 0x205224
55
56#define mmSRAM_Y0_X1_RTR_LBW_L_ARB_MAX 0x205230
57
58#define mmSRAM_Y0_X1_RTR_LBW_DATA_E_ARB 0x205240
59
60#define mmSRAM_Y0_X1_RTR_LBW_DATA_W_ARB 0x205244
61
62#define mmSRAM_Y0_X1_RTR_LBW_DATA_L_ARB 0x205248
63
64#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_E_ARB 0x205260
65
66#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_W_ARB 0x205264
67
68#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_L_ARB 0x205268
69
70#define mmSRAM_Y0_X1_RTR_DBG_E_ARB 0x205300
71
72#define mmSRAM_Y0_X1_RTR_DBG_W_ARB 0x205304
73
74#define mmSRAM_Y0_X1_RTR_DBG_L_ARB 0x205310
75
76#define mmSRAM_Y0_X1_RTR_DBG_E_ARB_MAX 0x205320
77
78#define mmSRAM_Y0_X1_RTR_DBG_W_ARB_MAX 0x205324
79
80#define mmSRAM_Y0_X1_RTR_DBG_L_ARB_MAX 0x205330
81
82#endif /* ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_ */
83
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
new file mode 100644
index 000000000000..3322bc0bd1df
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
@@ -0,0 +1,83 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_
14#define ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_
15
16/*
17 *****************************************
18 * SRAM_Y0_X2_RTR (Prototype: IC_RTR)
19 *****************************************
20 */
21
22#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_E_ARB 0x209100
23
24#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_W_ARB 0x209104
25
26#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB 0x209110
27
28#define mmSRAM_Y0_X2_RTR_HBW_E_ARB_MAX 0x209120
29
30#define mmSRAM_Y0_X2_RTR_HBW_W_ARB_MAX 0x209124
31
32#define mmSRAM_Y0_X2_RTR_HBW_L_ARB_MAX 0x209130
33
34#define mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB 0x209140
35
36#define mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB 0x209144
37
38#define mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB 0x209148
39
40#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB 0x209160
41
42#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB 0x209164
43
44#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_L_ARB 0x209168
45
46#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_E_ARB 0x209200
47
48#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_W_ARB 0x209204
49
50#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_L_ARB 0x209210
51
52#define mmSRAM_Y0_X2_RTR_LBW_E_ARB_MAX 0x209220
53
54#define mmSRAM_Y0_X2_RTR_LBW_W_ARB_MAX 0x209224
55
56#define mmSRAM_Y0_X2_RTR_LBW_L_ARB_MAX 0x209230
57
58#define mmSRAM_Y0_X2_RTR_LBW_DATA_E_ARB 0x209240
59
60#define mmSRAM_Y0_X2_RTR_LBW_DATA_W_ARB 0x209244
61
62#define mmSRAM_Y0_X2_RTR_LBW_DATA_L_ARB 0x209248
63
64#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_E_ARB 0x209260
65
66#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_W_ARB 0x209264
67
68#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_L_ARB 0x209268
69
70#define mmSRAM_Y0_X2_RTR_DBG_E_ARB 0x209300
71
72#define mmSRAM_Y0_X2_RTR_DBG_W_ARB 0x209304
73
74#define mmSRAM_Y0_X2_RTR_DBG_L_ARB 0x209310
75
76#define mmSRAM_Y0_X2_RTR_DBG_E_ARB_MAX 0x209320
77
78#define mmSRAM_Y0_X2_RTR_DBG_W_ARB_MAX 0x209324
79
80#define mmSRAM_Y0_X2_RTR_DBG_L_ARB_MAX 0x209330
81
82#endif /* ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_ */
83
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
new file mode 100644
index 000000000000..81e393db2027
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
@@ -0,0 +1,83 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_
14#define ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_
15
16/*
17 *****************************************
18 * SRAM_Y0_X3_RTR (Prototype: IC_RTR)
19 *****************************************
20 */
21
22#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_E_ARB 0x20D100
23
24#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_W_ARB 0x20D104
25
26#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB 0x20D110
27
28#define mmSRAM_Y0_X3_RTR_HBW_E_ARB_MAX 0x20D120
29
30#define mmSRAM_Y0_X3_RTR_HBW_W_ARB_MAX 0x20D124
31
32#define mmSRAM_Y0_X3_RTR_HBW_L_ARB_MAX 0x20D130
33
34#define mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB 0x20D140
35
36#define mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB 0x20D144
37
38#define mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB 0x20D148
39
40#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB 0x20D160
41
42#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB 0x20D164
43
44#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_L_ARB 0x20D168
45
46#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_E_ARB 0x20D200
47
48#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_W_ARB 0x20D204
49
50#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_L_ARB 0x20D210
51
52#define mmSRAM_Y0_X3_RTR_LBW_E_ARB_MAX 0x20D220
53
54#define mmSRAM_Y0_X3_RTR_LBW_W_ARB_MAX 0x20D224
55
56#define mmSRAM_Y0_X3_RTR_LBW_L_ARB_MAX 0x20D230
57
58#define mmSRAM_Y0_X3_RTR_LBW_DATA_E_ARB 0x20D240
59
60#define mmSRAM_Y0_X3_RTR_LBW_DATA_W_ARB 0x20D244
61
62#define mmSRAM_Y0_X3_RTR_LBW_DATA_L_ARB 0x20D248
63
64#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_E_ARB 0x20D260
65
66#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_W_ARB 0x20D264
67
68#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_L_ARB 0x20D268
69
70#define mmSRAM_Y0_X3_RTR_DBG_E_ARB 0x20D300
71
72#define mmSRAM_Y0_X3_RTR_DBG_W_ARB 0x20D304
73
74#define mmSRAM_Y0_X3_RTR_DBG_L_ARB 0x20D310
75
76#define mmSRAM_Y0_X3_RTR_DBG_E_ARB_MAX 0x20D320
77
78#define mmSRAM_Y0_X3_RTR_DBG_W_ARB_MAX 0x20D324
79
80#define mmSRAM_Y0_X3_RTR_DBG_L_ARB_MAX 0x20D330
81
82#endif /* ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_ */
83
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
new file mode 100644
index 000000000000..b2e11b1de385
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
@@ -0,0 +1,83 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_
14#define ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_
15
16/*
17 *****************************************
18 * SRAM_Y0_X4_RTR (Prototype: IC_RTR)
19 *****************************************
20 */
21
22#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_E_ARB 0x211100
23
24#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_W_ARB 0x211104
25
26#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB 0x211110
27
28#define mmSRAM_Y0_X4_RTR_HBW_E_ARB_MAX 0x211120
29
30#define mmSRAM_Y0_X4_RTR_HBW_W_ARB_MAX 0x211124
31
32#define mmSRAM_Y0_X4_RTR_HBW_L_ARB_MAX 0x211130
33
34#define mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB 0x211140
35
36#define mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB 0x211144
37
38#define mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB 0x211148
39
40#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB 0x211160
41
42#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB 0x211164
43
44#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_L_ARB 0x211168
45
46#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_E_ARB 0x211200
47
48#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_W_ARB 0x211204
49
50#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_L_ARB 0x211210
51
52#define mmSRAM_Y0_X4_RTR_LBW_E_ARB_MAX 0x211220
53
54#define mmSRAM_Y0_X4_RTR_LBW_W_ARB_MAX 0x211224
55
56#define mmSRAM_Y0_X4_RTR_LBW_L_ARB_MAX 0x211230
57
58#define mmSRAM_Y0_X4_RTR_LBW_DATA_E_ARB 0x211240
59
60#define mmSRAM_Y0_X4_RTR_LBW_DATA_W_ARB 0x211244
61
62#define mmSRAM_Y0_X4_RTR_LBW_DATA_L_ARB 0x211248
63
64#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_E_ARB 0x211260
65
66#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_W_ARB 0x211264
67
68#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_L_ARB 0x211268
69
70#define mmSRAM_Y0_X4_RTR_DBG_E_ARB 0x211300
71
72#define mmSRAM_Y0_X4_RTR_DBG_W_ARB 0x211304
73
74#define mmSRAM_Y0_X4_RTR_DBG_L_ARB 0x211310
75
76#define mmSRAM_Y0_X4_RTR_DBG_E_ARB_MAX 0x211320
77
78#define mmSRAM_Y0_X4_RTR_DBG_W_ARB_MAX 0x211324
79
80#define mmSRAM_Y0_X4_RTR_DBG_L_ARB_MAX 0x211330
81
82#endif /* ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_ */
83
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
new file mode 100644
index 000000000000..b4ea8cae2757
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
@@ -0,0 +1,117 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_STLB_MASKS_H_
14#define ASIC_REG_STLB_MASKS_H_
15
16/*
17 *****************************************
18 * STLB (Prototype: STLB)
19 *****************************************
20 */
21
22/* STLB_CACHE_INV */
23#define STLB_CACHE_INV_PRODUCER_INDEX_SHIFT 0
24#define STLB_CACHE_INV_PRODUCER_INDEX_MASK 0xFF
25#define STLB_CACHE_INV_INDEX_MASK_SHIFT 8
26#define STLB_CACHE_INV_INDEX_MASK_MASK 0xFF00
27
28/* STLB_CACHE_INV_BASE_39_8 */
29#define STLB_CACHE_INV_BASE_39_8_PA_SHIFT 0
30#define STLB_CACHE_INV_BASE_39_8_PA_MASK 0xFFFFFFFF
31
32/* STLB_CACHE_INV_BASE_49_40 */
33#define STLB_CACHE_INV_BASE_49_40_PA_SHIFT 0
34#define STLB_CACHE_INV_BASE_49_40_PA_MASK 0x3FF
35
36/* STLB_STLB_FEATURE_EN */
37#define STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_SHIFT 0
38#define STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_MASK 0x1
39#define STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_SHIFT 1
40#define STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_MASK 0x2
41#define STLB_STLB_FEATURE_EN_LOOKUP_EN_SHIFT 2
42#define STLB_STLB_FEATURE_EN_LOOKUP_EN_MASK 0x4
43#define STLB_STLB_FEATURE_EN_BYPASS_SHIFT 3
44#define STLB_STLB_FEATURE_EN_BYPASS_MASK 0x8
45#define STLB_STLB_FEATURE_EN_BANK_STOP_SHIFT 4
46#define STLB_STLB_FEATURE_EN_BANK_STOP_MASK 0x10
47#define STLB_STLB_FEATURE_EN_TRACE_EN_SHIFT 5
48#define STLB_STLB_FEATURE_EN_TRACE_EN_MASK 0x20
49#define STLB_STLB_FEATURE_EN_FOLLOWER_EN_SHIFT 6
50#define STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK 0x40
51#define STLB_STLB_FEATURE_EN_CACHING_EN_SHIFT 7
52#define STLB_STLB_FEATURE_EN_CACHING_EN_MASK 0xF80
53
54/* STLB_STLB_AXI_CACHE */
55#define STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_SHIFT 0
56#define STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_MASK 0xF
57#define STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_SHIFT 4
58#define STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_MASK 0xF0
59#define STLB_STLB_AXI_CACHE_INV_ARCACHE_SHIFT 8
60#define STLB_STLB_AXI_CACHE_INV_ARCACHE_MASK 0xF00
61
62/* STLB_HOP_CONFIGURATION */
63#define STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT 0
64#define STLB_HOP_CONFIGURATION_FIRST_HOP_MASK 0x7
65#define STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SHIFT 4
66#define STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_MASK 0x70
67#define STLB_HOP_CONFIGURATION_LAST_HOP_SHIFT 8
68#define STLB_HOP_CONFIGURATION_LAST_HOP_MASK 0x700
69
70/* STLB_LINK_LIST_LOOKUP_MASK_49_32 */
71#define STLB_LINK_LIST_LOOKUP_MASK_49_32_R_SHIFT 0
72#define STLB_LINK_LIST_LOOKUP_MASK_49_32_R_MASK 0x3FFFF
73
74/* STLB_LINK_LIST_LOOKUP_MASK_31_0 */
75#define STLB_LINK_LIST_LOOKUP_MASK_31_0_R_SHIFT 0
76#define STLB_LINK_LIST_LOOKUP_MASK_31_0_R_MASK 0xFFFFFFFF
77
78/* STLB_LINK_LIST */
79#define STLB_LINK_LIST_CLEAR_SHIFT 0
80#define STLB_LINK_LIST_CLEAR_MASK 0x1
81#define STLB_LINK_LIST_EN_SHIFT 1
82#define STLB_LINK_LIST_EN_MASK 0x2
83
84/* STLB_INV_ALL_START */
85#define STLB_INV_ALL_START_R_SHIFT 0
86#define STLB_INV_ALL_START_R_MASK 0x1
87
88/* STLB_INV_ALL_SET */
89#define STLB_INV_ALL_SET_R_SHIFT 0
90#define STLB_INV_ALL_SET_R_MASK 0xFF
91
92/* STLB_INV_PS */
93#define STLB_INV_PS_R_SHIFT 0
94#define STLB_INV_PS_R_MASK 0x3
95
96/* STLB_INV_CONSUMER_INDEX */
97#define STLB_INV_CONSUMER_INDEX_R_SHIFT 0
98#define STLB_INV_CONSUMER_INDEX_R_MASK 0xFF
99
100/* STLB_INV_HIT_COUNT */
101#define STLB_INV_HIT_COUNT_R_SHIFT 0
102#define STLB_INV_HIT_COUNT_R_MASK 0x7FF
103
104/* STLB_INV_SET */
105#define STLB_INV_SET_R_SHIFT 0
106#define STLB_INV_SET_R_MASK 0xFF
107
108/* STLB_SRAM_INIT */
109#define STLB_SRAM_INIT_BUSY_TAG_SHIFT 0
110#define STLB_SRAM_INIT_BUSY_TAG_MASK 0x3
111#define STLB_SRAM_INIT_BUSY_SLICE_SHIFT 2
112#define STLB_SRAM_INIT_BUSY_SLICE_MASK 0xC
113#define STLB_SRAM_INIT_BUSY_DATA_SHIFT 4
114#define STLB_SRAM_INIT_BUSY_DATA_MASK 0x10
115
116#endif /* ASIC_REG_STLB_MASKS_H_ */
117
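Multi-field registers such as STLB_HOP_CONFIGURATION are normally composed by shifting each field into place and clamping it with the generated mask. A minimal sketch under that assumption (the helper is illustrative only, not part of this patch):

#include <linux/types.h>

/* Sketch: build an STLB_HOP_CONFIGURATION value from its three hop fields */
static inline u32 stlb_hop_configuration(u32 first_hop, u32 first_lookup_hop,
					 u32 last_hop)
{
	u32 val = 0;

	val |= (first_hop << STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT) &
			STLB_HOP_CONFIGURATION_FIRST_HOP_MASK;
	val |= (first_lookup_hop << STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SHIFT) &
			STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_MASK;
	val |= (last_hop << STLB_HOP_CONFIGURATION_LAST_HOP_SHIFT) &
			STLB_HOP_CONFIGURATION_LAST_HOP_MASK;

	return val;
}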
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
new file mode 100644
index 000000000000..0f5281d3e65b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
@@ -0,0 +1,55 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_STLB_REGS_H_
14#define ASIC_REG_STLB_REGS_H_
15
16/*
17 *****************************************
18 * STLB (Prototype: STLB)
19 *****************************************
20 */
21
22#define mmSTLB_CACHE_INV 0x490010
23
24#define mmSTLB_CACHE_INV_BASE_39_8 0x490014
25
26#define mmSTLB_CACHE_INV_BASE_49_40 0x490018
27
28#define mmSTLB_STLB_FEATURE_EN 0x49001C
29
30#define mmSTLB_STLB_AXI_CACHE 0x490020
31
32#define mmSTLB_HOP_CONFIGURATION 0x490024
33
34#define mmSTLB_LINK_LIST_LOOKUP_MASK_49_32 0x490028
35
36#define mmSTLB_LINK_LIST_LOOKUP_MASK_31_0 0x49002C
37
38#define mmSTLB_LINK_LIST 0x490030
39
40#define mmSTLB_INV_ALL_START 0x490034
41
42#define mmSTLB_INV_ALL_SET 0x490038
43
44#define mmSTLB_INV_PS 0x49003C
45
46#define mmSTLB_INV_CONSUMER_INDEX 0x490040
47
48#define mmSTLB_INV_HIT_COUNT 0x490044
49
50#define mmSTLB_INV_SET 0x490048
51
52#define mmSTLB_SRAM_INIT 0x49004C
53
54#endif /* ASIC_REG_STLB_REGS_H_ */
55
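
Usage note (illustrative, not from the commit): the mmSTLB_* values above are register offsets within the device configuration space, while the companion stlb_masks.h describes the fields inside each register, so programming a register is a read-modify-write that combines the two. The sketch below assumes an ioremapped cfg_base and generic readl()/writel() accessors rather than the driver's own register helpers:

/*
 * Illustrative only -- cfg_base is an assumed ioremapped base of the
 * configuration space; readl()/writel() stand in for whatever accessors
 * the driver actually uses.
 */
#include <linux/io.h>
#include <linux/types.h>

static void stlb_set_first_hop(void __iomem *cfg_base, u32 first_hop)
{
	u32 val = readl(cfg_base + mmSTLB_HOP_CONFIGURATION);

	val &= ~STLB_HOP_CONFIGURATION_FIRST_HOP_MASK;
	val |= (first_hop << STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT) &
	       STLB_HOP_CONFIGURATION_FIRST_HOP_MASK;
	writel(val, cfg_base + mmSTLB_HOP_CONFIGURATION);
}
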
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
new file mode 100644
index 000000000000..e5587b49eecd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
@@ -0,0 +1,1607 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC0_CFG_MASKS_H_
14#define ASIC_REG_TPC0_CFG_MASKS_H_
15
16/*
17 *****************************************
18 * TPC0_CFG (Prototype: TPC)
19 *****************************************
20 */
21
22/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW */
23#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0
24#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
25
26/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH */
27#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0
28#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
29
30/* TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE */
31#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_SHIFT 0
32#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF
33
34/* TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG */
35#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
36#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
37#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
38#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
39#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16
40#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
41
42/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE */
43#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_SHIFT 0
44#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF
45
46/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE */
47#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0
48#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
49
50/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET */
51#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET_V_SHIFT 0
52#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
53
54/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE */
55#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_SHIFT 0
56#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF
57
58/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE */
59#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0
60#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
61
62/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET */
63#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET_V_SHIFT 0
64#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
65
66/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE */
67#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_SHIFT 0
68#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF
69
70/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE */
71#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0
72#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
73
74/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET */
75#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET_V_SHIFT 0
76#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
77
78/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE */
79#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_SHIFT 0
80#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF
81
82/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE */
83#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0
84#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
85
86/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET */
87#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET_V_SHIFT 0
88#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
89
90/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE */
91#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_SHIFT 0
92#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF
93
94/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE */
95#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0
96#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
97
98/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET */
99#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET_V_SHIFT 0
100#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
101
102/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW */
103#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0
104#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
105
106/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH */
107#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0
108#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
109
110/* TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE */
111#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_SHIFT 0
112#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF
113
114/* TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG */
115#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
116#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
117#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
118#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
119#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16
120#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
121
122/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE */
123#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_SHIFT 0
124#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF
125
126/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE */
127#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0
128#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
129
130/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET */
131#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET_V_SHIFT 0
132#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
133
134/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE */
135#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_SHIFT 0
136#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF
137
138/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE */
139#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0
140#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
141
142/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET */
143#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET_V_SHIFT 0
144#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
145
146/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE */
147#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_SHIFT 0
148#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF
149
150/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE */
151#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0
152#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
153
154/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET */
155#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET_V_SHIFT 0
156#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
157
158/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE */
159#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_SHIFT 0
160#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF
161
162/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE */
163#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0
164#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
165
166/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET */
167#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET_V_SHIFT 0
168#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
169
170/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE */
171#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_SHIFT 0
172#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF
173
174/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE */
175#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0
176#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
177
178/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET */
179#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET_V_SHIFT 0
180#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
181
182/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW */
183#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0
184#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
185
186/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH */
187#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0
188#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
189
190/* TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE */
191#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_SHIFT 0
192#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF
193
194/* TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG */
195#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
196#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
197#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
198#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
199#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16
200#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
201
202/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE */
203#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_SHIFT 0
204#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF
205
206/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE */
207#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0
208#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
209
210/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET */
211#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET_V_SHIFT 0
212#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
213
214/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE */
215#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_SHIFT 0
216#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF
217
218/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE */
219#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0
220#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
221
222/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET */
223#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET_V_SHIFT 0
224#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
225
226/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE */
227#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_SHIFT 0
228#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF
229
230/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE */
231#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0
232#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
233
234/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET */
235#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET_V_SHIFT 0
236#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
237
238/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE */
239#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_SHIFT 0
240#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF
241
242/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE */
243#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0
244#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
245
246/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET */
247#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET_V_SHIFT 0
248#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
249
250/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE */
251#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_SHIFT 0
252#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF
253
254/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE */
255#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0
256#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
257
258/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET */
259#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET_V_SHIFT 0
260#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
261
262/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW */
263#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0
264#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
265
266/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH */
267#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0
268#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
269
270/* TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE */
271#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_SHIFT 0
272#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF
273
274/* TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG */
275#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
276#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
277#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
278#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
279#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16
280#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
281
282/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE */
283#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_SHIFT 0
284#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF
285
286/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE */
287#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0
288#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
289
290/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET */
291#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET_V_SHIFT 0
292#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
293
294/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE */
295#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_SHIFT 0
296#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF
297
298/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE */
299#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0
300#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
301
302/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET */
303#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET_V_SHIFT 0
304#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
305
306/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE */
307#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_SHIFT 0
308#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF
309
310/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE */
311#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0
312#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
313
314/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET */
315#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET_V_SHIFT 0
316#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
317
318/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE */
319#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_SHIFT 0
320#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF
321
322/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE */
323#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0
324#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
325
326/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET */
327#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET_V_SHIFT 0
328#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
329
330/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE */
331#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_SHIFT 0
332#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF
333
334/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE */
335#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0
336#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
337
338/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET */
339#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET_V_SHIFT 0
340#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
341
342/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW */
343#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0
344#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
345
346/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH */
347#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0
348#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
349
350/* TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE */
351#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_SHIFT 0
352#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF
353
354/* TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG */
355#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
356#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
357#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
358#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
359#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16
360#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
361
362/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE */
363#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_SHIFT 0
364#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF
365
366/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE */
367#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0
368#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
369
370/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET */
371#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET_V_SHIFT 0
372#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
373
374/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE */
375#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_SHIFT 0
376#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF
377
378/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE */
379#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0
380#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
381
382/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET */
383#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET_V_SHIFT 0
384#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
385
386/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE */
387#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_SHIFT 0
388#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF
389
390/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE */
391#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0
392#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
393
394/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET */
395#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET_V_SHIFT 0
396#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
397
398/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE */
399#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_SHIFT 0
400#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF
401
402/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE */
403#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0
404#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
405
406/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET */
407#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET_V_SHIFT 0
408#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
409
410/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE */
411#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_SHIFT 0
412#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF
413
414/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE */
415#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0
416#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
417
418/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET */
419#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET_V_SHIFT 0
420#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
421
422/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW */
423#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0
424#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
425
426/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH */
427#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0
428#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
429
430/* TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE */
431#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_SHIFT 0
432#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF
433
434/* TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG */
435#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
436#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
437#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
438#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
439#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16
440#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
441
442/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE */
443#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_SHIFT 0
444#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF
445
446/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE */
447#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0
448#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
449
450/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET */
451#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET_V_SHIFT 0
452#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
453
454/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE */
455#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_SHIFT 0
456#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF
457
458/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE */
459#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0
460#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
461
462/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET */
463#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET_V_SHIFT 0
464#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
465
466/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE */
467#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_SHIFT 0
468#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF
469
470/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE */
471#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0
472#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
473
474/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET */
475#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET_V_SHIFT 0
476#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
477
478/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE */
479#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_SHIFT 0
480#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF
481
482/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE */
483#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0
484#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
485
486/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET */
487#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET_V_SHIFT 0
488#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
489
490/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE */
491#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_SHIFT 0
492#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF
493
494/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE */
495#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0
496#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
497
498/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET */
499#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET_V_SHIFT 0
500#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
501
502/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW */
503#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0
504#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
505
506/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH */
507#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0
508#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
509
510/* TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE */
511#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_SHIFT 0
512#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF
513
514/* TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG */
515#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
516#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
517#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
518#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
519#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16
520#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
521
522/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE */
523#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_SHIFT 0
524#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF
525
526/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE */
527#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0
528#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
529
530/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET */
531#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET_V_SHIFT 0
532#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
533
534/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE */
535#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_SHIFT 0
536#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF
537
538/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE */
539#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0
540#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
541
542/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET */
543#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET_V_SHIFT 0
544#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
545
546/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE */
547#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_SHIFT 0
548#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF
549
550/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE */
551#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0
552#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
553
554/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET */
555#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET_V_SHIFT 0
556#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
557
558/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE */
559#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_SHIFT 0
560#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF
561
562/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE */
563#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0
564#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
565
566/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET */
567#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET_V_SHIFT 0
568#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
569
570/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE */
571#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_SHIFT 0
572#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF
573
574/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE */
575#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0
576#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
577
578/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET */
579#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET_V_SHIFT 0
580#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
581
582/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW */
583#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0
584#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
585
586/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH */
587#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0
588#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
589
590/* TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE */
591#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_SHIFT 0
592#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF
593
594/* TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG */
595#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
596#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
597#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
598#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
599#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16
600#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
601
602/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE */
603#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_SHIFT 0
604#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF
605
606/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE */
607#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0
608#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
609
610/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET */
611#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET_V_SHIFT 0
612#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
613
614/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE */
615#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_SHIFT 0
616#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF
617
618/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE */
619#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0
620#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
621
622/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET */
623#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET_V_SHIFT 0
624#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
625
626/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE */
627#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_SHIFT 0
628#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF
629
630/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE */
631#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0
632#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
633
634/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET */
635#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET_V_SHIFT 0
636#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
637
638/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE */
639#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_SHIFT 0
640#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF
641
642/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE */
643#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0
644#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
645
646/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET */
647#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET_V_SHIFT 0
648#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
649
650/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE */
651#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_SHIFT 0
652#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF
653
654/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE */
655#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0
656#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
657
658/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET */
659#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET_V_SHIFT 0
660#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
661
662/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW */
663#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0
664#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
665
666/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH */
667#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0
668#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
669
670/* TPC0_CFG_KERNEL_TID_BASE_DIM_0 */
671#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_SHIFT 0
672#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF
673
674/* TPC0_CFG_KERNEL_TID_SIZE_DIM_0 */
675#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_SHIFT 0
676#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF
677
678/* TPC0_CFG_KERNEL_TID_BASE_DIM_1 */
679#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_SHIFT 0
680#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF
681
682/* TPC0_CFG_KERNEL_TID_SIZE_DIM_1 */
683#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_SHIFT 0
684#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF
685
686/* TPC0_CFG_KERNEL_TID_BASE_DIM_2 */
687#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_SHIFT 0
688#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF
689
690/* TPC0_CFG_KERNEL_TID_SIZE_DIM_2 */
691#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_SHIFT 0
692#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF
693
694/* TPC0_CFG_KERNEL_TID_BASE_DIM_3 */
695#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_SHIFT 0
696#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF
697
698/* TPC0_CFG_KERNEL_TID_SIZE_DIM_3 */
699#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_SHIFT 0
700#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF
701
702/* TPC0_CFG_KERNEL_TID_BASE_DIM_4 */
703#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_SHIFT 0
704#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF
705
706/* TPC0_CFG_KERNEL_TID_SIZE_DIM_4 */
707#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_SHIFT 0
708#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF
709
710/* TPC0_CFG_KERNEL_SRF */
711#define TPC0_CFG_KERNEL_SRF_V_SHIFT 0
712#define TPC0_CFG_KERNEL_SRF_V_MASK 0xFFFFFFFF
713
714/* TPC0_CFG_KERNEL_KERNEL_CONFIG */
715#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_SHIFT 0
716#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_MASK 0x1
717#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1
718#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2
719#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 8
720#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0x3F00
721
722/* TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE */
723#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
724#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
725#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
726#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
727#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
728#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
729
730/* TPC0_CFG_RESERVED_DESC_END */
731#define TPC0_CFG_RESERVED_DESC_END_V_SHIFT 0
732#define TPC0_CFG_RESERVED_DESC_END_V_MASK 0xFFFFFFFF
733
734/* TPC0_CFG_ROUND_CSR */
735#define TPC0_CFG_ROUND_CSR_MODE_SHIFT 0
736#define TPC0_CFG_ROUND_CSR_MODE_MASK 0x7
737
738/* TPC0_CFG_TBUF_BASE_ADDR_LOW */
739#define TPC0_CFG_TBUF_BASE_ADDR_LOW_V_SHIFT 0
740#define TPC0_CFG_TBUF_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
741
742/* TPC0_CFG_TBUF_BASE_ADDR_HIGH */
743#define TPC0_CFG_TBUF_BASE_ADDR_HIGH_V_SHIFT 0
744#define TPC0_CFG_TBUF_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
745
746/* TPC0_CFG_SEMAPHORE */
747#define TPC0_CFG_SEMAPHORE_V_SHIFT 0
748#define TPC0_CFG_SEMAPHORE_V_MASK 0xFFFFFFFF
749
750/* TPC0_CFG_VFLAGS */
751#define TPC0_CFG_VFLAGS_V_SHIFT 0
752#define TPC0_CFG_VFLAGS_V_MASK 0xF
753
754/* TPC0_CFG_SFLAGS */
755#define TPC0_CFG_SFLAGS_V_SHIFT 0
756#define TPC0_CFG_SFLAGS_V_MASK 0xF
757
758/* TPC0_CFG_LFSR_POLYNOM */
759#define TPC0_CFG_LFSR_POLYNOM_V_SHIFT 0
760#define TPC0_CFG_LFSR_POLYNOM_V_MASK 0xFFFFFFFF
761
762/* TPC0_CFG_STATUS */
763#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_SHIFT 1
764#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK 0x2
765#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_SHIFT 2
766#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK 0x4
767#define TPC0_CFG_STATUS_IQ_EMPTY_SHIFT 3
768#define TPC0_CFG_STATUS_IQ_EMPTY_MASK 0x8
769#define TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_SHIFT 4
770#define TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_MASK 0x10
771
772/* TPC0_CFG_CFG_BASE_ADDRESS_HIGH */
773#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_SHIFT 0
774#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
775
776/* TPC0_CFG_CFG_SUBTRACT_VALUE */
777#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_SHIFT 0
778#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_MASK 0xFFFFFFFF
779
780/* TPC0_CFG_SM_BASE_ADDRESS_LOW */
781#define TPC0_CFG_SM_BASE_ADDRESS_LOW_V_SHIFT 0
782#define TPC0_CFG_SM_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
783
784/* TPC0_CFG_SM_BASE_ADDRESS_HIGH */
785#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_SHIFT 0
786#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
787
788/* TPC0_CFG_TPC_CMD */
789#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT 0
790#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_MASK 0x1
791#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_SHIFT 1
792#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_MASK 0x2
793#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_SHIFT 2
794#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_MASK 0x4
795#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_SHIFT 3
796#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_MASK 0x8
797#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT 4
798#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_MASK 0x10
799#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_SHIFT 5
800#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_MASK 0x20
801#define TPC0_CFG_TPC_CMD_QMAN_STOP_SHIFT 6
802#define TPC0_CFG_TPC_CMD_QMAN_STOP_MASK 0x40
803
804/* TPC0_CFG_TPC_EXECUTE */
805#define TPC0_CFG_TPC_EXECUTE_V_SHIFT 0
806#define TPC0_CFG_TPC_EXECUTE_V_MASK 0x1
807
808/* TPC0_CFG_TPC_STALL */
809#define TPC0_CFG_TPC_STALL_V_SHIFT 0
810#define TPC0_CFG_TPC_STALL_V_MASK 0x1
811
812/* TPC0_CFG_ICACHE_BASE_ADDERESS_LOW */
813#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_SHIFT 0
814#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_MASK 0xFFFFFFFF
815
816/* TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH */
817#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_SHIFT 0
818#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_MASK 0xFFFFFFFF
819
820/* TPC0_CFG_MSS_CONFIG */
821#define TPC0_CFG_MSS_CONFIG_AWCACHE_SHIFT 0
822#define TPC0_CFG_MSS_CONFIG_AWCACHE_MASK 0xF
823#define TPC0_CFG_MSS_CONFIG_ARCACHE_SHIFT 4
824#define TPC0_CFG_MSS_CONFIG_ARCACHE_MASK 0xF0
825#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_SHIFT 8
826#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_MASK 0x300
827#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_SHIFT 10
828#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_MASK 0x400
829
830/* TPC0_CFG_TPC_INTR_CAUSE */
831#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_SHIFT 0
832#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK 0xFFFFFFFF
833
834/* TPC0_CFG_TPC_INTR_MASK */
835#define TPC0_CFG_TPC_INTR_MASK_MASK_SHIFT 0
836#define TPC0_CFG_TPC_INTR_MASK_MASK_MASK 0xFFFFFFFF
837
838/* TPC0_CFG_TSB_CONFIG */
839#define TPC0_CFG_TSB_CONFIG_TSB_AGU_MAX_CREDIT_SHIFT 0
840#define TPC0_CFG_TSB_CONFIG_TSB_AGU_MAX_CREDIT_MASK 0x1F
841#define TPC0_CFG_TSB_CONFIG_TSB_EU_MAX_CREDIT_SHIFT 5
842#define TPC0_CFG_TSB_CONFIG_TSB_EU_MAX_CREDIT_MASK 0x3E0
843#define TPC0_CFG_TSB_CONFIG_MAX_OUTSTANDING_SHIFT 10
844#define TPC0_CFG_TSB_CONFIG_MAX_OUTSTANDING_MASK 0xFFC00
845#define TPC0_CFG_TSB_CONFIG_MAX_SIZE_SHIFT 20
846#define TPC0_CFG_TSB_CONFIG_MAX_SIZE_MASK 0x3FF00000
847
848/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW */
849#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0
850#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
851
852/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH */
853#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0
854#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
855
856/* TPC0_CFG_QM_TENSOR_0_PADDING_VALUE */
857#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_SHIFT 0
858#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF
859
860/* TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG */
861#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
862#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
863#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
864#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
865#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16
866#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
867
868/* TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE */
869#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_SHIFT 0
870#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF
871
872/* TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE */
873#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0
874#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
875
876/* TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET */
877#define TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET_V_SHIFT 0
878#define TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
879
880/* TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE */
881#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_SHIFT 0
882#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF
883
884/* TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE */
885#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0
886#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
887
888/* TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET */
889#define TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET_V_SHIFT 0
890#define TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
891
892/* TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE */
893#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_SHIFT 0
894#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF
895
896/* TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE */
897#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0
898#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
899
900/* TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET */
901#define TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET_V_SHIFT 0
902#define TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
903
904/* TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE */
905#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_SHIFT 0
906#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF
907
908/* TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE */
909#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0
910#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
911
912/* TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET */
913#define TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET_V_SHIFT 0
914#define TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
915
916/* TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE */
917#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_SHIFT 0
918#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF
919
920/* TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE */
921#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0
922#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
923
924/* TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET */
925#define TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET_V_SHIFT 0
926#define TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
927
928/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW */
929#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0
930#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
931
932/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH */
933#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0
934#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
935
936/* TPC0_CFG_QM_TENSOR_1_PADDING_VALUE */
937#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_SHIFT 0
938#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF
939
940/* TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG */
941#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
942#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
943#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
944#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
945#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16
946#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
947
948/* TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE */
949#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_SHIFT 0
950#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF
951
952/* TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE */
953#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0
954#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
955
956/* TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET */
957#define TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET_V_SHIFT 0
958#define TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
959
960/* TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE */
961#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_SHIFT 0
962#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF
963
964/* TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE */
965#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0
966#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
967
968/* TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET */
969#define TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET_V_SHIFT 0
970#define TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
971
972/* TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE */
973#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_SHIFT 0
974#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF
975
976/* TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE */
977#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0
978#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
979
980/* TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET */
981#define TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET_V_SHIFT 0
982#define TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
983
984/* TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE */
985#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_SHIFT 0
986#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF
987
988/* TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE */
989#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0
990#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
991
992/* TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET */
993#define TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET_V_SHIFT 0
994#define TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
995
996/* TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE */
997#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_SHIFT 0
998#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF
999
1000/* TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE */
1001#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0
1002#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
1003
1004/* TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET */
1005#define TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET_V_SHIFT 0
1006#define TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
1007
1008/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW */
1009#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0
1010#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1011
1012/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH */
1013#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0
1014#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1015
1016/* TPC0_CFG_QM_TENSOR_2_PADDING_VALUE */
1017#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_SHIFT 0
1018#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF
1019
1020/* TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG */
1021#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
1022#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
1023#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
1024#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
1025#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16
1026#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
1027
1028/* TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE */
1029#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_SHIFT 0
1030#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF
1031
1032/* TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE */
1033#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0
1034#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
1035
1036/* TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET */
1037#define TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET_V_SHIFT 0
1038#define TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
1039
1040/* TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE */
1041#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_SHIFT 0
1042#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF
1043
1044/* TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE */
1045#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0
1046#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
1047
1048/* TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET */
1049#define TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET_V_SHIFT 0
1050#define TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
1051
1052/* TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE */
1053#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_SHIFT 0
1054#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF
1055
1056/* TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE */
1057#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0
1058#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
1059
1060/* TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET */
1061#define TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET_V_SHIFT 0
1062#define TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
1063
1064/* TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE */
1065#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_SHIFT 0
1066#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF
1067
1068/* TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE */
1069#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0
1070#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
1071
1072/* TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET */
1073#define TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET_V_SHIFT 0
1074#define TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
1075
1076/* TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE */
1077#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_SHIFT 0
1078#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF
1079
1080/* TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE */
1081#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0
1082#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
1083
1084/* TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET */
1085#define TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET_V_SHIFT 0
1086#define TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
1087
1088/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW */
1089#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0
1090#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1091
1092/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH */
1093#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0
1094#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1095
1096/* TPC0_CFG_QM_TENSOR_3_PADDING_VALUE */
1097#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_SHIFT 0
1098#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF
1099
1100/* TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG */
1101#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
1102#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
1103#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
1104#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
1105#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16
1106#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
1107
1108/* TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE */
1109#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_SHIFT 0
1110#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF
1111
1112/* TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE */
1113#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0
1114#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
1115
1116/* TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET */
1117#define TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET_V_SHIFT 0
1118#define TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
1119
1120/* TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE */
1121#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_SHIFT 0
1122#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF
1123
1124/* TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE */
1125#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0
1126#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
1127
1128/* TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET */
1129#define TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET_V_SHIFT 0
1130#define TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
1131
1132/* TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE */
1133#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_SHIFT 0
1134#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF
1135
1136/* TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE */
1137#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0
1138#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
1139
1140/* TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET */
1141#define TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET_V_SHIFT 0
1142#define TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
1143
1144/* TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE */
1145#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_SHIFT 0
1146#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF
1147
1148/* TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE */
1149#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0
1150#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
1151
1152/* TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET */
1153#define TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET_V_SHIFT 0
1154#define TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
1155
1156/* TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE */
1157#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_SHIFT 0
1158#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF
1159
1160/* TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE */
1161#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0
1162#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
1163
1164/* TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET */
1165#define TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET_V_SHIFT 0
1166#define TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
1167
1168/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW */
1169#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0
1170#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1171
1172/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH */
1173#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0
1174#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1175
1176/* TPC0_CFG_QM_TENSOR_4_PADDING_VALUE */
1177#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_SHIFT 0
1178#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF
1179
1180/* TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG */
1181#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
1182#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
1183#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
1184#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
1185#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16
1186#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
1187
1188/* TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE */
1189#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_SHIFT 0
1190#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF
1191
1192/* TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE */
1193#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0
1194#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
1195
1196/* TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET */
1197#define TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET_V_SHIFT 0
1198#define TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
1199
1200/* TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE */
1201#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_SHIFT 0
1202#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF
1203
1204/* TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE */
1205#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0
1206#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
1207
1208/* TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET */
1209#define TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET_V_SHIFT 0
1210#define TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
1211
1212/* TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE */
1213#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_SHIFT 0
1214#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF
1215
1216/* TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE */
1217#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0
1218#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
1219
1220/* TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET */
1221#define TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET_V_SHIFT 0
1222#define TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
1223
1224/* TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE */
1225#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_SHIFT 0
1226#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF
1227
1228/* TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE */
1229#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0
1230#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
1231
1232/* TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET */
1233#define TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET_V_SHIFT 0
1234#define TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
1235
1236/* TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE */
1237#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_SHIFT 0
1238#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF
1239
1240/* TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE */
1241#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0
1242#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
1243
1244/* TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET */
1245#define TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET_V_SHIFT 0
1246#define TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
1247
1248/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW */
1249#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0
1250#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1251
1252/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH */
1253#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0
1254#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1255
1256/* TPC0_CFG_QM_TENSOR_5_PADDING_VALUE */
1257#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_SHIFT 0
1258#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF
1259
1260/* TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG */
1261#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
1262#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
1263#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
1264#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
1265#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16
1266#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
1267
1268/* TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE */
1269#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_SHIFT 0
1270#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF
1271
1272/* TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE */
1273#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0
1274#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
1275
1276/* TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET */
1277#define TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET_V_SHIFT 0
1278#define TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
1279
1280/* TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE */
1281#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_SHIFT 0
1282#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF
1283
1284/* TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE */
1285#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0
1286#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
1287
1288/* TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET */
1289#define TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET_V_SHIFT 0
1290#define TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
1291
1292/* TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE */
1293#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_SHIFT 0
1294#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF
1295
1296/* TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE */
1297#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0
1298#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
1299
1300/* TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET */
1301#define TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET_V_SHIFT 0
1302#define TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
1303
1304/* TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE */
1305#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_SHIFT 0
1306#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF
1307
1308/* TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE */
1309#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0
1310#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
1311
1312/* TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET */
1313#define TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET_V_SHIFT 0
1314#define TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
1315
1316/* TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE */
1317#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_SHIFT 0
1318#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF
1319
1320/* TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE */
1321#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0
1322#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
1323
1324/* TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET */
1325#define TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET_V_SHIFT 0
1326#define TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
1327
1328/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW */
1329#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0
1330#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1331
1332/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH */
1333#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0
1334#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1335
1336/* TPC0_CFG_QM_TENSOR_6_PADDING_VALUE */
1337#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_SHIFT 0
1338#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF
1339
1340/* TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG */
1341#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
1342#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
1343#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
1344#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
1345#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16
1346#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
1347
1348/* TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE */
1349#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_SHIFT 0
1350#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF
1351
1352/* TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE */
1353#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0
1354#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
1355
1356/* TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET */
1357#define TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET_V_SHIFT 0
1358#define TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
1359
1360/* TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE */
1361#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_SHIFT 0
1362#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF
1363
1364/* TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE */
1365#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0
1366#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
1367
1368/* TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET */
1369#define TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET_V_SHIFT 0
1370#define TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
1371
1372/* TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE */
1373#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_SHIFT 0
1374#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF
1375
1376/* TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE */
1377#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0
1378#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
1379
1380/* TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET */
1381#define TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET_V_SHIFT 0
1382#define TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
1383
1384/* TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE */
1385#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_SHIFT 0
1386#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF
1387
1388/* TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE */
1389#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0
1390#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
1391
1392/* TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET */
1393#define TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET_V_SHIFT 0
1394#define TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
1395
1396/* TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE */
1397#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_SHIFT 0
1398#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF
1399
1400/* TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE */
1401#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0
1402#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
1403
1404/* TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET */
1405#define TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET_V_SHIFT 0
1406#define TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
1407
1408/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW */
1409#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0
1410#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
1411
1412/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH */
1413#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0
1414#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
1415
1416/* TPC0_CFG_QM_TENSOR_7_PADDING_VALUE */
1417#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_SHIFT 0
1418#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF
1419
1420/* TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG */
1421#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
1422#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
1423#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
1424#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
1425#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16
1426#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
1427
1428/* TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE */
1429#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_SHIFT 0
1430#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF
1431
1432/* TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE */
1433#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0
1434#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
1435
1436/* TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET */
1437#define TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET_V_SHIFT 0
1438#define TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
1439
1440/* TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE */
1441#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_SHIFT 0
1442#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF
1443
1444/* TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE */
1445#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0
1446#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
1447
1448/* TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET */
1449#define TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET_V_SHIFT 0
1450#define TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
1451
1452/* TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE */
1453#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_SHIFT 0
1454#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF
1455
1456/* TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE */
1457#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0
1458#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
1459
1460/* TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET */
1461#define TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET_V_SHIFT 0
1462#define TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
1463
1464/* TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE */
1465#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_SHIFT 0
1466#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF
1467
1468/* TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE */
1469#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0
1470#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
1471
1472/* TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET */
1473#define TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET_V_SHIFT 0
1474#define TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
1475
1476/* TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE */
1477#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_SHIFT 0
1478#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF
1479
1480/* TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE */
1481#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0
1482#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
1483
1484/* TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET */
1485#define TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET_V_SHIFT 0
1486#define TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
1487
1488/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW */
1489#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0
1490#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
1491
1492/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH */
1493#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0
1494#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
1495
1496/* TPC0_CFG_QM_TID_BASE_DIM_0 */
1497#define TPC0_CFG_QM_TID_BASE_DIM_0_V_SHIFT 0
1498#define TPC0_CFG_QM_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF
1499
1500/* TPC0_CFG_QM_TID_SIZE_DIM_0 */
1501#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_SHIFT 0
1502#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF
1503
1504/* TPC0_CFG_QM_TID_BASE_DIM_1 */
1505#define TPC0_CFG_QM_TID_BASE_DIM_1_V_SHIFT 0
1506#define TPC0_CFG_QM_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF
1507
1508/* TPC0_CFG_QM_TID_SIZE_DIM_1 */
1509#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_SHIFT 0
1510#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF
1511
1512/* TPC0_CFG_QM_TID_BASE_DIM_2 */
1513#define TPC0_CFG_QM_TID_BASE_DIM_2_V_SHIFT 0
1514#define TPC0_CFG_QM_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF
1515
1516/* TPC0_CFG_QM_TID_SIZE_DIM_2 */
1517#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_SHIFT 0
1518#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF
1519
1520/* TPC0_CFG_QM_TID_BASE_DIM_3 */
1521#define TPC0_CFG_QM_TID_BASE_DIM_3_V_SHIFT 0
1522#define TPC0_CFG_QM_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF
1523
1524/* TPC0_CFG_QM_TID_SIZE_DIM_3 */
1525#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_SHIFT 0
1526#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF
1527
1528/* TPC0_CFG_QM_TID_BASE_DIM_4 */
1529#define TPC0_CFG_QM_TID_BASE_DIM_4_V_SHIFT 0
1530#define TPC0_CFG_QM_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF
1531
1532/* TPC0_CFG_QM_TID_SIZE_DIM_4 */
1533#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_SHIFT 0
1534#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF
1535
1536/* TPC0_CFG_QM_SRF */
1537#define TPC0_CFG_QM_SRF_V_SHIFT 0
1538#define TPC0_CFG_QM_SRF_V_MASK 0xFFFFFFFF
1539
1540/* TPC0_CFG_QM_KERNEL_CONFIG */
1541#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_SHIFT 0
1542#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_MASK 0x1
1543#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1
1544#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2
1545#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 8
1546#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0x3F00
1547
1548/* TPC0_CFG_QM_SYNC_OBJECT_MESSAGE */
1549#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
1550#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
1551#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
1552#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
1553#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
1554#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
1555
1556/* TPC0_CFG_ARUSER */
1557#define TPC0_CFG_ARUSER_ASID_SHIFT 0
1558#define TPC0_CFG_ARUSER_ASID_MASK 0x3FF
1559#define TPC0_CFG_ARUSER_MMBP_SHIFT 10
1560#define TPC0_CFG_ARUSER_MMBP_MASK 0x400
1561#define TPC0_CFG_ARUSER_V_SHIFT 11
1562#define TPC0_CFG_ARUSER_V_MASK 0xFFFFF800
1563
1564/* TPC0_CFG_AWUSER */
1565#define TPC0_CFG_AWUSER_ASID_SHIFT 0
1566#define TPC0_CFG_AWUSER_ASID_MASK 0x3FF
1567#define TPC0_CFG_AWUSER_MMBP_SHIFT 10
1568#define TPC0_CFG_AWUSER_MMBP_MASK 0x400
1569#define TPC0_CFG_AWUSER_V_SHIFT 11
1570#define TPC0_CFG_AWUSER_V_MASK 0xFFFFF800
1571
1572/* TPC0_CFG_FUNC_MBIST_CNTRL */
1573#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT 0
1574#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_MASK 0x1
1575#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_SHIFT 1
1576#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK 0x2
1577#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_SHIFT 2
1578#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK 0x4
1579#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_SHIFT 16
1580#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_MASK 0x3FF0000
1581
1582/* TPC0_CFG_FUNC_MBIST_PAT */
1583#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_SHIFT 0
1584#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_MASK 0x3
1585#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_SHIFT 2
1586#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_MASK 0xC
1587#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_SHIFT 4
1588#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_MASK 0x30
1589#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_SHIFT 6
1590#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_MASK 0xC0
1591#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_SHIFT 8
1592#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_MASK 0x300
1593#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_SHIFT 10
1594#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_MASK 0xC00
1595
1596/* TPC0_CFG_FUNC_MBIST_MEM */
1597#define TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_SHIFT 0
1598#define TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_MASK 0x7FF
1599#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_SHIFT 12
1600#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_MASK 0x7000
1601#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_SHIFT 16
1602#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_MASK 0x7FF0000
1603#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_SHIFT 28
1604#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_MASK 0x70000000
1605
1606#endif /* ASIC_REG_TPC0_CFG_MASKS_H_ */
1607
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
new file mode 100644
index 000000000000..2be28a63c50a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
@@ -0,0 +1,887 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC0_CFG_REGS_H_
14#define ASIC_REG_TPC0_CFG_REGS_H_
15
16/*
17 *****************************************
18 * TPC0_CFG (Prototype: TPC)
19 *****************************************
20 */
21
22#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE06400
23
24#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE06404
25
26#define mmTPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE06408
27
28#define mmTPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE0640C
29
30#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE06410
31
32#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE06414
33
34#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE06418
35
36#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE0641C
37
38#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE06420
39
40#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE06424
41
42#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE06428
43
44#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE0642C
45
46#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE06430
47
48#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE06434
49
50#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE06438
51
52#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE0643C
53
54#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE06440
55
56#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE06444
57
58#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE06448
59
60#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE0644C
61
62#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE06450
63
64#define mmTPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE06454
65
66#define mmTPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE06458
67
68#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE0645C
69
70#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE06460
71
72#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE06464
73
74#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE06468
75
76#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE0646C
77
78#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE06470
79
80#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE06474
81
82#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE06478
83
84#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE0647C
85
86#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE06480
87
88#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE06484
89
90#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE06488
91
92#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE0648C
93
94#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE06490
95
96#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE06494
97
98#define mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE06498
99
100#define mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE0649C
101
102#define mmTPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE064A0
103
104#define mmTPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE064A4
105
106#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE064A8
107
108#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE064AC
109
110#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE064B0
111
112#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE064B4
113
114#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE064B8
115
116#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE064BC
117
118#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE064C0
119
120#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE064C4
121
122#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE064C8
123
124#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE064CC
125
126#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE064D0
127
128#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE064D4
129
130#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE064D8
131
132#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE064DC
133
134#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE064E0
135
136#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE064E4
137
138#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE064E8
139
140#define mmTPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE064EC
141
142#define mmTPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE064F0
143
144#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE064F4
145
146#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE064F8
147
148#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE064FC
149
150#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE06500
151
152#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE06504
153
154#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE06508
155
156#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE0650C
157
158#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE06510
159
160#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE06514
161
162#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE06518
163
164#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE0651C
165
166#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE06520
167
168#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE06524
169
170#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE06528
171
172#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE0652C
173
174#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE06530
175
176#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE06534
177
178#define mmTPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE06538
179
180#define mmTPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE0653C
181
182#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE06540
183
184#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE06544
185
186#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE06548
187
188#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE0654C
189
190#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE06550
191
192#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE06554
193
194#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE06558
195
196#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE0655C
197
198#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE06560
199
200#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE06564
201
202#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE06568
203
204#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE0656C
205
206#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE06570
207
208#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE06574
209
210#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE06578
211
212#define mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE0657C
213
214#define mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE06580
215
216#define mmTPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE06584
217
218#define mmTPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE06588
219
220#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE0658C
221
222#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE06590
223
224#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE06594
225
226#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE06598
227
228#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE0659C
229
230#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE065A0
231
232#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE065A4
233
234#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE065A8
235
236#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE065AC
237
238#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE065B0
239
240#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE065B4
241
242#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE065B8
243
244#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE065BC
245
246#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE065C0
247
248#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE065C4
249
250#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE065C8
251
252#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE065CC
253
254#define mmTPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE065D0
255
256#define mmTPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE065D4
257
258#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE065D8
259
260#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE065DC
261
262#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE065E0
263
264#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE065E4
265
266#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE065E8
267
268#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE065EC
269
270#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE065F0
271
272#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE065F4
273
274#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE065F8
275
276#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE065FC
277
278#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE06600
279
280#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE06604
281
282#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE06608
283
284#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE0660C
285
286#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE06610
287
288#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE06614
289
290#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE06618
291
292#define mmTPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE0661C
293
294#define mmTPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE06620
295
296#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE06624
297
298#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE06628
299
300#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE0662C
301
302#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE06630
303
304#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE06634
305
306#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE06638
307
308#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE0663C
309
310#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE06640
311
312#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE06644
313
314#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE06648
315
316#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE0664C
317
318#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE06650
319
320#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE06654
321
322#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE06658
323
324#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE0665C
325
326#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE06660
327
328#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE06664
329
330#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_0 0xE06668
331
332#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_0 0xE0666C
333
334#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_1 0xE06670
335
336#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_1 0xE06674
337
338#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_2 0xE06678
339
340#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_2 0xE0667C
341
342#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_3 0xE06680
343
344#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_3 0xE06684
345
346#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_4 0xE06688
347
348#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_4 0xE0668C
349
350#define mmTPC0_CFG_KERNEL_SRF_0 0xE06690
351
352#define mmTPC0_CFG_KERNEL_SRF_1 0xE06694
353
354#define mmTPC0_CFG_KERNEL_SRF_2 0xE06698
355
356#define mmTPC0_CFG_KERNEL_SRF_3 0xE0669C
357
358#define mmTPC0_CFG_KERNEL_SRF_4 0xE066A0
359
360#define mmTPC0_CFG_KERNEL_SRF_5 0xE066A4
361
362#define mmTPC0_CFG_KERNEL_SRF_6 0xE066A8
363
364#define mmTPC0_CFG_KERNEL_SRF_7 0xE066AC
365
366#define mmTPC0_CFG_KERNEL_SRF_8 0xE066B0
367
368#define mmTPC0_CFG_KERNEL_SRF_9 0xE066B4
369
370#define mmTPC0_CFG_KERNEL_SRF_10 0xE066B8
371
372#define mmTPC0_CFG_KERNEL_SRF_11 0xE066BC
373
374#define mmTPC0_CFG_KERNEL_SRF_12 0xE066C0
375
376#define mmTPC0_CFG_KERNEL_SRF_13 0xE066C4
377
378#define mmTPC0_CFG_KERNEL_SRF_14 0xE066C8
379
380#define mmTPC0_CFG_KERNEL_SRF_15 0xE066CC
381
382#define mmTPC0_CFG_KERNEL_SRF_16 0xE066D0
383
384#define mmTPC0_CFG_KERNEL_SRF_17 0xE066D4
385
386#define mmTPC0_CFG_KERNEL_SRF_18 0xE066D8
387
388#define mmTPC0_CFG_KERNEL_SRF_19 0xE066DC
389
390#define mmTPC0_CFG_KERNEL_SRF_20 0xE066E0
391
392#define mmTPC0_CFG_KERNEL_SRF_21 0xE066E4
393
394#define mmTPC0_CFG_KERNEL_SRF_22 0xE066E8
395
396#define mmTPC0_CFG_KERNEL_SRF_23 0xE066EC
397
398#define mmTPC0_CFG_KERNEL_SRF_24 0xE066F0
399
400#define mmTPC0_CFG_KERNEL_SRF_25 0xE066F4
401
402#define mmTPC0_CFG_KERNEL_SRF_26 0xE066F8
403
404#define mmTPC0_CFG_KERNEL_SRF_27 0xE066FC
405
406#define mmTPC0_CFG_KERNEL_SRF_28 0xE06700
407
408#define mmTPC0_CFG_KERNEL_SRF_29 0xE06704
409
410#define mmTPC0_CFG_KERNEL_SRF_30 0xE06708
411
412#define mmTPC0_CFG_KERNEL_SRF_31 0xE0670C
413
414#define mmTPC0_CFG_KERNEL_KERNEL_CONFIG 0xE06710
415
416#define mmTPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE06714
417
418#define mmTPC0_CFG_RESERVED_DESC_END 0xE06738
419
420#define mmTPC0_CFG_ROUND_CSR 0xE067FC
421
422#define mmTPC0_CFG_TBUF_BASE_ADDR_LOW 0xE06800
423
424#define mmTPC0_CFG_TBUF_BASE_ADDR_HIGH 0xE06804
425
426#define mmTPC0_CFG_SEMAPHORE 0xE06808
427
428#define mmTPC0_CFG_VFLAGS 0xE0680C
429
430#define mmTPC0_CFG_SFLAGS 0xE06810
431
432#define mmTPC0_CFG_LFSR_POLYNOM 0xE06818
433
434#define mmTPC0_CFG_STATUS 0xE0681C
435
436#define mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH 0xE06820
437
438#define mmTPC0_CFG_CFG_SUBTRACT_VALUE 0xE06824
439
440#define mmTPC0_CFG_SM_BASE_ADDRESS_LOW 0xE06828
441
442#define mmTPC0_CFG_SM_BASE_ADDRESS_HIGH 0xE0682C
443
444#define mmTPC0_CFG_TPC_CMD 0xE06830
445
446#define mmTPC0_CFG_TPC_EXECUTE 0xE06838
447
448#define mmTPC0_CFG_TPC_STALL 0xE0683C
449
450#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_LOW 0xE06840
451
452#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE06844
453
454#define mmTPC0_CFG_MSS_CONFIG 0xE06854
455
456#define mmTPC0_CFG_TPC_INTR_CAUSE 0xE06858
457
458#define mmTPC0_CFG_TPC_INTR_MASK 0xE0685C
459
460#define mmTPC0_CFG_TSB_CONFIG 0xE06860
461
462#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE06A00
463
464#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE06A04
465
466#define mmTPC0_CFG_QM_TENSOR_0_PADDING_VALUE 0xE06A08
467
468#define mmTPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE06A0C
469
470#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE06A10
471
472#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE06A14
473
474#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE06A18
475
476#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE06A1C
477
478#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE06A20
479
480#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE06A24
481
482#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE06A28
483
484#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE06A2C
485
486#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE06A30
487
488#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE06A34
489
490#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE06A38
491
492#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE06A3C
493
494#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE06A40
495
496#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE06A44
497
498#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE06A48
499
500#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE06A4C
501
502#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE06A50
503
504#define mmTPC0_CFG_QM_TENSOR_1_PADDING_VALUE 0xE06A54
505
506#define mmTPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE06A58
507
508#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE06A5C
509
510#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE06A60
511
512#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE06A64
513
514#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE06A68
515
516#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE06A6C
517
518#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE06A70
519
520#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE06A74
521
522#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE06A78
523
524#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE06A7C
525
526#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE06A80
527
528#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE06A84
529
530#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE06A88
531
532#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE06A8C
533
534#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE06A90
535
536#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE06A94
537
538#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE06A98
539
540#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE06A9C
541
542#define mmTPC0_CFG_QM_TENSOR_2_PADDING_VALUE 0xE06AA0
543
544#define mmTPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE06AA4
545
546#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE06AA8
547
548#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE06AAC
549
550#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE06AB0
551
552#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE06AB4
553
554#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE06AB8
555
556#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE06ABC
557
558#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE06AC0
559
560#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE06AC4
561
562#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE06AC8
563
564#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE06ACC
565
566#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE06AD0
567
568#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE06AD4
569
570#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE06AD8
571
572#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE06ADC
573
574#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE06AE0
575
576#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE06AE4
577
578#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE06AE8
579
580#define mmTPC0_CFG_QM_TENSOR_3_PADDING_VALUE 0xE06AEC
581
582#define mmTPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE06AF0
583
584#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE06AF4
585
586#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE06AF8
587
588#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE06AFC
589
590#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE06B00
591
592#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE06B04
593
594#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE06B08
595
596#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE06B0C
597
598#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE06B10
599
600#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE06B14
601
602#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE06B18
603
604#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE06B1C
605
606#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE06B20
607
608#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE06B24
609
610#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE06B28
611
612#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE06B2C
613
614#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE06B30
615
616#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE06B34
617
618#define mmTPC0_CFG_QM_TENSOR_4_PADDING_VALUE 0xE06B38
619
620#define mmTPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE06B3C
621
622#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE06B40
623
624#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE06B44
625
626#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE06B48
627
628#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE06B4C
629
630#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE06B50
631
632#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE06B54
633
634#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE06B58
635
636#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE06B5C
637
638#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE06B60
639
640#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE06B64
641
642#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE06B68
643
644#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE06B6C
645
646#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE06B70
647
648#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE06B74
649
650#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE06B78
651
652#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE06B7C
653
654#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE06B80
655
656#define mmTPC0_CFG_QM_TENSOR_5_PADDING_VALUE 0xE06B84
657
658#define mmTPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE06B88
659
660#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE06B8C
661
662#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE06B90
663
664#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE06B94
665
666#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE06B98
667
668#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE06B9C
669
670#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE06BA0
671
672#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE06BA4
673
674#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE06BA8
675
676#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE06BAC
677
678#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE06BB0
679
680#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE06BB4
681
682#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE06BB8
683
684#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE06BBC
685
686#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE06BC0
687
688#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE06BC4
689
690#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE06BC8
691
692#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE06BCC
693
694#define mmTPC0_CFG_QM_TENSOR_6_PADDING_VALUE 0xE06BD0
695
696#define mmTPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE06BD4
697
698#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE06BD8
699
700#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE06BDC
701
702#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE06BE0
703
704#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE06BE4
705
706#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE06BE8
707
708#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE06BEC
709
710#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE06BF0
711
712#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE06BF4
713
714#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE06BF8
715
716#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE06BFC
717
718#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE06C00
719
720#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE06C04
721
722#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE06C08
723
724#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE06C0C
725
726#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE06C10
727
728#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE06C14
729
730#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE06C18
731
732#define mmTPC0_CFG_QM_TENSOR_7_PADDING_VALUE 0xE06C1C
733
734#define mmTPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE06C20
735
736#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE06C24
737
738#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE06C28
739
740#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE06C2C
741
742#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE06C30
743
744#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE06C34
745
746#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE06C38
747
748#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE06C3C
749
750#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE06C40
751
752#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE06C44
753
754#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE06C48
755
756#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE06C4C
757
758#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE06C50
759
760#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE06C54
761
762#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE06C58
763
764#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE06C5C
765
766#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE06C60
767
768#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE06C64
769
770#define mmTPC0_CFG_QM_TID_BASE_DIM_0 0xE06C68
771
772#define mmTPC0_CFG_QM_TID_SIZE_DIM_0 0xE06C6C
773
774#define mmTPC0_CFG_QM_TID_BASE_DIM_1 0xE06C70
775
776#define mmTPC0_CFG_QM_TID_SIZE_DIM_1 0xE06C74
777
778#define mmTPC0_CFG_QM_TID_BASE_DIM_2 0xE06C78
779
780#define mmTPC0_CFG_QM_TID_SIZE_DIM_2 0xE06C7C
781
782#define mmTPC0_CFG_QM_TID_BASE_DIM_3 0xE06C80
783
784#define mmTPC0_CFG_QM_TID_SIZE_DIM_3 0xE06C84
785
786#define mmTPC0_CFG_QM_TID_BASE_DIM_4 0xE06C88
787
788#define mmTPC0_CFG_QM_TID_SIZE_DIM_4 0xE06C8C
789
790#define mmTPC0_CFG_QM_SRF_0 0xE06C90
791
792#define mmTPC0_CFG_QM_SRF_1 0xE06C94
793
794#define mmTPC0_CFG_QM_SRF_2 0xE06C98
795
796#define mmTPC0_CFG_QM_SRF_3 0xE06C9C
797
798#define mmTPC0_CFG_QM_SRF_4 0xE06CA0
799
800#define mmTPC0_CFG_QM_SRF_5 0xE06CA4
801
802#define mmTPC0_CFG_QM_SRF_6 0xE06CA8
803
804#define mmTPC0_CFG_QM_SRF_7 0xE06CAC
805
806#define mmTPC0_CFG_QM_SRF_8 0xE06CB0
807
808#define mmTPC0_CFG_QM_SRF_9 0xE06CB4
809
810#define mmTPC0_CFG_QM_SRF_10 0xE06CB8
811
812#define mmTPC0_CFG_QM_SRF_11 0xE06CBC
813
814#define mmTPC0_CFG_QM_SRF_12 0xE06CC0
815
816#define mmTPC0_CFG_QM_SRF_13 0xE06CC4
817
818#define mmTPC0_CFG_QM_SRF_14 0xE06CC8
819
820#define mmTPC0_CFG_QM_SRF_15 0xE06CCC
821
822#define mmTPC0_CFG_QM_SRF_16 0xE06CD0
823
824#define mmTPC0_CFG_QM_SRF_17 0xE06CD4
825
826#define mmTPC0_CFG_QM_SRF_18 0xE06CD8
827
828#define mmTPC0_CFG_QM_SRF_19 0xE06CDC
829
830#define mmTPC0_CFG_QM_SRF_20 0xE06CE0
831
832#define mmTPC0_CFG_QM_SRF_21 0xE06CE4
833
834#define mmTPC0_CFG_QM_SRF_22 0xE06CE8
835
836#define mmTPC0_CFG_QM_SRF_23 0xE06CEC
837
838#define mmTPC0_CFG_QM_SRF_24 0xE06CF0
839
840#define mmTPC0_CFG_QM_SRF_25 0xE06CF4
841
842#define mmTPC0_CFG_QM_SRF_26 0xE06CF8
843
844#define mmTPC0_CFG_QM_SRF_27 0xE06CFC
845
846#define mmTPC0_CFG_QM_SRF_28 0xE06D00
847
848#define mmTPC0_CFG_QM_SRF_29 0xE06D04
849
850#define mmTPC0_CFG_QM_SRF_30 0xE06D08
851
852#define mmTPC0_CFG_QM_SRF_31 0xE06D0C
853
854#define mmTPC0_CFG_QM_KERNEL_CONFIG 0xE06D10
855
856#define mmTPC0_CFG_QM_SYNC_OBJECT_MESSAGE 0xE06D14
857
858#define mmTPC0_CFG_ARUSER 0xE06D18
859
860#define mmTPC0_CFG_AWUSER 0xE06D1C
861
862#define mmTPC0_CFG_FUNC_MBIST_CNTRL 0xE06E00
863
864#define mmTPC0_CFG_FUNC_MBIST_PAT 0xE06E04
865
866#define mmTPC0_CFG_FUNC_MBIST_MEM_0 0xE06E08
867
868#define mmTPC0_CFG_FUNC_MBIST_MEM_1 0xE06E0C
869
870#define mmTPC0_CFG_FUNC_MBIST_MEM_2 0xE06E10
871
872#define mmTPC0_CFG_FUNC_MBIST_MEM_3 0xE06E14
873
874#define mmTPC0_CFG_FUNC_MBIST_MEM_4 0xE06E18
875
876#define mmTPC0_CFG_FUNC_MBIST_MEM_5 0xE06E1C
877
878#define mmTPC0_CFG_FUNC_MBIST_MEM_6 0xE06E20
879
880#define mmTPC0_CFG_FUNC_MBIST_MEM_7 0xE06E24
881
882#define mmTPC0_CFG_FUNC_MBIST_MEM_8 0xE06E28
883
884#define mmTPC0_CFG_FUNC_MBIST_MEM_9 0xE06E2C
885
886#endif /* ASIC_REG_TPC0_CFG_REGS_H_ */
887
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
new file mode 100644
index 000000000000..9aa2d8b53207
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
@@ -0,0 +1,373 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC0_CMDQ_MASKS_H_
14#define ASIC_REG_TPC0_CMDQ_MASKS_H_
15
16/*
17 *****************************************
18 * TPC0_CMDQ (Prototype: CMDQ)
19 *****************************************
20 */
21
22/* TPC0_CMDQ_GLBL_CFG0 */
23#define TPC0_CMDQ_GLBL_CFG0_PQF_EN_SHIFT 0
24#define TPC0_CMDQ_GLBL_CFG0_PQF_EN_MASK 0x1
25#define TPC0_CMDQ_GLBL_CFG0_CQF_EN_SHIFT 1
26#define TPC0_CMDQ_GLBL_CFG0_CQF_EN_MASK 0x2
27#define TPC0_CMDQ_GLBL_CFG0_CP_EN_SHIFT 2
28#define TPC0_CMDQ_GLBL_CFG0_CP_EN_MASK 0x4
29#define TPC0_CMDQ_GLBL_CFG0_DMA_EN_SHIFT 3
30#define TPC0_CMDQ_GLBL_CFG0_DMA_EN_MASK 0x8
31
32/* TPC0_CMDQ_GLBL_CFG1 */
33#define TPC0_CMDQ_GLBL_CFG1_PQF_STOP_SHIFT 0
34#define TPC0_CMDQ_GLBL_CFG1_PQF_STOP_MASK 0x1
35#define TPC0_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT 1
36#define TPC0_CMDQ_GLBL_CFG1_CQF_STOP_MASK 0x2
37#define TPC0_CMDQ_GLBL_CFG1_CP_STOP_SHIFT 2
38#define TPC0_CMDQ_GLBL_CFG1_CP_STOP_MASK 0x4
39#define TPC0_CMDQ_GLBL_CFG1_DMA_STOP_SHIFT 3
40#define TPC0_CMDQ_GLBL_CFG1_DMA_STOP_MASK 0x8
41#define TPC0_CMDQ_GLBL_CFG1_PQF_FLUSH_SHIFT 8
42#define TPC0_CMDQ_GLBL_CFG1_PQF_FLUSH_MASK 0x100
43#define TPC0_CMDQ_GLBL_CFG1_CQF_FLUSH_SHIFT 9
44#define TPC0_CMDQ_GLBL_CFG1_CQF_FLUSH_MASK 0x200
45#define TPC0_CMDQ_GLBL_CFG1_CP_FLUSH_SHIFT 10
46#define TPC0_CMDQ_GLBL_CFG1_CP_FLUSH_MASK 0x400
47#define TPC0_CMDQ_GLBL_CFG1_DMA_FLUSH_SHIFT 11
48#define TPC0_CMDQ_GLBL_CFG1_DMA_FLUSH_MASK 0x800
49
50/* TPC0_CMDQ_GLBL_PROT */
51#define TPC0_CMDQ_GLBL_PROT_PQF_PROT_SHIFT 0
52#define TPC0_CMDQ_GLBL_PROT_PQF_PROT_MASK 0x1
53#define TPC0_CMDQ_GLBL_PROT_CQF_PROT_SHIFT 1
54#define TPC0_CMDQ_GLBL_PROT_CQF_PROT_MASK 0x2
55#define TPC0_CMDQ_GLBL_PROT_CP_PROT_SHIFT 2
56#define TPC0_CMDQ_GLBL_PROT_CP_PROT_MASK 0x4
57#define TPC0_CMDQ_GLBL_PROT_DMA_PROT_SHIFT 3
58#define TPC0_CMDQ_GLBL_PROT_DMA_PROT_MASK 0x8
59#define TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
60#define TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
61#define TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
62#define TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
63#define TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT 6
64#define TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_MASK 0x40
65#define TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
66#define TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
67
68/* TPC0_CMDQ_GLBL_ERR_CFG */
69#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
70#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
71#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
72#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
73#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
74#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
75#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
76#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
77#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
78#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
79#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
80#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
81#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
82#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
83#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
84#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
85#define TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
86#define TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
87#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
88#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
89#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
90#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
91#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
92#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
93
94/* TPC0_CMDQ_GLBL_ERR_ADDR_LO */
95#define TPC0_CMDQ_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
96#define TPC0_CMDQ_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
97
98/* TPC0_CMDQ_GLBL_ERR_ADDR_HI */
99#define TPC0_CMDQ_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
100#define TPC0_CMDQ_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
101
102/* TPC0_CMDQ_GLBL_ERR_WDATA */
103#define TPC0_CMDQ_GLBL_ERR_WDATA_VAL_SHIFT 0
104#define TPC0_CMDQ_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
105
106/* TPC0_CMDQ_GLBL_SECURE_PROPS */
107#define TPC0_CMDQ_GLBL_SECURE_PROPS_ASID_SHIFT 0
108#define TPC0_CMDQ_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
109#define TPC0_CMDQ_GLBL_SECURE_PROPS_MMBP_SHIFT 10
110#define TPC0_CMDQ_GLBL_SECURE_PROPS_MMBP_MASK 0x400
111
112/* TPC0_CMDQ_GLBL_NON_SECURE_PROPS */
113#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
114#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
115#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
116#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
117
118/* TPC0_CMDQ_GLBL_STS0 */
119#define TPC0_CMDQ_GLBL_STS0_PQF_IDLE_SHIFT 0
120#define TPC0_CMDQ_GLBL_STS0_PQF_IDLE_MASK 0x1
121#define TPC0_CMDQ_GLBL_STS0_CQF_IDLE_SHIFT 1
122#define TPC0_CMDQ_GLBL_STS0_CQF_IDLE_MASK 0x2
123#define TPC0_CMDQ_GLBL_STS0_CP_IDLE_SHIFT 2
124#define TPC0_CMDQ_GLBL_STS0_CP_IDLE_MASK 0x4
125#define TPC0_CMDQ_GLBL_STS0_DMA_IDLE_SHIFT 3
126#define TPC0_CMDQ_GLBL_STS0_DMA_IDLE_MASK 0x8
127#define TPC0_CMDQ_GLBL_STS0_PQF_IS_STOP_SHIFT 4
128#define TPC0_CMDQ_GLBL_STS0_PQF_IS_STOP_MASK 0x10
129#define TPC0_CMDQ_GLBL_STS0_CQF_IS_STOP_SHIFT 5
130#define TPC0_CMDQ_GLBL_STS0_CQF_IS_STOP_MASK 0x20
131#define TPC0_CMDQ_GLBL_STS0_CP_IS_STOP_SHIFT 6
132#define TPC0_CMDQ_GLBL_STS0_CP_IS_STOP_MASK 0x40
133#define TPC0_CMDQ_GLBL_STS0_DMA_IS_STOP_SHIFT 7
134#define TPC0_CMDQ_GLBL_STS0_DMA_IS_STOP_MASK 0x80
135
136/* TPC0_CMDQ_GLBL_STS1 */
137#define TPC0_CMDQ_GLBL_STS1_PQF_RD_ERR_SHIFT 0
138#define TPC0_CMDQ_GLBL_STS1_PQF_RD_ERR_MASK 0x1
139#define TPC0_CMDQ_GLBL_STS1_CQF_RD_ERR_SHIFT 1
140#define TPC0_CMDQ_GLBL_STS1_CQF_RD_ERR_MASK 0x2
141#define TPC0_CMDQ_GLBL_STS1_CP_RD_ERR_SHIFT 2
142#define TPC0_CMDQ_GLBL_STS1_CP_RD_ERR_MASK 0x4
143#define TPC0_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
144#define TPC0_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
145#define TPC0_CMDQ_GLBL_STS1_CP_STOP_OP_SHIFT 4
146#define TPC0_CMDQ_GLBL_STS1_CP_STOP_OP_MASK 0x10
147#define TPC0_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
148#define TPC0_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
149#define TPC0_CMDQ_GLBL_STS1_DMA_RD_ERR_SHIFT 8
150#define TPC0_CMDQ_GLBL_STS1_DMA_RD_ERR_MASK 0x100
151#define TPC0_CMDQ_GLBL_STS1_DMA_WR_ERR_SHIFT 9
152#define TPC0_CMDQ_GLBL_STS1_DMA_WR_ERR_MASK 0x200
153#define TPC0_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
154#define TPC0_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
155#define TPC0_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
156#define TPC0_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
157
158/* TPC0_CMDQ_CQ_CFG0 */
159#define TPC0_CMDQ_CQ_CFG0_RESERVED_SHIFT 0
160#define TPC0_CMDQ_CQ_CFG0_RESERVED_MASK 0x1
161
162/* TPC0_CMDQ_CQ_CFG1 */
163#define TPC0_CMDQ_CQ_CFG1_CREDIT_LIM_SHIFT 0
164#define TPC0_CMDQ_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
165#define TPC0_CMDQ_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
166#define TPC0_CMDQ_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
167
168/* TPC0_CMDQ_CQ_ARUSER */
169#define TPC0_CMDQ_CQ_ARUSER_NOSNOOP_SHIFT 0
170#define TPC0_CMDQ_CQ_ARUSER_NOSNOOP_MASK 0x1
171#define TPC0_CMDQ_CQ_ARUSER_WORD_SHIFT 1
172#define TPC0_CMDQ_CQ_ARUSER_WORD_MASK 0x2
173
174/* TPC0_CMDQ_CQ_PTR_LO */
175#define TPC0_CMDQ_CQ_PTR_LO_VAL_SHIFT 0
176#define TPC0_CMDQ_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
177
178/* TPC0_CMDQ_CQ_PTR_HI */
179#define TPC0_CMDQ_CQ_PTR_HI_VAL_SHIFT 0
180#define TPC0_CMDQ_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
181
182/* TPC0_CMDQ_CQ_TSIZE */
183#define TPC0_CMDQ_CQ_TSIZE_VAL_SHIFT 0
184#define TPC0_CMDQ_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
185
186/* TPC0_CMDQ_CQ_CTL */
187#define TPC0_CMDQ_CQ_CTL_RPT_SHIFT 0
188#define TPC0_CMDQ_CQ_CTL_RPT_MASK 0xFFFF
189#define TPC0_CMDQ_CQ_CTL_CTL_SHIFT 16
190#define TPC0_CMDQ_CQ_CTL_CTL_MASK 0xFFFF0000
191
192/* TPC0_CMDQ_CQ_PTR_LO_STS */
193#define TPC0_CMDQ_CQ_PTR_LO_STS_VAL_SHIFT 0
194#define TPC0_CMDQ_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
195
196/* TPC0_CMDQ_CQ_PTR_HI_STS */
197#define TPC0_CMDQ_CQ_PTR_HI_STS_VAL_SHIFT 0
198#define TPC0_CMDQ_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
199
200/* TPC0_CMDQ_CQ_TSIZE_STS */
201#define TPC0_CMDQ_CQ_TSIZE_STS_VAL_SHIFT 0
202#define TPC0_CMDQ_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
203
204/* TPC0_CMDQ_CQ_CTL_STS */
205#define TPC0_CMDQ_CQ_CTL_STS_RPT_SHIFT 0
206#define TPC0_CMDQ_CQ_CTL_STS_RPT_MASK 0xFFFF
207#define TPC0_CMDQ_CQ_CTL_STS_CTL_SHIFT 16
208#define TPC0_CMDQ_CQ_CTL_STS_CTL_MASK 0xFFFF0000
209
210/* TPC0_CMDQ_CQ_STS0 */
211#define TPC0_CMDQ_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
212#define TPC0_CMDQ_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
213#define TPC0_CMDQ_CQ_STS0_CQ_FREE_CNT_SHIFT 16
214#define TPC0_CMDQ_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
215
216/* TPC0_CMDQ_CQ_STS1 */
217#define TPC0_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
218#define TPC0_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
219#define TPC0_CMDQ_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
220#define TPC0_CMDQ_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
221#define TPC0_CMDQ_CQ_STS1_CQ_BUSY_SHIFT 31
222#define TPC0_CMDQ_CQ_STS1_CQ_BUSY_MASK 0x80000000
223
224/* TPC0_CMDQ_CQ_RD_RATE_LIM_EN */
225#define TPC0_CMDQ_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
226#define TPC0_CMDQ_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
227
228/* TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN */
229#define TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
230#define TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
231
232/* TPC0_CMDQ_CQ_RD_RATE_LIM_SAT */
233#define TPC0_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
234#define TPC0_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
235
236/* TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT */
237#define TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
238#define TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
239
240/* TPC0_CMDQ_CQ_IFIFO_CNT */
241#define TPC0_CMDQ_CQ_IFIFO_CNT_VAL_SHIFT 0
242#define TPC0_CMDQ_CQ_IFIFO_CNT_VAL_MASK 0x3
243
244/* TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO */
245#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
246#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
247
248/* TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI */
249#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
250#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
251
252/* TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO */
253#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
254#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
255
256/* TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI */
257#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
258#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
259
260/* TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO */
261#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
262#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
263
264/* TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI */
265#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
266#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
267
268/* TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO */
269#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
270#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
271
272/* TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI */
273#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
274#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
275
276/* TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET */
277#define TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
278#define TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
279
280/* TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET */
281#define TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
282#define TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
283
284/* TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET */
285#define TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
286#define TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
287
288/* TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET */
289#define TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
290#define TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
291
292/* TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET */
293#define TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
294#define TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
295
296/* TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET */
297#define TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
298#define TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
299
300/* TPC0_CMDQ_CP_FENCE0_RDATA */
301#define TPC0_CMDQ_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
302#define TPC0_CMDQ_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
303
304/* TPC0_CMDQ_CP_FENCE1_RDATA */
305#define TPC0_CMDQ_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
306#define TPC0_CMDQ_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
307
308/* TPC0_CMDQ_CP_FENCE2_RDATA */
309#define TPC0_CMDQ_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
310#define TPC0_CMDQ_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
311
312/* TPC0_CMDQ_CP_FENCE3_RDATA */
313#define TPC0_CMDQ_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
314#define TPC0_CMDQ_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
315
316/* TPC0_CMDQ_CP_FENCE0_CNT */
317#define TPC0_CMDQ_CP_FENCE0_CNT_VAL_SHIFT 0
318#define TPC0_CMDQ_CP_FENCE0_CNT_VAL_MASK 0xFF
319
320/* TPC0_CMDQ_CP_FENCE1_CNT */
321#define TPC0_CMDQ_CP_FENCE1_CNT_VAL_SHIFT 0
322#define TPC0_CMDQ_CP_FENCE1_CNT_VAL_MASK 0xFF
323
324/* TPC0_CMDQ_CP_FENCE2_CNT */
325#define TPC0_CMDQ_CP_FENCE2_CNT_VAL_SHIFT 0
326#define TPC0_CMDQ_CP_FENCE2_CNT_VAL_MASK 0xFF
327
328/* TPC0_CMDQ_CP_FENCE3_CNT */
329#define TPC0_CMDQ_CP_FENCE3_CNT_VAL_SHIFT 0
330#define TPC0_CMDQ_CP_FENCE3_CNT_VAL_MASK 0xFF
331
332/* TPC0_CMDQ_CP_STS */
333#define TPC0_CMDQ_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
334#define TPC0_CMDQ_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
335#define TPC0_CMDQ_CP_STS_ERDY_SHIFT 16
336#define TPC0_CMDQ_CP_STS_ERDY_MASK 0x10000
337#define TPC0_CMDQ_CP_STS_RRDY_SHIFT 17
338#define TPC0_CMDQ_CP_STS_RRDY_MASK 0x20000
339#define TPC0_CMDQ_CP_STS_MRDY_SHIFT 18
340#define TPC0_CMDQ_CP_STS_MRDY_MASK 0x40000
341#define TPC0_CMDQ_CP_STS_SW_STOP_SHIFT 19
342#define TPC0_CMDQ_CP_STS_SW_STOP_MASK 0x80000
343#define TPC0_CMDQ_CP_STS_FENCE_ID_SHIFT 20
344#define TPC0_CMDQ_CP_STS_FENCE_ID_MASK 0x300000
345#define TPC0_CMDQ_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
346#define TPC0_CMDQ_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
347
348/* TPC0_CMDQ_CP_CURRENT_INST_LO */
349#define TPC0_CMDQ_CP_CURRENT_INST_LO_VAL_SHIFT 0
350#define TPC0_CMDQ_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
351
352/* TPC0_CMDQ_CP_CURRENT_INST_HI */
353#define TPC0_CMDQ_CP_CURRENT_INST_HI_VAL_SHIFT 0
354#define TPC0_CMDQ_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
355
356/* TPC0_CMDQ_CP_BARRIER_CFG */
357#define TPC0_CMDQ_CP_BARRIER_CFG_EBGUARD_SHIFT 0
358#define TPC0_CMDQ_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
359
360/* TPC0_CMDQ_CP_DBG_0 */
361#define TPC0_CMDQ_CP_DBG_0_VAL_SHIFT 0
362#define TPC0_CMDQ_CP_DBG_0_VAL_MASK 0xFF
363
364/* TPC0_CMDQ_CQ_BUF_ADDR */
365#define TPC0_CMDQ_CQ_BUF_ADDR_VAL_SHIFT 0
366#define TPC0_CMDQ_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
367
368/* TPC0_CMDQ_CQ_BUF_RDATA */
369#define TPC0_CMDQ_CQ_BUF_RDATA_VAL_SHIFT 0
370#define TPC0_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
371
372#endif /* ASIC_REG_TPC0_CMDQ_MASKS_H_ */
373
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
new file mode 100644
index 000000000000..3572752ba66e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
@@ -0,0 +1,139 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC0_CMDQ_REGS_H_
14#define ASIC_REG_TPC0_CMDQ_REGS_H_
15
16/*
17 *****************************************
18 * TPC0_CMDQ (Prototype: CMDQ)
19 *****************************************
20 */
21
22#define mmTPC0_CMDQ_GLBL_CFG0 0xE09000
23
24#define mmTPC0_CMDQ_GLBL_CFG1 0xE09004
25
26#define mmTPC0_CMDQ_GLBL_PROT 0xE09008
27
28#define mmTPC0_CMDQ_GLBL_ERR_CFG 0xE0900C
29
30#define mmTPC0_CMDQ_GLBL_ERR_ADDR_LO 0xE09010
31
32#define mmTPC0_CMDQ_GLBL_ERR_ADDR_HI 0xE09014
33
34#define mmTPC0_CMDQ_GLBL_ERR_WDATA 0xE09018
35
36#define mmTPC0_CMDQ_GLBL_SECURE_PROPS 0xE0901C
37
38#define mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS 0xE09020
39
40#define mmTPC0_CMDQ_GLBL_STS0 0xE09024
41
42#define mmTPC0_CMDQ_GLBL_STS1 0xE09028
43
44#define mmTPC0_CMDQ_CQ_CFG0 0xE090B0
45
46#define mmTPC0_CMDQ_CQ_CFG1 0xE090B4
47
48#define mmTPC0_CMDQ_CQ_ARUSER 0xE090B8
49
50#define mmTPC0_CMDQ_CQ_PTR_LO 0xE090C0
51
52#define mmTPC0_CMDQ_CQ_PTR_HI 0xE090C4
53
54#define mmTPC0_CMDQ_CQ_TSIZE 0xE090C8
55
56#define mmTPC0_CMDQ_CQ_CTL 0xE090CC
57
58#define mmTPC0_CMDQ_CQ_PTR_LO_STS 0xE090D4
59
60#define mmTPC0_CMDQ_CQ_PTR_HI_STS 0xE090D8
61
62#define mmTPC0_CMDQ_CQ_TSIZE_STS 0xE090DC
63
64#define mmTPC0_CMDQ_CQ_CTL_STS 0xE090E0
65
66#define mmTPC0_CMDQ_CQ_STS0 0xE090E4
67
68#define mmTPC0_CMDQ_CQ_STS1 0xE090E8
69
70#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_EN 0xE090F0
71
72#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE090F4
73
74#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_SAT 0xE090F8
75
76#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE090FC
77
78#define mmTPC0_CMDQ_CQ_IFIFO_CNT 0xE09108
79
80#define mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE09120
81
82#define mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE09124
83
84#define mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE09128
85
86#define mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE0912C
87
88#define mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE09130
89
90#define mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE09134
91
92#define mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE09138
93
94#define mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE0913C
95
96#define mmTPC0_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE09140
97
98#define mmTPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE09144
99
100#define mmTPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE09148
101
102#define mmTPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE0914C
103
104#define mmTPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE09150
105
106#define mmTPC0_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE09154
107
108#define mmTPC0_CMDQ_CP_FENCE0_RDATA 0xE09158
109
110#define mmTPC0_CMDQ_CP_FENCE1_RDATA 0xE0915C
111
112#define mmTPC0_CMDQ_CP_FENCE2_RDATA 0xE09160
113
114#define mmTPC0_CMDQ_CP_FENCE3_RDATA 0xE09164
115
116#define mmTPC0_CMDQ_CP_FENCE0_CNT 0xE09168
117
118#define mmTPC0_CMDQ_CP_FENCE1_CNT 0xE0916C
119
120#define mmTPC0_CMDQ_CP_FENCE2_CNT 0xE09170
121
122#define mmTPC0_CMDQ_CP_FENCE3_CNT 0xE09174
123
124#define mmTPC0_CMDQ_CP_STS 0xE09178
125
126#define mmTPC0_CMDQ_CP_CURRENT_INST_LO 0xE0917C
127
128#define mmTPC0_CMDQ_CP_CURRENT_INST_HI 0xE09180
129
130#define mmTPC0_CMDQ_CP_BARRIER_CFG 0xE09184
131
132#define mmTPC0_CMDQ_CP_DBG_0 0xE09188
133
134#define mmTPC0_CMDQ_CQ_BUF_ADDR 0xE09308
135
136#define mmTPC0_CMDQ_CQ_BUF_RDATA 0xE0930C
137
138#endif /* ASIC_REG_TPC0_CMDQ_REGS_H_ */
139
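As a sketch of how the mm* offsets above could be consumed, the snippet below reads one TPC0 CMDQ register through an ioremapped configuration-space base. The cfg_base pointer and the function name are hypothetical and not taken from this patch; only readl() and the mm* offset come from known kernel/header definitions:

	/* Illustrative only: read one TPC0 CMDQ register via an ioremapped base. */
	static u32 example_read_cmdq_glbl_sts0(void __iomem *cfg_base)
	{
		/* mm* values are byte offsets from the device configuration base. */
		return readl(cfg_base + mmTPC0_CMDQ_GLBL_STS0);
	}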
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
new file mode 100644
index 000000000000..ed866d93c440
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
@@ -0,0 +1,347 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC0_EML_CFG_MASKS_H_
14#define ASIC_REG_TPC0_EML_CFG_MASKS_H_
15
16/*
17 *****************************************
18 * TPC0_EML_CFG (Prototype: TPC_EML_CFG)
19 *****************************************
20 */
21
22/* TPC0_EML_CFG_DBG_CNT */
23#define TPC0_EML_CFG_DBG_CNT_DBG_ENTER_SHIFT 0
24#define TPC0_EML_CFG_DBG_CNT_DBG_ENTER_MASK 0x1
25#define TPC0_EML_CFG_DBG_CNT_DBG_EN_SHIFT 1
26#define TPC0_EML_CFG_DBG_CNT_DBG_EN_MASK 0x2
27#define TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT 2
28#define TPC0_EML_CFG_DBG_CNT_CORE_RST_MASK 0x4
29#define TPC0_EML_CFG_DBG_CNT_DCACHE_INV_SHIFT 4
30#define TPC0_EML_CFG_DBG_CNT_DCACHE_INV_MASK 0x10
31#define TPC0_EML_CFG_DBG_CNT_ICACHE_INV_SHIFT 5
32#define TPC0_EML_CFG_DBG_CNT_ICACHE_INV_MASK 0x20
33#define TPC0_EML_CFG_DBG_CNT_DBG_EXIT_SHIFT 6
34#define TPC0_EML_CFG_DBG_CNT_DBG_EXIT_MASK 0x40
35#define TPC0_EML_CFG_DBG_CNT_SNG_STEP_SHIFT 7
36#define TPC0_EML_CFG_DBG_CNT_SNG_STEP_MASK 0x80
37#define TPC0_EML_CFG_DBG_CNT_BP_DBGSW_EN_SHIFT 16
38#define TPC0_EML_CFG_DBG_CNT_BP_DBGSW_EN_MASK 0x10000
39
40/* TPC0_EML_CFG_DBG_STS */
41#define TPC0_EML_CFG_DBG_STS_DBG_MODE_SHIFT 0
42#define TPC0_EML_CFG_DBG_STS_DBG_MODE_MASK 0x1
43#define TPC0_EML_CFG_DBG_STS_CORE_READY_SHIFT 1
44#define TPC0_EML_CFG_DBG_STS_CORE_READY_MASK 0x2
45#define TPC0_EML_CFG_DBG_STS_DURING_KERNEL_SHIFT 2
46#define TPC0_EML_CFG_DBG_STS_DURING_KERNEL_MASK 0x4
47#define TPC0_EML_CFG_DBG_STS_ICACHE_IDLE_SHIFT 3
48#define TPC0_EML_CFG_DBG_STS_ICACHE_IDLE_MASK 0x8
49#define TPC0_EML_CFG_DBG_STS_DCACHE_IDLE_SHIFT 4
50#define TPC0_EML_CFG_DBG_STS_DCACHE_IDLE_MASK 0x10
51#define TPC0_EML_CFG_DBG_STS_QM_IDLE_SHIFT 5
52#define TPC0_EML_CFG_DBG_STS_QM_IDLE_MASK 0x20
53#define TPC0_EML_CFG_DBG_STS_WQ_IDLE_SHIFT 6
54#define TPC0_EML_CFG_DBG_STS_WQ_IDLE_MASK 0x40
55#define TPC0_EML_CFG_DBG_STS_MSS_IDLE_SHIFT 7
56#define TPC0_EML_CFG_DBG_STS_MSS_IDLE_MASK 0x80
57#define TPC0_EML_CFG_DBG_STS_DBG_CAUSE_SHIFT 8
58#define TPC0_EML_CFG_DBG_STS_DBG_CAUSE_MASK 0xFFFFFF00
59
60/* TPC0_EML_CFG_DBG_PADD */
61#define TPC0_EML_CFG_DBG_PADD_ADDRESS_SHIFT 0
62#define TPC0_EML_CFG_DBG_PADD_ADDRESS_MASK 0xFFFFFFFF
63
64/* TPC0_EML_CFG_DBG_PADD_COUNT */
65#define TPC0_EML_CFG_DBG_PADD_COUNT_COUNT_SHIFT 0
66#define TPC0_EML_CFG_DBG_PADD_COUNT_COUNT_MASK 0xFF
67
68/* TPC0_EML_CFG_DBG_PADD_COUNT_MATCH */
69#define TPC0_EML_CFG_DBG_PADD_COUNT_MATCH_COUNT_SHIFT 0
70#define TPC0_EML_CFG_DBG_PADD_COUNT_MATCH_COUNT_MASK 0xFF
71
72/* TPC0_EML_CFG_DBG_PADD_EN */
73#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE0_SHIFT 0
74#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE0_MASK 0x1
75#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE1_SHIFT 1
76#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE1_MASK 0x2
77#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE2_SHIFT 2
78#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE2_MASK 0x4
79#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE3_SHIFT 3
80#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE3_MASK 0x8
81#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE4_SHIFT 4
82#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE4_MASK 0x10
83#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE5_SHIFT 5
84#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE5_MASK 0x20
85#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE6_SHIFT 6
86#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE6_MASK 0x40
87#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE7_SHIFT 7
88#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE7_MASK 0x80
89
90/* TPC0_EML_CFG_DBG_VPADD_HIGH */
91#define TPC0_EML_CFG_DBG_VPADD_HIGH_ADDRESS_SHIFT 0
92#define TPC0_EML_CFG_DBG_VPADD_HIGH_ADDRESS_MASK 0x1FF
93
94/* TPC0_EML_CFG_DBG_VPADD_LOW */
95#define TPC0_EML_CFG_DBG_VPADD_LOW_ADDRESS_SHIFT 0
96#define TPC0_EML_CFG_DBG_VPADD_LOW_ADDRESS_MASK 0x1FF
97
98/* TPC0_EML_CFG_DBG_VPADD_COUNT */
99#define TPC0_EML_CFG_DBG_VPADD_COUNT_COUNT_SHIFT 0
100#define TPC0_EML_CFG_DBG_VPADD_COUNT_COUNT_MASK 0xFF
101
102/* TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH */
103#define TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_COUNT_SHIFT 0
104#define TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_COUNT_MASK 0xFF
105
106/* TPC0_EML_CFG_DBG_VPADD_EN */
107#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE0_SHIFT 0
108#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE0_MASK 0x1
109#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE1_SHIFT 1
110#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE1_MASK 0x2
111#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N0_SHIFT 2
112#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N0_MASK 0x4
113#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N1_SHIFT 3
114#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N1_MASK 0x8
115
116/* TPC0_EML_CFG_DBG_SPADD_HIGH */
117#define TPC0_EML_CFG_DBG_SPADD_HIGH_ADDRESS_SHIFT 0
118#define TPC0_EML_CFG_DBG_SPADD_HIGH_ADDRESS_MASK 0xFF
119
120/* TPC0_EML_CFG_DBG_SPADD_LOW */
121#define TPC0_EML_CFG_DBG_SPADD_LOW_ADDRESS_SHIFT 0
122#define TPC0_EML_CFG_DBG_SPADD_LOW_ADDRESS_MASK 0xFF
123
124/* TPC0_EML_CFG_DBG_SPADD_COUNT */
125#define TPC0_EML_CFG_DBG_SPADD_COUNT_COUNT_SHIFT 0
126#define TPC0_EML_CFG_DBG_SPADD_COUNT_COUNT_MASK 0xFF
127
128/* TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH */
129#define TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_COUNT_SHIFT 0
130#define TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_COUNT_MASK 0xFF
131
132/* TPC0_EML_CFG_DBG_SPADD_EN */
133#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE0_SHIFT 0
134#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE0_MASK 0x1
135#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE1_SHIFT 1
136#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE1_MASK 0x2
137#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N0_SHIFT 2
138#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N0_MASK 0x4
139#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N1_SHIFT 3
140#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N1_MASK 0x8
141
142/* TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH */
143#define TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_ADDRESS_SHIFT 0
144#define TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
145
146/* TPC0_EML_CFG_DBG_AGUADD_MSB_LOW */
147#define TPC0_EML_CFG_DBG_AGUADD_MSB_LOW_ADDRESS_SHIFT 0
148#define TPC0_EML_CFG_DBG_AGUADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF
149
150/* TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH */
151#define TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_ADDRESS_SHIFT 0
152#define TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
153
154/* TPC0_EML_CFG_DBG_AGUADD_LSB_LOW */
155#define TPC0_EML_CFG_DBG_AGUADD_LSB_LOW_ADDRESS_SHIFT 0
156#define TPC0_EML_CFG_DBG_AGUADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF
157
158/* TPC0_EML_CFG_DBG_AGUADD_COUNT */
159#define TPC0_EML_CFG_DBG_AGUADD_COUNT_COUNT_SHIFT 0
160#define TPC0_EML_CFG_DBG_AGUADD_COUNT_COUNT_MASK 0xFF
161
162/* TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH */
163#define TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_COUNT_SHIFT 0
164#define TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_COUNT_MASK 0xFF
165
166/* TPC0_EML_CFG_DBG_AGUADD_EN */
167#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE0_SHIFT 0
168#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE0_MASK 0x1
169#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE1_SHIFT 1
170#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE1_MASK 0x2
171#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N0_SHIFT 2
172#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N0_MASK 0x4
173#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N1_SHIFT 3
174#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N1_MASK 0x8
175
176/* TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH */
177#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_ADDRESS_SHIFT 0
178#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
179
180/* TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW */
181#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_ADDRESS_SHIFT 0
182#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF
183
184/* TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH */
185#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_ADDRESS_SHIFT 0
186#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
187
188/* TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW */
189#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_ADDRESS_SHIFT 0
190#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF
191
192/* TPC0_EML_CFG_DBG_AXIHBWADD_COUNT */
193#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_COUNT_SHIFT 0
194#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_COUNT_MASK 0xFF
195
196/* TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH */
197#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_MATCH_SHIFT 0
198#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_MATCH_MASK 0xFF
199
200/* TPC0_EML_CFG_DBG_AXIHBWADD_EN */
201#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE0_SHIFT 0
202#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE0_MASK 0x1
203#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE1_SHIFT 1
204#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE1_MASK 0x2
205#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N0_SHIFT 2
206#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N0_MASK 0x4
207#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N1_SHIFT 3
208#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N1_MASK 0x8
209
210/* TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH */
211#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_ADDRESS_SHIFT 0
212#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
213
214/* TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW */
215#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_ADDRESS_SHIFT 0
216#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF
217
218/* TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH */
219#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_ADDRESS_SHIFT 0
220#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
221
222/* TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW */
223#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_ADDRESS_SHIFT 0
224#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF
225
226/* TPC0_EML_CFG_DBG_AXILBWADD_COUNT */
227#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_COUNT_SHIFT 0
228#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_COUNT_MASK 0xFF
229
230/* TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH */
231#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_MATCH_SHIFT 0
232#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_MATCH_MASK 0xFF
233
234/* TPC0_EML_CFG_DBG_AXILBWADD_EN */
235#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE0_SHIFT 0
236#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE0_MASK 0x1
237#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE1_SHIFT 1
238#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE1_MASK 0x2
239#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N0_SHIFT 2
240#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N0_MASK 0x4
241#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N1_SHIFT 3
242#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N1_MASK 0x8
243
244/* TPC0_EML_CFG_DBG_SPDATA */
245#define TPC0_EML_CFG_DBG_SPDATA_DATA_SHIFT 0
246#define TPC0_EML_CFG_DBG_SPDATA_DATA_MASK 0xFFFFFFFF
247
248/* TPC0_EML_CFG_DBG_SPDATA_COUNT */
249#define TPC0_EML_CFG_DBG_SPDATA_COUNT_COUNT_SHIFT 0
250#define TPC0_EML_CFG_DBG_SPDATA_COUNT_COUNT_MASK 0xFF
251
252/* TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH */
253#define TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_MATCH_SHIFT 0
254#define TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_MATCH_MASK 0xFF
255
256/* TPC0_EML_CFG_DBG_SPDATA_EN */
257#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE0_SHIFT 0
258#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE0_MASK 0x1
259#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE1_SHIFT 1
260#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE1_MASK 0x2
261#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N0_SHIFT 2
262#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N0_MASK 0x4
263#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N1_SHIFT 3
264#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N1_MASK 0x8
265
266/* TPC0_EML_CFG_DBG_AXIHBWDATA */
267#define TPC0_EML_CFG_DBG_AXIHBWDATA_DATA_SHIFT 0
268#define TPC0_EML_CFG_DBG_AXIHBWDATA_DATA_MASK 0xFFFFFFFF
269
270/* TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT */
271#define TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT_COUNT_SHIFT 0
272#define TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT_COUNT_MASK 0xFF
273
274/* TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH */
275#define TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH_COUNT_SHIFT 0
276#define TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH_COUNT_MASK 0xFF
277
278/* TPC0_EML_CFG_DBG_AXIHBWDATA_EN */
279#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_ENABLE_SHIFT 0
280#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_ENABLE_MASK 0x1
281#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_RW_N_SHIFT 1
282#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_RW_N_MASK 0x2
283
284/* TPC0_EML_CFG_DBG_AXILBWDATA */
285#define TPC0_EML_CFG_DBG_AXILBWDATA_DATA_SHIFT 0
286#define TPC0_EML_CFG_DBG_AXILBWDATA_DATA_MASK 0xFFFFFFFF
287
288/* TPC0_EML_CFG_DBG_AXILBWDATA_COUNT */
289#define TPC0_EML_CFG_DBG_AXILBWDATA_COUNT_COUNT_SHIFT 0
290#define TPC0_EML_CFG_DBG_AXILBWDATA_COUNT_COUNT_MASK 0xFF
291
292/* TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH */
293#define TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH_MATCH_SHIFT 0
294#define TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH_MATCH_MASK 0xFF
295
296/* TPC0_EML_CFG_DBG_AXILBWDATA_EN */
297#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_ENABLE_SHIFT 0
298#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_ENABLE_MASK 0x1
299#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_RW_N_SHIFT 1
300#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_RW_N_MASK 0x2
301
302/* TPC0_EML_CFG_DBG_D0_PC */
303#define TPC0_EML_CFG_DBG_D0_PC_PC_SHIFT 0
304#define TPC0_EML_CFG_DBG_D0_PC_PC_MASK 0xFFFFFFFF
305
306/* TPC0_EML_CFG_RTTCONFIG */
307#define TPC0_EML_CFG_RTTCONFIG_TR_EN_SHIFT 0
308#define TPC0_EML_CFG_RTTCONFIG_TR_EN_MASK 0x1
309#define TPC0_EML_CFG_RTTCONFIG_PRIO_SHIFT 1
310#define TPC0_EML_CFG_RTTCONFIG_PRIO_MASK 0x2
311
312/* TPC0_EML_CFG_RTTPREDICATE */
313#define TPC0_EML_CFG_RTTPREDICATE_TR_EN_SHIFT 0
314#define TPC0_EML_CFG_RTTPREDICATE_TR_EN_MASK 0x1
315#define TPC0_EML_CFG_RTTPREDICATE_GEN_SHIFT 1
316#define TPC0_EML_CFG_RTTPREDICATE_GEN_MASK 0x2
317#define TPC0_EML_CFG_RTTPREDICATE_USE_INTERVAL_SHIFT 2
318#define TPC0_EML_CFG_RTTPREDICATE_USE_INTERVAL_MASK 0x4
319#define TPC0_EML_CFG_RTTPREDICATE_SPRF_MASK_SHIFT 16
320#define TPC0_EML_CFG_RTTPREDICATE_SPRF_MASK_MASK 0xFFFF0000
321
322/* TPC0_EML_CFG_RTTPREDICATE_INTV */
323#define TPC0_EML_CFG_RTTPREDICATE_INTV_INTERVAL_SHIFT 0
324#define TPC0_EML_CFG_RTTPREDICATE_INTV_INTERVAL_MASK 0xFFFFFFFF
325
326/* TPC0_EML_CFG_RTTTS */
327#define TPC0_EML_CFG_RTTTS_TR_EN_SHIFT 0
328#define TPC0_EML_CFG_RTTTS_TR_EN_MASK 0x1
329#define TPC0_EML_CFG_RTTTS_GEN_SHIFT 1
330#define TPC0_EML_CFG_RTTTS_GEN_MASK 0x2
331#define TPC0_EML_CFG_RTTTS_COMPRESS_EN_SHIFT 2
332#define TPC0_EML_CFG_RTTTS_COMPRESS_EN_MASK 0x4
333
334/* TPC0_EML_CFG_RTTTS_INTV */
335#define TPC0_EML_CFG_RTTTS_INTV_INTERVAL_SHIFT 0
336#define TPC0_EML_CFG_RTTTS_INTV_INTERVAL_MASK 0xFFFFFFFF
337
338/* TPC0_EML_CFG_DBG_INST_INSERT */
339#define TPC0_EML_CFG_DBG_INST_INSERT_INST_SHIFT 0
340#define TPC0_EML_CFG_DBG_INST_INSERT_INST_MASK 0xFFFFFFFF
341
342/* TPC0_EML_CFG_DBG_INST_INSERT_CTL */
343#define TPC0_EML_CFG_DBG_INST_INSERT_CTL_INSERT_SHIFT 0
344#define TPC0_EML_CFG_DBG_INST_INSERT_CTL_INSERT_MASK 0x1
345
346#endif /* ASIC_REG_TPC0_EML_CFG_MASKS_H_ */
347
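A minimal sketch of composing a register value from the SHIFT/MASK pairs above, here for TPC0_EML_CFG_DBG_CNT. The function is illustrative only; whether the hardware accepts this particular bit combination is not something the header states:

	/* Illustrative only: build a DBG_CNT value that sets debug enable and
	 * single step, then clamp it to the two fields' masks.
	 */
	static inline u32 example_eml_dbg_cnt_single_step(void)
	{
		u32 val = 0;

		val |= 1 << TPC0_EML_CFG_DBG_CNT_DBG_EN_SHIFT;
		val |= 1 << TPC0_EML_CFG_DBG_CNT_SNG_STEP_SHIFT;

		return val & (TPC0_EML_CFG_DBG_CNT_DBG_EN_MASK |
				TPC0_EML_CFG_DBG_CNT_SNG_STEP_MASK);
	}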
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
new file mode 100644
index 000000000000..f1a1b4fa4841
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
@@ -0,0 +1,313 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC0_EML_CFG_REGS_H_
14#define ASIC_REG_TPC0_EML_CFG_REGS_H_
15
16/*
17 *****************************************
18 * TPC0_EML_CFG (Prototype: TPC_EML_CFG)
19 *****************************************
20 */
21
22#define mmTPC0_EML_CFG_DBG_CNT 0x3040000
23
24#define mmTPC0_EML_CFG_DBG_STS 0x3040004
25
26#define mmTPC0_EML_CFG_DBG_PADD_0 0x3040008
27
28#define mmTPC0_EML_CFG_DBG_PADD_1 0x304000C
29
30#define mmTPC0_EML_CFG_DBG_PADD_2 0x3040010
31
32#define mmTPC0_EML_CFG_DBG_PADD_3 0x3040014
33
34#define mmTPC0_EML_CFG_DBG_PADD_4 0x3040018
35
36#define mmTPC0_EML_CFG_DBG_PADD_5 0x304001C
37
38#define mmTPC0_EML_CFG_DBG_PADD_6 0x3040020
39
40#define mmTPC0_EML_CFG_DBG_PADD_7 0x3040024
41
42#define mmTPC0_EML_CFG_DBG_PADD_COUNT_0 0x3040028
43
44#define mmTPC0_EML_CFG_DBG_PADD_COUNT_1 0x304002C
45
46#define mmTPC0_EML_CFG_DBG_PADD_COUNT_2 0x3040030
47
48#define mmTPC0_EML_CFG_DBG_PADD_COUNT_3 0x3040034
49
50#define mmTPC0_EML_CFG_DBG_PADD_COUNT_4 0x3040038
51
52#define mmTPC0_EML_CFG_DBG_PADD_COUNT_5 0x304003C
53
54#define mmTPC0_EML_CFG_DBG_PADD_COUNT_6 0x3040040
55
56#define mmTPC0_EML_CFG_DBG_PADD_COUNT_7 0x3040044
57
58#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_0 0x3040048
59
60#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_1 0x304004C
61
62#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_2 0x3040050
63
64#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_3 0x3040054
65
66#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_4 0x3040058
67
68#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_5 0x304005C
69
70#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_6 0x3040060
71
72#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_7 0x3040064
73
74#define mmTPC0_EML_CFG_DBG_PADD_EN 0x3040068
75
76#define mmTPC0_EML_CFG_DBG_VPADD_HIGH_0 0x304006C
77
78#define mmTPC0_EML_CFG_DBG_VPADD_HIGH_1 0x3040070
79
80#define mmTPC0_EML_CFG_DBG_VPADD_LOW_0 0x3040074
81
82#define mmTPC0_EML_CFG_DBG_VPADD_LOW_1 0x3040078
83
84#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_0 0x304007C
85
86#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_1 0x3040080
87
88#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_0 0x3040084
89
90#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_1 0x3040088
91
92#define mmTPC0_EML_CFG_DBG_VPADD_EN 0x304008C
93
94#define mmTPC0_EML_CFG_DBG_SPADD_HIGH_0 0x3040090
95
96#define mmTPC0_EML_CFG_DBG_SPADD_HIGH_1 0x3040094
97
98#define mmTPC0_EML_CFG_DBG_SPADD_LOW_0 0x3040098
99
100#define mmTPC0_EML_CFG_DBG_SPADD_LOW_1 0x304009C
101
102#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_0 0x30400A0
103
104#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_1 0x30400A4
105
106#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_0 0x30400A8
107
108#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_1 0x30400AC
109
110#define mmTPC0_EML_CFG_DBG_SPADD_EN 0x30400B0
111
112#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_0 0x30400B4
113
114#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_1 0x30400B8
115
116#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_LOW_0 0x30400BC
117
118#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_LOW_1 0x30400C0
119
120#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_0 0x30400C4
121
122#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_1 0x30400C8
123
124#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_LOW_0 0x30400CC
125
126#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_LOW_1 0x30400D0
127
128#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_0 0x30400D4
129
130#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_1 0x30400D8
131
132#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_0 0x30400DC
133
134#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_1 0x30400E0
135
136#define mmTPC0_EML_CFG_DBG_AGUADD_EN 0x30400E4
137
138#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_0 0x30400E8
139
140#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_1 0x30400EC
141
142#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_0 0x30400F0
143
144#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_1 0x30400F4
145
146#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_0 0x30400F8
147
148#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_1 0x30400FC
149
150#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_0 0x3040100
151
152#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_1 0x3040104
153
154#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_0 0x3040108
155
156#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_1 0x304010C
157
158#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_0 0x3040110
159
160#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_1 0x3040114
161
162#define mmTPC0_EML_CFG_DBG_AXIHBWADD_EN 0x3040118
163
164#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_0 0x304011C
165
166#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_1 0x3040120
167
168#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_0 0x3040124
169
170#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_1 0x3040128
171
172#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_0 0x304012C
173
174#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_1 0x3040130
175
176#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_0 0x3040134
177
178#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_1 0x3040138
179
180#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_0 0x304013C
181
182#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_1 0x3040140
183
184#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_0 0x3040144
185
186#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_1 0x3040148
187
188#define mmTPC0_EML_CFG_DBG_AXILBWADD_EN 0x304014C
189
190#define mmTPC0_EML_CFG_DBG_SPDATA_0 0x3040150
191
192#define mmTPC0_EML_CFG_DBG_SPDATA_1 0x3040154
193
194#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_0 0x3040158
195
196#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_1 0x304015C
197
198#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_0 0x3040160
199
200#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_1 0x3040164
201
202#define mmTPC0_EML_CFG_DBG_SPDATA_EN 0x3040168
203
204#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_0 0x304016C
205
206#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_1 0x3040170
207
208#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_2 0x3040174
209
210#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_3 0x3040178
211
212#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_4 0x304017C
213
214#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_5 0x3040180
215
216#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_6 0x3040184
217
218#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_7 0x3040188
219
220#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_8 0x304018C
221
222#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_9 0x3040190
223
224#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_10 0x3040194
225
226#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_11 0x3040198
227
228#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_12 0x304019C
229
230#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_13 0x30401A0
231
232#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_14 0x30401A4
233
234#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_15 0x30401A8
235
236#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_16 0x30401AC
237
238#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_17 0x30401B0
239
240#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_18 0x30401B4
241
242#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_19 0x30401B8
243
244#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_20 0x30401BC
245
246#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_21 0x30401C0
247
248#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_22 0x30401C4
249
250#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_23 0x30401C8
251
252#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_24 0x30401CC
253
254#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_25 0x30401D0
255
256#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_26 0x30401D4
257
258#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_27 0x30401D8
259
260#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_28 0x30401DC
261
262#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_29 0x30401E0
263
264#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_30 0x30401E4
265
266#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_31 0x30401E8
267
268#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_COUNT 0x30401EC
269
270#define mmTPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH 0x30401F0
271
272#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_EN 0x30401F4
273
274#define mmTPC0_EML_CFG_DBG_AXILBWDATA 0x30401F8
275
276#define mmTPC0_EML_CFG_DBG_AXILBWDATA_COUNT 0x30401FC
277
278#define mmTPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH 0x3040200
279
280#define mmTPC0_EML_CFG_DBG_AXILBWDATA_EN 0x3040204
281
282#define mmTPC0_EML_CFG_DBG_D0_PC 0x3040208
283
284#define mmTPC0_EML_CFG_RTTCONFIG 0x3040300
285
286#define mmTPC0_EML_CFG_RTTPREDICATE 0x3040304
287
288#define mmTPC0_EML_CFG_RTTPREDICATE_INTV 0x3040308
289
290#define mmTPC0_EML_CFG_RTTTS 0x304030C
291
292#define mmTPC0_EML_CFG_RTTTS_INTV 0x3040310
293
294#define mmTPC0_EML_CFG_DBG_INST_INSERT_0 0x3040314
295
296#define mmTPC0_EML_CFG_DBG_INST_INSERT_1 0x3040318
297
298#define mmTPC0_EML_CFG_DBG_INST_INSERT_2 0x304031C
299
300#define mmTPC0_EML_CFG_DBG_INST_INSERT_3 0x3040320
301
302#define mmTPC0_EML_CFG_DBG_INST_INSERT_4 0x3040324
303
304#define mmTPC0_EML_CFG_DBG_INST_INSERT_5 0x3040328
305
306#define mmTPC0_EML_CFG_DBG_INST_INSERT_6 0x304032C
307
308#define mmTPC0_EML_CFG_DBG_INST_INSERT_7 0x3040330
309
310#define mmTPC0_EML_CFG_DBG_INST_INSERT_CTL 0x3040334
311
312#endif /* ASIC_REG_TPC0_EML_CFG_REGS_H_ */
313
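The 32 AXIHBWDATA words listed above sit at consecutive 4-byte offsets (0x304016C through 0x30401E8), so they can be read back-to-back. A hypothetical sketch, assuming an ioremapped cfg_base and a caller-provided 32-entry buffer:

	/* Illustrative only: dump the 32 consecutive AXIHBWDATA words. */
	static void example_read_axihbwdata(void __iomem *cfg_base, u32 *buf)
	{
		unsigned int i;

		for (i = 0; i < 32; i++)
			buf[i] = readl(cfg_base +
					mmTPC0_EML_CFG_DBG_AXIHBWDATA_0 + i * 4);
	}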
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
new file mode 100644
index 000000000000..7f86621179a5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
@@ -0,0 +1,209 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC0_NRTR_MASKS_H_
14#define ASIC_REG_TPC0_NRTR_MASKS_H_
15
16/*
17 *****************************************
18 * TPC0_NRTR (Prototype: IF_NRTR)
19 *****************************************
20 */
21
22/* TPC0_NRTR_HBW_MAX_CRED */
23#define TPC0_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0
24#define TPC0_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F
25#define TPC0_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8
26#define TPC0_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00
27#define TPC0_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16
28#define TPC0_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000
29#define TPC0_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24
30#define TPC0_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000
31
32/* TPC0_NRTR_LBW_MAX_CRED */
33#define TPC0_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0
34#define TPC0_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F
35#define TPC0_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8
36#define TPC0_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00
37#define TPC0_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16
38#define TPC0_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000
39#define TPC0_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24
40#define TPC0_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000
41
42/* TPC0_NRTR_DBG_E_ARB */
43#define TPC0_NRTR_DBG_E_ARB_W_SHIFT 0
44#define TPC0_NRTR_DBG_E_ARB_W_MASK 0x7
45#define TPC0_NRTR_DBG_E_ARB_S_SHIFT 8
46#define TPC0_NRTR_DBG_E_ARB_S_MASK 0x700
47#define TPC0_NRTR_DBG_E_ARB_N_SHIFT 16
48#define TPC0_NRTR_DBG_E_ARB_N_MASK 0x70000
49#define TPC0_NRTR_DBG_E_ARB_L_SHIFT 24
50#define TPC0_NRTR_DBG_E_ARB_L_MASK 0x7000000
51
52/* TPC0_NRTR_DBG_W_ARB */
53#define TPC0_NRTR_DBG_W_ARB_E_SHIFT 0
54#define TPC0_NRTR_DBG_W_ARB_E_MASK 0x7
55#define TPC0_NRTR_DBG_W_ARB_S_SHIFT 8
56#define TPC0_NRTR_DBG_W_ARB_S_MASK 0x700
57#define TPC0_NRTR_DBG_W_ARB_N_SHIFT 16
58#define TPC0_NRTR_DBG_W_ARB_N_MASK 0x70000
59#define TPC0_NRTR_DBG_W_ARB_L_SHIFT 24
60#define TPC0_NRTR_DBG_W_ARB_L_MASK 0x7000000
61
62/* TPC0_NRTR_DBG_N_ARB */
63#define TPC0_NRTR_DBG_N_ARB_W_SHIFT 0
64#define TPC0_NRTR_DBG_N_ARB_W_MASK 0x7
65#define TPC0_NRTR_DBG_N_ARB_E_SHIFT 8
66#define TPC0_NRTR_DBG_N_ARB_E_MASK 0x700
67#define TPC0_NRTR_DBG_N_ARB_S_SHIFT 16
68#define TPC0_NRTR_DBG_N_ARB_S_MASK 0x70000
69#define TPC0_NRTR_DBG_N_ARB_L_SHIFT 24
70#define TPC0_NRTR_DBG_N_ARB_L_MASK 0x7000000
71
72/* TPC0_NRTR_DBG_S_ARB */
73#define TPC0_NRTR_DBG_S_ARB_W_SHIFT 0
74#define TPC0_NRTR_DBG_S_ARB_W_MASK 0x7
75#define TPC0_NRTR_DBG_S_ARB_E_SHIFT 8
76#define TPC0_NRTR_DBG_S_ARB_E_MASK 0x700
77#define TPC0_NRTR_DBG_S_ARB_N_SHIFT 16
78#define TPC0_NRTR_DBG_S_ARB_N_MASK 0x70000
79#define TPC0_NRTR_DBG_S_ARB_L_SHIFT 24
80#define TPC0_NRTR_DBG_S_ARB_L_MASK 0x7000000
81
82/* TPC0_NRTR_DBG_L_ARB */
83#define TPC0_NRTR_DBG_L_ARB_W_SHIFT 0
84#define TPC0_NRTR_DBG_L_ARB_W_MASK 0x7
85#define TPC0_NRTR_DBG_L_ARB_E_SHIFT 8
86#define TPC0_NRTR_DBG_L_ARB_E_MASK 0x700
87#define TPC0_NRTR_DBG_L_ARB_S_SHIFT 16
88#define TPC0_NRTR_DBG_L_ARB_S_MASK 0x70000
89#define TPC0_NRTR_DBG_L_ARB_N_SHIFT 24
90#define TPC0_NRTR_DBG_L_ARB_N_MASK 0x7000000
91
92/* TPC0_NRTR_DBG_E_ARB_MAX */
93#define TPC0_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0
94#define TPC0_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F
95
96/* TPC0_NRTR_DBG_W_ARB_MAX */
97#define TPC0_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0
98#define TPC0_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F
99
100/* TPC0_NRTR_DBG_N_ARB_MAX */
101#define TPC0_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0
102#define TPC0_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F
103
104/* TPC0_NRTR_DBG_S_ARB_MAX */
105#define TPC0_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0
106#define TPC0_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F
107
108/* TPC0_NRTR_DBG_L_ARB_MAX */
109#define TPC0_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0
110#define TPC0_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F
111
112/* TPC0_NRTR_SPLIT_COEF */
113#define TPC0_NRTR_SPLIT_COEF_VAL_SHIFT 0
114#define TPC0_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF
115
116/* TPC0_NRTR_SPLIT_CFG */
117#define TPC0_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0
118#define TPC0_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1
119#define TPC0_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1
120#define TPC0_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2
121#define TPC0_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2
122#define TPC0_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC
123#define TPC0_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4
124#define TPC0_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10
125#define TPC0_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5
126#define TPC0_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20
127#define TPC0_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6
128#define TPC0_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0
129
130/* TPC0_NRTR_SPLIT_RD_SAT */
131#define TPC0_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0
132#define TPC0_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF
133
134/* TPC0_NRTR_SPLIT_RD_RST_TOKEN */
135#define TPC0_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0
136#define TPC0_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF
137
138/* TPC0_NRTR_SPLIT_RD_TIMEOUT */
139#define TPC0_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0
140#define TPC0_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF
141
142/* TPC0_NRTR_SPLIT_WR_SAT */
143#define TPC0_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0
144#define TPC0_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF
145
146/* TPC0_NRTR_WPLIT_WR_TST_TOLEN */
147#define TPC0_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0
148#define TPC0_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF
149
150/* TPC0_NRTR_SPLIT_WR_TIMEOUT */
151#define TPC0_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0
152#define TPC0_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF
153
154/* TPC0_NRTR_HBW_RANGE_HIT */
155#define TPC0_NRTR_HBW_RANGE_HIT_IND_SHIFT 0
156#define TPC0_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF
157
158/* TPC0_NRTR_HBW_RANGE_MASK_L */
159#define TPC0_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0
160#define TPC0_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF
161
162/* TPC0_NRTR_HBW_RANGE_MASK_H */
163#define TPC0_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0
164#define TPC0_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF
165
166/* TPC0_NRTR_HBW_RANGE_BASE_L */
167#define TPC0_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0
168#define TPC0_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF
169
170/* TPC0_NRTR_HBW_RANGE_BASE_H */
171#define TPC0_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0
172#define TPC0_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF
173
174/* TPC0_NRTR_LBW_RANGE_HIT */
175#define TPC0_NRTR_LBW_RANGE_HIT_IND_SHIFT 0
176#define TPC0_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF
177
178/* TPC0_NRTR_LBW_RANGE_MASK */
179#define TPC0_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0
180#define TPC0_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF
181
182/* TPC0_NRTR_LBW_RANGE_BASE */
183#define TPC0_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0
184#define TPC0_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF
185
186/* TPC0_NRTR_RGLTR */
187#define TPC0_NRTR_RGLTR_WR_EN_SHIFT 0
188#define TPC0_NRTR_RGLTR_WR_EN_MASK 0x1
189#define TPC0_NRTR_RGLTR_RD_EN_SHIFT 4
190#define TPC0_NRTR_RGLTR_RD_EN_MASK 0x10
191
192/* TPC0_NRTR_RGLTR_WR_RESULT */
193#define TPC0_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0
194#define TPC0_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF
195
196/* TPC0_NRTR_RGLTR_RD_RESULT */
197#define TPC0_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0
198#define TPC0_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF
199
200/* TPC0_NRTR_SCRAMB_EN */
201#define TPC0_NRTR_SCRAMB_EN_VAL_SHIFT 0
202#define TPC0_NRTR_SCRAMB_EN_VAL_MASK 0x1
203
204/* TPC0_NRTR_NON_LIN_SCRAMB */
205#define TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0
206#define TPC0_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
207
208#endif /* ASIC_REG_TPC0_NRTR_MASKS_H_ */
209
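For illustration, the four 6-bit credit fields of TPC0_NRTR_HBW_MAX_CRED can be unpacked with the masks above. The helper below is a hypothetical sketch, not part of the patch, and assumes kernel types (u32, u8) are available:

	/* Illustrative only: unpack the credit fields of a raw HBW_MAX_CRED value. */
	static inline void example_unpack_hbw_max_cred(u32 cred, u8 *wr_rq,
						u8 *wr_rs, u8 *rd_rq, u8 *rd_rs)
	{
		*wr_rq = (cred & TPC0_NRTR_HBW_MAX_CRED_WR_RQ_MASK) >>
				TPC0_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT;
		*wr_rs = (cred & TPC0_NRTR_HBW_MAX_CRED_WR_RS_MASK) >>
				TPC0_NRTR_HBW_MAX_CRED_WR_RS_SHIFT;
		*rd_rq = (cred & TPC0_NRTR_HBW_MAX_CRED_RD_RQ_MASK) >>
				TPC0_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT;
		*rd_rs = (cred & TPC0_NRTR_HBW_MAX_CRED_RD_RS_MASK) >>
				TPC0_NRTR_HBW_MAX_CRED_RD_RS_SHIFT;
	}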
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
new file mode 100644
index 000000000000..dc280f4e6608
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
@@ -0,0 +1,227 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC0_NRTR_REGS_H_
14#define ASIC_REG_TPC0_NRTR_REGS_H_
15
16/*
17 *****************************************
18 * TPC0_NRTR (Prototype: IF_NRTR)
19 *****************************************
20 */
21
22#define mmTPC0_NRTR_HBW_MAX_CRED 0xE00100
23
24#define mmTPC0_NRTR_LBW_MAX_CRED 0xE00120
25
26#define mmTPC0_NRTR_DBG_E_ARB 0xE00300
27
28#define mmTPC0_NRTR_DBG_W_ARB 0xE00304
29
30#define mmTPC0_NRTR_DBG_N_ARB 0xE00308
31
32#define mmTPC0_NRTR_DBG_S_ARB 0xE0030C
33
34#define mmTPC0_NRTR_DBG_L_ARB 0xE00310
35
36#define mmTPC0_NRTR_DBG_E_ARB_MAX 0xE00320
37
38#define mmTPC0_NRTR_DBG_W_ARB_MAX 0xE00324
39
40#define mmTPC0_NRTR_DBG_N_ARB_MAX 0xE00328
41
42#define mmTPC0_NRTR_DBG_S_ARB_MAX 0xE0032C
43
44#define mmTPC0_NRTR_DBG_L_ARB_MAX 0xE00330
45
46#define mmTPC0_NRTR_SPLIT_COEF_0 0xE00400
47
48#define mmTPC0_NRTR_SPLIT_COEF_1 0xE00404
49
50#define mmTPC0_NRTR_SPLIT_COEF_2 0xE00408
51
52#define mmTPC0_NRTR_SPLIT_COEF_3 0xE0040C
53
54#define mmTPC0_NRTR_SPLIT_COEF_4 0xE00410
55
56#define mmTPC0_NRTR_SPLIT_COEF_5 0xE00414
57
58#define mmTPC0_NRTR_SPLIT_COEF_6 0xE00418
59
60#define mmTPC0_NRTR_SPLIT_COEF_7 0xE0041C
61
62#define mmTPC0_NRTR_SPLIT_COEF_8 0xE00420
63
64#define mmTPC0_NRTR_SPLIT_COEF_9 0xE00424
65
66#define mmTPC0_NRTR_SPLIT_CFG 0xE00440
67
68#define mmTPC0_NRTR_SPLIT_RD_SAT 0xE00444
69
70#define mmTPC0_NRTR_SPLIT_RD_RST_TOKEN 0xE00448
71
72#define mmTPC0_NRTR_SPLIT_RD_TIMEOUT_0 0xE0044C
73
74#define mmTPC0_NRTR_SPLIT_RD_TIMEOUT_1 0xE00450
75
76#define mmTPC0_NRTR_SPLIT_WR_SAT 0xE00454
77
78#define mmTPC0_NRTR_WPLIT_WR_TST_TOLEN 0xE00458
79
80#define mmTPC0_NRTR_SPLIT_WR_TIMEOUT_0 0xE0045C
81
82#define mmTPC0_NRTR_SPLIT_WR_TIMEOUT_1 0xE00460
83
84#define mmTPC0_NRTR_HBW_RANGE_HIT 0xE00470
85
86#define mmTPC0_NRTR_HBW_RANGE_MASK_L_0 0xE00480
87
88#define mmTPC0_NRTR_HBW_RANGE_MASK_L_1 0xE00484
89
90#define mmTPC0_NRTR_HBW_RANGE_MASK_L_2 0xE00488
91
92#define mmTPC0_NRTR_HBW_RANGE_MASK_L_3 0xE0048C
93
94#define mmTPC0_NRTR_HBW_RANGE_MASK_L_4 0xE00490
95
96#define mmTPC0_NRTR_HBW_RANGE_MASK_L_5 0xE00494
97
98#define mmTPC0_NRTR_HBW_RANGE_MASK_L_6 0xE00498
99
100#define mmTPC0_NRTR_HBW_RANGE_MASK_L_7 0xE0049C
101
102#define mmTPC0_NRTR_HBW_RANGE_MASK_H_0 0xE004A0
103
104#define mmTPC0_NRTR_HBW_RANGE_MASK_H_1 0xE004A4
105
106#define mmTPC0_NRTR_HBW_RANGE_MASK_H_2 0xE004A8
107
108#define mmTPC0_NRTR_HBW_RANGE_MASK_H_3 0xE004AC
109
110#define mmTPC0_NRTR_HBW_RANGE_MASK_H_4 0xE004B0
111
112#define mmTPC0_NRTR_HBW_RANGE_MASK_H_5 0xE004B4
113
114#define mmTPC0_NRTR_HBW_RANGE_MASK_H_6 0xE004B8
115
116#define mmTPC0_NRTR_HBW_RANGE_MASK_H_7 0xE004BC
117
118#define mmTPC0_NRTR_HBW_RANGE_BASE_L_0 0xE004C0
119
120#define mmTPC0_NRTR_HBW_RANGE_BASE_L_1 0xE004C4
121
122#define mmTPC0_NRTR_HBW_RANGE_BASE_L_2 0xE004C8
123
124#define mmTPC0_NRTR_HBW_RANGE_BASE_L_3 0xE004CC
125
126#define mmTPC0_NRTR_HBW_RANGE_BASE_L_4 0xE004D0
127
128#define mmTPC0_NRTR_HBW_RANGE_BASE_L_5 0xE004D4
129
130#define mmTPC0_NRTR_HBW_RANGE_BASE_L_6 0xE004D8
131
132#define mmTPC0_NRTR_HBW_RANGE_BASE_L_7 0xE004DC
133
134#define mmTPC0_NRTR_HBW_RANGE_BASE_H_0 0xE004E0
135
136#define mmTPC0_NRTR_HBW_RANGE_BASE_H_1 0xE004E4
137
138#define mmTPC0_NRTR_HBW_RANGE_BASE_H_2 0xE004E8
139
140#define mmTPC0_NRTR_HBW_RANGE_BASE_H_3 0xE004EC
141
142#define mmTPC0_NRTR_HBW_RANGE_BASE_H_4 0xE004F0
143
144#define mmTPC0_NRTR_HBW_RANGE_BASE_H_5 0xE004F4
145
146#define mmTPC0_NRTR_HBW_RANGE_BASE_H_6 0xE004F8
147
148#define mmTPC0_NRTR_HBW_RANGE_BASE_H_7 0xE004FC
149
150#define mmTPC0_NRTR_LBW_RANGE_HIT 0xE00500
151
152#define mmTPC0_NRTR_LBW_RANGE_MASK_0 0xE00510
153
154#define mmTPC0_NRTR_LBW_RANGE_MASK_1 0xE00514
155
156#define mmTPC0_NRTR_LBW_RANGE_MASK_2 0xE00518
157
158#define mmTPC0_NRTR_LBW_RANGE_MASK_3 0xE0051C
159
160#define mmTPC0_NRTR_LBW_RANGE_MASK_4 0xE00520
161
162#define mmTPC0_NRTR_LBW_RANGE_MASK_5 0xE00524
163
164#define mmTPC0_NRTR_LBW_RANGE_MASK_6 0xE00528
165
166#define mmTPC0_NRTR_LBW_RANGE_MASK_7 0xE0052C
167
168#define mmTPC0_NRTR_LBW_RANGE_MASK_8 0xE00530
169
170#define mmTPC0_NRTR_LBW_RANGE_MASK_9 0xE00534
171
172#define mmTPC0_NRTR_LBW_RANGE_MASK_10 0xE00538
173
174#define mmTPC0_NRTR_LBW_RANGE_MASK_11 0xE0053C
175
176#define mmTPC0_NRTR_LBW_RANGE_MASK_12 0xE00540
177
178#define mmTPC0_NRTR_LBW_RANGE_MASK_13 0xE00544
179
180#define mmTPC0_NRTR_LBW_RANGE_MASK_14 0xE00548
181
182#define mmTPC0_NRTR_LBW_RANGE_MASK_15 0xE0054C
183
184#define mmTPC0_NRTR_LBW_RANGE_BASE_0 0xE00550
185
186#define mmTPC0_NRTR_LBW_RANGE_BASE_1 0xE00554
187
188#define mmTPC0_NRTR_LBW_RANGE_BASE_2 0xE00558
189
190#define mmTPC0_NRTR_LBW_RANGE_BASE_3 0xE0055C
191
192#define mmTPC0_NRTR_LBW_RANGE_BASE_4 0xE00560
193
194#define mmTPC0_NRTR_LBW_RANGE_BASE_5 0xE00564
195
196#define mmTPC0_NRTR_LBW_RANGE_BASE_6 0xE00568
197
198#define mmTPC0_NRTR_LBW_RANGE_BASE_7 0xE0056C
199
200#define mmTPC0_NRTR_LBW_RANGE_BASE_8 0xE00570
201
202#define mmTPC0_NRTR_LBW_RANGE_BASE_9 0xE00574
203
204#define mmTPC0_NRTR_LBW_RANGE_BASE_10 0xE00578
205
206#define mmTPC0_NRTR_LBW_RANGE_BASE_11 0xE0057C
207
208#define mmTPC0_NRTR_LBW_RANGE_BASE_12 0xE00580
209
210#define mmTPC0_NRTR_LBW_RANGE_BASE_13 0xE00584
211
212#define mmTPC0_NRTR_LBW_RANGE_BASE_14 0xE00588
213
214#define mmTPC0_NRTR_LBW_RANGE_BASE_15 0xE0058C
215
216#define mmTPC0_NRTR_RGLTR 0xE00590
217
218#define mmTPC0_NRTR_RGLTR_WR_RESULT 0xE00594
219
220#define mmTPC0_NRTR_RGLTR_RD_RESULT 0xE00598
221
222#define mmTPC0_NRTR_SCRAMB_EN 0xE00600
223
224#define mmTPC0_NRTR_NON_LIN_SCRAMB 0xE00604
225
226#endif /* ASIC_REG_TPC0_NRTR_REGS_H_ */
227
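Note that the 16 LBW range base registers above follow a fixed 4-byte stride (0xE00550 through 0xE0058C), so index i can be addressed arithmetically. A hypothetical helper, shown only to make the layout explicit:

	/* Illustrative only: offset of the i-th LBW range base register, i < 16. */
	static inline u32 example_lbw_range_base_addr(unsigned int i)
	{
		return mmTPC0_NRTR_LBW_RANGE_BASE_0 + i * 4;
	}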
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
new file mode 100644
index 000000000000..80d97ee3d8d6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
@@ -0,0 +1,465 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC0_QM_MASKS_H_
14#define ASIC_REG_TPC0_QM_MASKS_H_
15
16/*
17 *****************************************
18 * TPC0_QM (Prototype: QMAN)
19 *****************************************
20 */
21
22/* TPC0_QM_GLBL_CFG0 */
23#define TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT 0
24#define TPC0_QM_GLBL_CFG0_PQF_EN_MASK 0x1
25#define TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT 1
26#define TPC0_QM_GLBL_CFG0_CQF_EN_MASK 0x2
27#define TPC0_QM_GLBL_CFG0_CP_EN_SHIFT 2
28#define TPC0_QM_GLBL_CFG0_CP_EN_MASK 0x4
29#define TPC0_QM_GLBL_CFG0_DMA_EN_SHIFT 3
30#define TPC0_QM_GLBL_CFG0_DMA_EN_MASK 0x8
31
32/* TPC0_QM_GLBL_CFG1 */
33#define TPC0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
34#define TPC0_QM_GLBL_CFG1_PQF_STOP_MASK 0x1
35#define TPC0_QM_GLBL_CFG1_CQF_STOP_SHIFT 1
36#define TPC0_QM_GLBL_CFG1_CQF_STOP_MASK 0x2
37#define TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT 2
38#define TPC0_QM_GLBL_CFG1_CP_STOP_MASK 0x4
39#define TPC0_QM_GLBL_CFG1_DMA_STOP_SHIFT 3
40#define TPC0_QM_GLBL_CFG1_DMA_STOP_MASK 0x8
41#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 8
42#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0x100
43#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 9
44#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x200
45#define TPC0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 10
46#define TPC0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x400
47#define TPC0_QM_GLBL_CFG1_DMA_FLUSH_SHIFT 11
48#define TPC0_QM_GLBL_CFG1_DMA_FLUSH_MASK 0x800
49
50/* TPC0_QM_GLBL_PROT */
51#define TPC0_QM_GLBL_PROT_PQF_PROT_SHIFT 0
52#define TPC0_QM_GLBL_PROT_PQF_PROT_MASK 0x1
53#define TPC0_QM_GLBL_PROT_CQF_PROT_SHIFT 1
54#define TPC0_QM_GLBL_PROT_CQF_PROT_MASK 0x2
55#define TPC0_QM_GLBL_PROT_CP_PROT_SHIFT 2
56#define TPC0_QM_GLBL_PROT_CP_PROT_MASK 0x4
57#define TPC0_QM_GLBL_PROT_DMA_PROT_SHIFT 3
58#define TPC0_QM_GLBL_PROT_DMA_PROT_MASK 0x8
59#define TPC0_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
60#define TPC0_QM_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
61#define TPC0_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
62#define TPC0_QM_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
63#define TPC0_QM_GLBL_PROT_CP_ERR_PROT_SHIFT 6
64#define TPC0_QM_GLBL_PROT_CP_ERR_PROT_MASK 0x40
65#define TPC0_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
66#define TPC0_QM_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
67
68/* TPC0_QM_GLBL_ERR_CFG */
69#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
70#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
71#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
72#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
73#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
74#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
75#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
76#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
77#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
78#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
79#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
80#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
81#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
82#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
83#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
84#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
85#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
86#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
87#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
88#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
89#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
90#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
91#define TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
92#define TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
93
94/* TPC0_QM_GLBL_ERR_ADDR_LO */
95#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
96#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
97
98/* TPC0_QM_GLBL_ERR_ADDR_HI */
99#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
100#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
101
102/* TPC0_QM_GLBL_ERR_WDATA */
103#define TPC0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
104#define TPC0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
105
106/* TPC0_QM_GLBL_SECURE_PROPS */
107#define TPC0_QM_GLBL_SECURE_PROPS_ASID_SHIFT 0
108#define TPC0_QM_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
109#define TPC0_QM_GLBL_SECURE_PROPS_MMBP_SHIFT 10
110#define TPC0_QM_GLBL_SECURE_PROPS_MMBP_MASK 0x400
111
112/* TPC0_QM_GLBL_NON_SECURE_PROPS */
113#define TPC0_QM_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
114#define TPC0_QM_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
115#define TPC0_QM_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
116#define TPC0_QM_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
117
118/* TPC0_QM_GLBL_STS0 */
119#define TPC0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
120#define TPC0_QM_GLBL_STS0_PQF_IDLE_MASK 0x1
121#define TPC0_QM_GLBL_STS0_CQF_IDLE_SHIFT 1
122#define TPC0_QM_GLBL_STS0_CQF_IDLE_MASK 0x2
123#define TPC0_QM_GLBL_STS0_CP_IDLE_SHIFT 2
124#define TPC0_QM_GLBL_STS0_CP_IDLE_MASK 0x4
125#define TPC0_QM_GLBL_STS0_DMA_IDLE_SHIFT 3
126#define TPC0_QM_GLBL_STS0_DMA_IDLE_MASK 0x8
127#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 4
128#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0x10
129#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 5
130#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x20
131#define TPC0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 6
132#define TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x40
133#define TPC0_QM_GLBL_STS0_DMA_IS_STOP_SHIFT 7
134#define TPC0_QM_GLBL_STS0_DMA_IS_STOP_MASK 0x80
135
136/* TPC0_QM_GLBL_STS1 */
137#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0
138#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1
139#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1
140#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2
141#define TPC0_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2
142#define TPC0_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4
143#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
144#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
145#define TPC0_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4
146#define TPC0_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10
147#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
148#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
149#define TPC0_QM_GLBL_STS1_DMA_RD_ERR_SHIFT 8
150#define TPC0_QM_GLBL_STS1_DMA_RD_ERR_MASK 0x100
151#define TPC0_QM_GLBL_STS1_DMA_WR_ERR_SHIFT 9
152#define TPC0_QM_GLBL_STS1_DMA_WR_ERR_MASK 0x200
153#define TPC0_QM_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
154#define TPC0_QM_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
155#define TPC0_QM_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
156#define TPC0_QM_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
157
158/* TPC0_QM_PQ_BASE_LO */
159#define TPC0_QM_PQ_BASE_LO_VAL_SHIFT 0
160#define TPC0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
161
162/* TPC0_QM_PQ_BASE_HI */
163#define TPC0_QM_PQ_BASE_HI_VAL_SHIFT 0
164#define TPC0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
165
166/* TPC0_QM_PQ_SIZE */
167#define TPC0_QM_PQ_SIZE_VAL_SHIFT 0
168#define TPC0_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF
169
170/* TPC0_QM_PQ_PI */
171#define TPC0_QM_PQ_PI_VAL_SHIFT 0
172#define TPC0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
173
174/* TPC0_QM_PQ_CI */
175#define TPC0_QM_PQ_CI_VAL_SHIFT 0
176#define TPC0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
177
178/* TPC0_QM_PQ_CFG0 */
179#define TPC0_QM_PQ_CFG0_RESERVED_SHIFT 0
180#define TPC0_QM_PQ_CFG0_RESERVED_MASK 0x1
181
182/* TPC0_QM_PQ_CFG1 */
183#define TPC0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
184#define TPC0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
185#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
186#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
187
188/* TPC0_QM_PQ_ARUSER */
189#define TPC0_QM_PQ_ARUSER_NOSNOOP_SHIFT 0
190#define TPC0_QM_PQ_ARUSER_NOSNOOP_MASK 0x1
191#define TPC0_QM_PQ_ARUSER_WORD_SHIFT 1
192#define TPC0_QM_PQ_ARUSER_WORD_MASK 0x2
193
194/* TPC0_QM_PQ_PUSH0 */
195#define TPC0_QM_PQ_PUSH0_PTR_LO_SHIFT 0
196#define TPC0_QM_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF
197
198/* TPC0_QM_PQ_PUSH1 */
199#define TPC0_QM_PQ_PUSH1_PTR_HI_SHIFT 0
200#define TPC0_QM_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF
201
202/* TPC0_QM_PQ_PUSH2 */
203#define TPC0_QM_PQ_PUSH2_TSIZE_SHIFT 0
204#define TPC0_QM_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF
205
206/* TPC0_QM_PQ_PUSH3 */
207#define TPC0_QM_PQ_PUSH3_RPT_SHIFT 0
208#define TPC0_QM_PQ_PUSH3_RPT_MASK 0xFFFF
209#define TPC0_QM_PQ_PUSH3_CTL_SHIFT 16
210#define TPC0_QM_PQ_PUSH3_CTL_MASK 0xFFFF0000
211
212/* TPC0_QM_PQ_STS0 */
213#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
214#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
215#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16
216#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
217
218/* TPC0_QM_PQ_STS1 */
219#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
220#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
221#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
222#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
223#define TPC0_QM_PQ_STS1_PQ_BUSY_SHIFT 31
224#define TPC0_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000
225
226/* TPC0_QM_PQ_RD_RATE_LIM_EN */
227#define TPC0_QM_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0
228#define TPC0_QM_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1
229
230/* TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN */
231#define TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
232#define TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
233
234/* TPC0_QM_PQ_RD_RATE_LIM_SAT */
235#define TPC0_QM_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
236#define TPC0_QM_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
237
238/* TPC0_QM_PQ_RD_RATE_LIM_TOUT */
239#define TPC0_QM_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
240#define TPC0_QM_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
241
242/* TPC0_QM_CQ_CFG0 */
243#define TPC0_QM_CQ_CFG0_RESERVED_SHIFT 0
244#define TPC0_QM_CQ_CFG0_RESERVED_MASK 0x1
245
246/* TPC0_QM_CQ_CFG1 */
247#define TPC0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0
248#define TPC0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
249#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
250#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
251
252/* TPC0_QM_CQ_ARUSER */
253#define TPC0_QM_CQ_ARUSER_NOSNOOP_SHIFT 0
254#define TPC0_QM_CQ_ARUSER_NOSNOOP_MASK 0x1
255#define TPC0_QM_CQ_ARUSER_WORD_SHIFT 1
256#define TPC0_QM_CQ_ARUSER_WORD_MASK 0x2
257
258/* TPC0_QM_CQ_PTR_LO */
259#define TPC0_QM_CQ_PTR_LO_VAL_SHIFT 0
260#define TPC0_QM_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
261
262/* TPC0_QM_CQ_PTR_HI */
263#define TPC0_QM_CQ_PTR_HI_VAL_SHIFT 0
264#define TPC0_QM_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
265
266/* TPC0_QM_CQ_TSIZE */
267#define TPC0_QM_CQ_TSIZE_VAL_SHIFT 0
268#define TPC0_QM_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
269
270/* TPC0_QM_CQ_CTL */
271#define TPC0_QM_CQ_CTL_RPT_SHIFT 0
272#define TPC0_QM_CQ_CTL_RPT_MASK 0xFFFF
273#define TPC0_QM_CQ_CTL_CTL_SHIFT 16
274#define TPC0_QM_CQ_CTL_CTL_MASK 0xFFFF0000
275
276/* TPC0_QM_CQ_PTR_LO_STS */
277#define TPC0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0
278#define TPC0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
279
280/* TPC0_QM_CQ_PTR_HI_STS */
281#define TPC0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0
282#define TPC0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
283
284/* TPC0_QM_CQ_TSIZE_STS */
285#define TPC0_QM_CQ_TSIZE_STS_VAL_SHIFT 0
286#define TPC0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
287
288/* TPC0_QM_CQ_CTL_STS */
289#define TPC0_QM_CQ_CTL_STS_RPT_SHIFT 0
290#define TPC0_QM_CQ_CTL_STS_RPT_MASK 0xFFFF
291#define TPC0_QM_CQ_CTL_STS_CTL_SHIFT 16
292#define TPC0_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000
293
294/* TPC0_QM_CQ_STS0 */
295#define TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
296#define TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
297#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16
298#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
299
300/* TPC0_QM_CQ_STS1 */
301#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
302#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
303#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
304#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
305#define TPC0_QM_CQ_STS1_CQ_BUSY_SHIFT 31
306#define TPC0_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000
307
308/* TPC0_QM_CQ_RD_RATE_LIM_EN */
309#define TPC0_QM_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
310#define TPC0_QM_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
311
312/* TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN */
313#define TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
314#define TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
315
316/* TPC0_QM_CQ_RD_RATE_LIM_SAT */
317#define TPC0_QM_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
318#define TPC0_QM_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
319
320/* TPC0_QM_CQ_RD_RATE_LIM_TOUT */
321#define TPC0_QM_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
322#define TPC0_QM_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
323
324/* TPC0_QM_CQ_IFIFO_CNT */
325#define TPC0_QM_CQ_IFIFO_CNT_VAL_SHIFT 0
326#define TPC0_QM_CQ_IFIFO_CNT_VAL_MASK 0x3
327
328/* TPC0_QM_CP_MSG_BASE0_ADDR_LO */
329#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
330#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
331
332/* TPC0_QM_CP_MSG_BASE0_ADDR_HI */
333#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
334#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
335
336/* TPC0_QM_CP_MSG_BASE1_ADDR_LO */
337#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
338#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
339
340/* TPC0_QM_CP_MSG_BASE1_ADDR_HI */
341#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
342#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
343
344/* TPC0_QM_CP_MSG_BASE2_ADDR_LO */
345#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
346#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
347
348/* TPC0_QM_CP_MSG_BASE2_ADDR_HI */
349#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
350#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
351
352/* TPC0_QM_CP_MSG_BASE3_ADDR_LO */
353#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
354#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
355
356/* TPC0_QM_CP_MSG_BASE3_ADDR_HI */
357#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
358#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
359
360/* TPC0_QM_CP_LDMA_TSIZE_OFFSET */
361#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
362#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
363
364/* TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */
365#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
366#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
367
368/* TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET */
369#define TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
370#define TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
371
372/* TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET */
373#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
374#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
375
376/* TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET */
377#define TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
378#define TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
379
380/* TPC0_QM_CP_LDMA_COMMIT_OFFSET */
381#define TPC0_QM_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
382#define TPC0_QM_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
383
384/* TPC0_QM_CP_FENCE0_RDATA */
385#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
386#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
387
388/* TPC0_QM_CP_FENCE1_RDATA */
389#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
390#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
391
392/* TPC0_QM_CP_FENCE2_RDATA */
393#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
394#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
395
396/* TPC0_QM_CP_FENCE3_RDATA */
397#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
398#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
399
400/* TPC0_QM_CP_FENCE0_CNT */
401#define TPC0_QM_CP_FENCE0_CNT_VAL_SHIFT 0
402#define TPC0_QM_CP_FENCE0_CNT_VAL_MASK 0xFF
403
404/* TPC0_QM_CP_FENCE1_CNT */
405#define TPC0_QM_CP_FENCE1_CNT_VAL_SHIFT 0
406#define TPC0_QM_CP_FENCE1_CNT_VAL_MASK 0xFF
407
408/* TPC0_QM_CP_FENCE2_CNT */
409#define TPC0_QM_CP_FENCE2_CNT_VAL_SHIFT 0
410#define TPC0_QM_CP_FENCE2_CNT_VAL_MASK 0xFF
411
412/* TPC0_QM_CP_FENCE3_CNT */
413#define TPC0_QM_CP_FENCE3_CNT_VAL_SHIFT 0
414#define TPC0_QM_CP_FENCE3_CNT_VAL_MASK 0xFF
415
416/* TPC0_QM_CP_STS */
417#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
418#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
419#define TPC0_QM_CP_STS_ERDY_SHIFT 16
420#define TPC0_QM_CP_STS_ERDY_MASK 0x10000
421#define TPC0_QM_CP_STS_RRDY_SHIFT 17
422#define TPC0_QM_CP_STS_RRDY_MASK 0x20000
423#define TPC0_QM_CP_STS_MRDY_SHIFT 18
424#define TPC0_QM_CP_STS_MRDY_MASK 0x40000
425#define TPC0_QM_CP_STS_SW_STOP_SHIFT 19
426#define TPC0_QM_CP_STS_SW_STOP_MASK 0x80000
427#define TPC0_QM_CP_STS_FENCE_ID_SHIFT 20
428#define TPC0_QM_CP_STS_FENCE_ID_MASK 0x300000
429#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
430#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
431
432/* TPC0_QM_CP_CURRENT_INST_LO */
433#define TPC0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0
434#define TPC0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
435
436/* TPC0_QM_CP_CURRENT_INST_HI */
437#define TPC0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0
438#define TPC0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
439
440/* TPC0_QM_CP_BARRIER_CFG */
441#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0
442#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
443
444/* TPC0_QM_CP_DBG_0 */
445#define TPC0_QM_CP_DBG_0_VAL_SHIFT 0
446#define TPC0_QM_CP_DBG_0_VAL_MASK 0xFF
447
448/* TPC0_QM_PQ_BUF_ADDR */
449#define TPC0_QM_PQ_BUF_ADDR_VAL_SHIFT 0
450#define TPC0_QM_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
451
452/* TPC0_QM_PQ_BUF_RDATA */
453#define TPC0_QM_PQ_BUF_RDATA_VAL_SHIFT 0
454#define TPC0_QM_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
455
456/* TPC0_QM_CQ_BUF_ADDR */
457#define TPC0_QM_CQ_BUF_ADDR_VAL_SHIFT 0
458#define TPC0_QM_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
459
460/* TPC0_QM_CQ_BUF_RDATA */
461#define TPC0_QM_CQ_BUF_RDATA_VAL_SHIFT 0
462#define TPC0_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
463
464#endif /* ASIC_REG_TPC0_QM_MASKS_H_ */
465
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
new file mode 100644
index 000000000000..7552d4ba61fe
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
@@ -0,0 +1,179 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC0_QM_REGS_H_
14#define ASIC_REG_TPC0_QM_REGS_H_
15
16/*
17 *****************************************
18 * TPC0_QM (Prototype: QMAN)
19 *****************************************
20 */
21
22#define mmTPC0_QM_GLBL_CFG0 0xE08000
23
24#define mmTPC0_QM_GLBL_CFG1 0xE08004
25
26#define mmTPC0_QM_GLBL_PROT 0xE08008
27
28#define mmTPC0_QM_GLBL_ERR_CFG 0xE0800C
29
30#define mmTPC0_QM_GLBL_ERR_ADDR_LO 0xE08010
31
32#define mmTPC0_QM_GLBL_ERR_ADDR_HI 0xE08014
33
34#define mmTPC0_QM_GLBL_ERR_WDATA 0xE08018
35
36#define mmTPC0_QM_GLBL_SECURE_PROPS 0xE0801C
37
38#define mmTPC0_QM_GLBL_NON_SECURE_PROPS 0xE08020
39
40#define mmTPC0_QM_GLBL_STS0 0xE08024
41
42#define mmTPC0_QM_GLBL_STS1 0xE08028
43
44#define mmTPC0_QM_PQ_BASE_LO 0xE08060
45
46#define mmTPC0_QM_PQ_BASE_HI 0xE08064
47
48#define mmTPC0_QM_PQ_SIZE 0xE08068
49
50#define mmTPC0_QM_PQ_PI 0xE0806C
51
52#define mmTPC0_QM_PQ_CI 0xE08070
53
54#define mmTPC0_QM_PQ_CFG0 0xE08074
55
56#define mmTPC0_QM_PQ_CFG1 0xE08078
57
58#define mmTPC0_QM_PQ_ARUSER 0xE0807C
59
60#define mmTPC0_QM_PQ_PUSH0 0xE08080
61
62#define mmTPC0_QM_PQ_PUSH1 0xE08084
63
64#define mmTPC0_QM_PQ_PUSH2 0xE08088
65
66#define mmTPC0_QM_PQ_PUSH3 0xE0808C
67
68#define mmTPC0_QM_PQ_STS0 0xE08090
69
70#define mmTPC0_QM_PQ_STS1 0xE08094
71
72#define mmTPC0_QM_PQ_RD_RATE_LIM_EN 0xE080A0
73
74#define mmTPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE080A4
75
76#define mmTPC0_QM_PQ_RD_RATE_LIM_SAT 0xE080A8
77
78#define mmTPC0_QM_PQ_RD_RATE_LIM_TOUT 0xE080AC
79
80#define mmTPC0_QM_CQ_CFG0 0xE080B0
81
82#define mmTPC0_QM_CQ_CFG1 0xE080B4
83
84#define mmTPC0_QM_CQ_ARUSER 0xE080B8
85
86#define mmTPC0_QM_CQ_PTR_LO 0xE080C0
87
88#define mmTPC0_QM_CQ_PTR_HI 0xE080C4
89
90#define mmTPC0_QM_CQ_TSIZE 0xE080C8
91
92#define mmTPC0_QM_CQ_CTL 0xE080CC
93
94#define mmTPC0_QM_CQ_PTR_LO_STS 0xE080D4
95
96#define mmTPC0_QM_CQ_PTR_HI_STS 0xE080D8
97
98#define mmTPC0_QM_CQ_TSIZE_STS 0xE080DC
99
100#define mmTPC0_QM_CQ_CTL_STS 0xE080E0
101
102#define mmTPC0_QM_CQ_STS0 0xE080E4
103
104#define mmTPC0_QM_CQ_STS1 0xE080E8
105
106#define mmTPC0_QM_CQ_RD_RATE_LIM_EN 0xE080F0
107
108#define mmTPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE080F4
109
110#define mmTPC0_QM_CQ_RD_RATE_LIM_SAT 0xE080F8
111
112#define mmTPC0_QM_CQ_RD_RATE_LIM_TOUT 0xE080FC
113
114#define mmTPC0_QM_CQ_IFIFO_CNT 0xE08108
115
116#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO 0xE08120
117
118#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI 0xE08124
119
120#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO 0xE08128
121
122#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI 0xE0812C
123
124#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO 0xE08130
125
126#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI 0xE08134
127
128#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO 0xE08138
129
130#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI 0xE0813C
131
132#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET 0xE08140
133
134#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE08144
135
136#define mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE08148
137
138#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE0814C
139
140#define mmTPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE08150
141
142#define mmTPC0_QM_CP_LDMA_COMMIT_OFFSET 0xE08154
143
144#define mmTPC0_QM_CP_FENCE0_RDATA 0xE08158
145
146#define mmTPC0_QM_CP_FENCE1_RDATA 0xE0815C
147
148#define mmTPC0_QM_CP_FENCE2_RDATA 0xE08160
149
150#define mmTPC0_QM_CP_FENCE3_RDATA 0xE08164
151
152#define mmTPC0_QM_CP_FENCE0_CNT 0xE08168
153
154#define mmTPC0_QM_CP_FENCE1_CNT 0xE0816C
155
156#define mmTPC0_QM_CP_FENCE2_CNT 0xE08170
157
158#define mmTPC0_QM_CP_FENCE3_CNT 0xE08174
159
160#define mmTPC0_QM_CP_STS 0xE08178
161
162#define mmTPC0_QM_CP_CURRENT_INST_LO 0xE0817C
163
164#define mmTPC0_QM_CP_CURRENT_INST_HI 0xE08180
165
166#define mmTPC0_QM_CP_BARRIER_CFG 0xE08184
167
168#define mmTPC0_QM_CP_DBG_0 0xE08188
169
170#define mmTPC0_QM_PQ_BUF_ADDR 0xE08300
171
172#define mmTPC0_QM_PQ_BUF_RDATA 0xE08304
173
174#define mmTPC0_QM_CQ_BUF_ADDR 0xE08308
175
176#define mmTPC0_QM_CQ_BUF_RDATA 0xE0830C
177
178#endif /* ASIC_REG_TPC0_QM_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
new file mode 100644
index 000000000000..19894413474a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
@@ -0,0 +1,887 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC1_CFG_REGS_H_
14#define ASIC_REG_TPC1_CFG_REGS_H_
15
16/*
17 *****************************************
18 * TPC1_CFG (Prototype: TPC)
19 *****************************************
20 */
21
22#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE46400
23
24#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE46404
25
26#define mmTPC1_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE46408
27
28#define mmTPC1_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE4640C
29
30#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE46410
31
32#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE46414
33
34#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE46418
35
36#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE4641C
37
38#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE46420
39
40#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE46424
41
42#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE46428
43
44#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE4642C
45
46#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE46430
47
48#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE46434
49
50#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE46438
51
52#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE4643C
53
54#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE46440
55
56#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE46444
57
58#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE46448
59
60#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE4644C
61
62#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE46450
63
64#define mmTPC1_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE46454
65
66#define mmTPC1_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE46458
67
68#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE4645C
69
70#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE46460
71
72#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE46464
73
74#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE46468
75
76#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE4646C
77
78#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE46470
79
80#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE46474
81
82#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE46478
83
84#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE4647C
85
86#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE46480
87
88#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE46484
89
90#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE46488
91
92#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE4648C
93
94#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE46490
95
96#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE46494
97
98#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE46498
99
100#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE4649C
101
102#define mmTPC1_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE464A0
103
104#define mmTPC1_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE464A4
105
106#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE464A8
107
108#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE464AC
109
110#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE464B0
111
112#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE464B4
113
114#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE464B8
115
116#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE464BC
117
118#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE464C0
119
120#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE464C4
121
122#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE464C8
123
124#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE464CC
125
126#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE464D0
127
128#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE464D4
129
130#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE464D8
131
132#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE464DC
133
134#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE464E0
135
136#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE464E4
137
138#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE464E8
139
140#define mmTPC1_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE464EC
141
142#define mmTPC1_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE464F0
143
144#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE464F4
145
146#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE464F8
147
148#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE464FC
149
150#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE46500
151
152#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE46504
153
154#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE46508
155
156#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE4650C
157
158#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE46510
159
160#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE46514
161
162#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE46518
163
164#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE4651C
165
166#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE46520
167
168#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE46524
169
170#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE46528
171
172#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE4652C
173
174#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE46530
175
176#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE46534
177
178#define mmTPC1_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE46538
179
180#define mmTPC1_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE4653C
181
182#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE46540
183
184#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE46544
185
186#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE46548
187
188#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE4654C
189
190#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE46550
191
192#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE46554
193
194#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE46558
195
196#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE4655C
197
198#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE46560
199
200#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE46564
201
202#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE46568
203
204#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE4656C
205
206#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE46570
207
208#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE46574
209
210#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE46578
211
212#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE4657C
213
214#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE46580
215
216#define mmTPC1_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE46584
217
218#define mmTPC1_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE46588
219
220#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE4658C
221
222#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE46590
223
224#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE46594
225
226#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE46598
227
228#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE4659C
229
230#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE465A0
231
232#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE465A4
233
234#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE465A8
235
236#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE465AC
237
238#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE465B0
239
240#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE465B4
241
242#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE465B8
243
244#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE465BC
245
246#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE465C0
247
248#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE465C4
249
250#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE465C8
251
252#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE465CC
253
254#define mmTPC1_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE465D0
255
256#define mmTPC1_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE465D4
257
258#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE465D8
259
260#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE465DC
261
262#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE465E0
263
264#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE465E4
265
266#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE465E8
267
268#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE465EC
269
270#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE465F0
271
272#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE465F4
273
274#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE465F8
275
276#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE465FC
277
278#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE46600
279
280#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE46604
281
282#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE46608
283
284#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE4660C
285
286#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE46610
287
288#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE46614
289
290#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE46618
291
292#define mmTPC1_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE4661C
293
294#define mmTPC1_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE46620
295
296#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE46624
297
298#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE46628
299
300#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE4662C
301
302#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE46630
303
304#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE46634
305
306#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE46638
307
308#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE4663C
309
310#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE46640
311
312#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE46644
313
314#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE46648
315
316#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE4664C
317
318#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE46650
319
320#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE46654
321
322#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE46658
323
324#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE4665C
325
326#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE46660
327
328#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE46664
329
330#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_0 0xE46668
331
332#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_0 0xE4666C
333
334#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_1 0xE46670
335
336#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_1 0xE46674
337
338#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_2 0xE46678
339
340#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_2 0xE4667C
341
342#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_3 0xE46680
343
344#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_3 0xE46684
345
346#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_4 0xE46688
347
348#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_4 0xE4668C
349
350#define mmTPC1_CFG_KERNEL_SRF_0 0xE46690
351
352#define mmTPC1_CFG_KERNEL_SRF_1 0xE46694
353
354#define mmTPC1_CFG_KERNEL_SRF_2 0xE46698
355
356#define mmTPC1_CFG_KERNEL_SRF_3 0xE4669C
357
358#define mmTPC1_CFG_KERNEL_SRF_4 0xE466A0
359
360#define mmTPC1_CFG_KERNEL_SRF_5 0xE466A4
361
362#define mmTPC1_CFG_KERNEL_SRF_6 0xE466A8
363
364#define mmTPC1_CFG_KERNEL_SRF_7 0xE466AC
365
366#define mmTPC1_CFG_KERNEL_SRF_8 0xE466B0
367
368#define mmTPC1_CFG_KERNEL_SRF_9 0xE466B4
369
370#define mmTPC1_CFG_KERNEL_SRF_10 0xE466B8
371
372#define mmTPC1_CFG_KERNEL_SRF_11 0xE466BC
373
374#define mmTPC1_CFG_KERNEL_SRF_12 0xE466C0
375
376#define mmTPC1_CFG_KERNEL_SRF_13 0xE466C4
377
378#define mmTPC1_CFG_KERNEL_SRF_14 0xE466C8
379
380#define mmTPC1_CFG_KERNEL_SRF_15 0xE466CC
381
382#define mmTPC1_CFG_KERNEL_SRF_16 0xE466D0
383
384#define mmTPC1_CFG_KERNEL_SRF_17 0xE466D4
385
386#define mmTPC1_CFG_KERNEL_SRF_18 0xE466D8
387
388#define mmTPC1_CFG_KERNEL_SRF_19 0xE466DC
389
390#define mmTPC1_CFG_KERNEL_SRF_20 0xE466E0
391
392#define mmTPC1_CFG_KERNEL_SRF_21 0xE466E4
393
394#define mmTPC1_CFG_KERNEL_SRF_22 0xE466E8
395
396#define mmTPC1_CFG_KERNEL_SRF_23 0xE466EC
397
398#define mmTPC1_CFG_KERNEL_SRF_24 0xE466F0
399
400#define mmTPC1_CFG_KERNEL_SRF_25 0xE466F4
401
402#define mmTPC1_CFG_KERNEL_SRF_26 0xE466F8
403
404#define mmTPC1_CFG_KERNEL_SRF_27 0xE466FC
405
406#define mmTPC1_CFG_KERNEL_SRF_28 0xE46700
407
408#define mmTPC1_CFG_KERNEL_SRF_29 0xE46704
409
410#define mmTPC1_CFG_KERNEL_SRF_30 0xE46708
411
412#define mmTPC1_CFG_KERNEL_SRF_31 0xE4670C
413
414#define mmTPC1_CFG_KERNEL_KERNEL_CONFIG 0xE46710
415
416#define mmTPC1_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE46714
417
418#define mmTPC1_CFG_RESERVED_DESC_END 0xE46738
419
420#define mmTPC1_CFG_ROUND_CSR 0xE467FC
421
422#define mmTPC1_CFG_TBUF_BASE_ADDR_LOW 0xE46800
423
424#define mmTPC1_CFG_TBUF_BASE_ADDR_HIGH 0xE46804
425
426#define mmTPC1_CFG_SEMAPHORE 0xE46808
427
428#define mmTPC1_CFG_VFLAGS 0xE4680C
429
430#define mmTPC1_CFG_SFLAGS 0xE46810
431
432#define mmTPC1_CFG_LFSR_POLYNOM 0xE46818
433
434#define mmTPC1_CFG_STATUS 0xE4681C
435
436#define mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH 0xE46820
437
438#define mmTPC1_CFG_CFG_SUBTRACT_VALUE 0xE46824
439
440#define mmTPC1_CFG_SM_BASE_ADDRESS_LOW 0xE46828
441
442#define mmTPC1_CFG_SM_BASE_ADDRESS_HIGH 0xE4682C
443
444#define mmTPC1_CFG_TPC_CMD 0xE46830
445
446#define mmTPC1_CFG_TPC_EXECUTE 0xE46838
447
448#define mmTPC1_CFG_TPC_STALL 0xE4683C
449
450#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_LOW 0xE46840
451
452#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE46844
453
454#define mmTPC1_CFG_MSS_CONFIG 0xE46854
455
456#define mmTPC1_CFG_TPC_INTR_CAUSE 0xE46858
457
458#define mmTPC1_CFG_TPC_INTR_MASK 0xE4685C
459
460#define mmTPC1_CFG_TSB_CONFIG 0xE46860
461
462#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE46A00
463
464#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE46A04
465
466#define mmTPC1_CFG_QM_TENSOR_0_PADDING_VALUE 0xE46A08
467
468#define mmTPC1_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE46A0C
469
470#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE46A10
471
472#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE46A14
473
474#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE46A18
475
476#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE46A1C
477
478#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE46A20
479
480#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE46A24
481
482#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE46A28
483
484#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE46A2C
485
486#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE46A30
487
488#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE46A34
489
490#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE46A38
491
492#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE46A3C
493
494#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE46A40
495
496#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE46A44
497
498#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE46A48
499
500#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE46A4C
501
502#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE46A50
503
504#define mmTPC1_CFG_QM_TENSOR_1_PADDING_VALUE 0xE46A54
505
506#define mmTPC1_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE46A58
507
508#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE46A5C
509
510#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE46A60
511
512#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE46A64
513
514#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE46A68
515
516#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE46A6C
517
518#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE46A70
519
520#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE46A74
521
522#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE46A78
523
524#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE46A7C
525
526#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE46A80
527
528#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE46A84
529
530#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE46A88
531
532#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE46A8C
533
534#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE46A90
535
536#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE46A94
537
538#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE46A98
539
540#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE46A9C
541
542#define mmTPC1_CFG_QM_TENSOR_2_PADDING_VALUE 0xE46AA0
543
544#define mmTPC1_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE46AA4
545
546#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE46AA8
547
548#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE46AAC
549
550#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE46AB0
551
552#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE46AB4
553
554#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE46AB8
555
556#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE46ABC
557
558#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE46AC0
559
560#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE46AC4
561
562#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE46AC8
563
564#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE46ACC
565
566#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE46AD0
567
568#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE46AD4
569
570#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE46AD8
571
572#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE46ADC
573
574#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE46AE0
575
576#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE46AE4
577
578#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE46AE8
579
580#define mmTPC1_CFG_QM_TENSOR_3_PADDING_VALUE 0xE46AEC
581
582#define mmTPC1_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE46AF0
583
584#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE46AF4
585
586#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE46AF8
587
588#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE46AFC
589
590#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE46B00
591
592#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE46B04
593
594#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE46B08
595
596#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE46B0C
597
598#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE46B10
599
600#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE46B14
601
602#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE46B18
603
604#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE46B1C
605
606#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE46B20
607
608#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE46B24
609
610#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE46B28
611
612#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE46B2C
613
614#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE46B30
615
616#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE46B34
617
618#define mmTPC1_CFG_QM_TENSOR_4_PADDING_VALUE 0xE46B38
619
620#define mmTPC1_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE46B3C
621
622#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE46B40
623
624#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE46B44
625
626#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE46B48
627
628#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE46B4C
629
630#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE46B50
631
632#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE46B54
633
634#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE46B58
635
636#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE46B5C
637
638#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE46B60
639
640#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE46B64
641
642#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE46B68
643
644#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE46B6C
645
646#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE46B70
647
648#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE46B74
649
650#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE46B78
651
652#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE46B7C
653
654#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE46B80
655
656#define mmTPC1_CFG_QM_TENSOR_5_PADDING_VALUE 0xE46B84
657
658#define mmTPC1_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE46B88
659
660#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE46B8C
661
662#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE46B90
663
664#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE46B94
665
666#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE46B98
667
668#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE46B9C
669
670#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE46BA0
671
672#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE46BA4
673
674#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE46BA8
675
676#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE46BAC
677
678#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE46BB0
679
680#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE46BB4
681
682#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE46BB8
683
684#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE46BBC
685
686#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE46BC0
687
688#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE46BC4
689
690#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE46BC8
691
692#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE46BCC
693
694#define mmTPC1_CFG_QM_TENSOR_6_PADDING_VALUE 0xE46BD0
695
696#define mmTPC1_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE46BD4
697
698#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE46BD8
699
700#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE46BDC
701
702#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE46BE0
703
704#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE46BE4
705
706#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE46BE8
707
708#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE46BEC
709
710#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE46BF0
711
712#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE46BF4
713
714#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE46BF8
715
716#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE46BFC
717
718#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE46C00
719
720#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE46C04
721
722#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE46C08
723
724#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE46C0C
725
726#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE46C10
727
728#define mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE46C14
729
730#define mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE46C18
731
732#define mmTPC1_CFG_QM_TENSOR_7_PADDING_VALUE 0xE46C1C
733
734#define mmTPC1_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE46C20
735
736#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE46C24
737
738#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE46C28
739
740#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE46C2C
741
742#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE46C30
743
744#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE46C34
745
746#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE46C38
747
748#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE46C3C
749
750#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE46C40
751
752#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE46C44
753
754#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE46C48
755
756#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE46C4C
757
758#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE46C50
759
760#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE46C54
761
762#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE46C58
763
764#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE46C5C
765
766#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE46C60
767
768#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE46C64
769
770#define mmTPC1_CFG_QM_TID_BASE_DIM_0 0xE46C68
771
772#define mmTPC1_CFG_QM_TID_SIZE_DIM_0 0xE46C6C
773
774#define mmTPC1_CFG_QM_TID_BASE_DIM_1 0xE46C70
775
776#define mmTPC1_CFG_QM_TID_SIZE_DIM_1 0xE46C74
777
778#define mmTPC1_CFG_QM_TID_BASE_DIM_2 0xE46C78
779
780#define mmTPC1_CFG_QM_TID_SIZE_DIM_2 0xE46C7C
781
782#define mmTPC1_CFG_QM_TID_BASE_DIM_3 0xE46C80
783
784#define mmTPC1_CFG_QM_TID_SIZE_DIM_3 0xE46C84
785
786#define mmTPC1_CFG_QM_TID_BASE_DIM_4 0xE46C88
787
788#define mmTPC1_CFG_QM_TID_SIZE_DIM_4 0xE46C8C
789
790#define mmTPC1_CFG_QM_SRF_0 0xE46C90
791
792#define mmTPC1_CFG_QM_SRF_1 0xE46C94
793
794#define mmTPC1_CFG_QM_SRF_2 0xE46C98
795
796#define mmTPC1_CFG_QM_SRF_3 0xE46C9C
797
798#define mmTPC1_CFG_QM_SRF_4 0xE46CA0
799
800#define mmTPC1_CFG_QM_SRF_5 0xE46CA4
801
802#define mmTPC1_CFG_QM_SRF_6 0xE46CA8
803
804#define mmTPC1_CFG_QM_SRF_7 0xE46CAC
805
806#define mmTPC1_CFG_QM_SRF_8 0xE46CB0
807
808#define mmTPC1_CFG_QM_SRF_9 0xE46CB4
809
810#define mmTPC1_CFG_QM_SRF_10 0xE46CB8
811
812#define mmTPC1_CFG_QM_SRF_11 0xE46CBC
813
814#define mmTPC1_CFG_QM_SRF_12 0xE46CC0
815
816#define mmTPC1_CFG_QM_SRF_13 0xE46CC4
817
818#define mmTPC1_CFG_QM_SRF_14 0xE46CC8
819
820#define mmTPC1_CFG_QM_SRF_15 0xE46CCC
821
822#define mmTPC1_CFG_QM_SRF_16 0xE46CD0
823
824#define mmTPC1_CFG_QM_SRF_17 0xE46CD4
825
826#define mmTPC1_CFG_QM_SRF_18 0xE46CD8
827
828#define mmTPC1_CFG_QM_SRF_19 0xE46CDC
829
830#define mmTPC1_CFG_QM_SRF_20 0xE46CE0
831
832#define mmTPC1_CFG_QM_SRF_21 0xE46CE4
833
834#define mmTPC1_CFG_QM_SRF_22 0xE46CE8
835
836#define mmTPC1_CFG_QM_SRF_23 0xE46CEC
837
838#define mmTPC1_CFG_QM_SRF_24 0xE46CF0
839
840#define mmTPC1_CFG_QM_SRF_25 0xE46CF4
841
842#define mmTPC1_CFG_QM_SRF_26 0xE46CF8
843
844#define mmTPC1_CFG_QM_SRF_27 0xE46CFC
845
846#define mmTPC1_CFG_QM_SRF_28 0xE46D00
847
848#define mmTPC1_CFG_QM_SRF_29 0xE46D04
849
850#define mmTPC1_CFG_QM_SRF_30 0xE46D08
851
852#define mmTPC1_CFG_QM_SRF_31 0xE46D0C
853
854#define mmTPC1_CFG_QM_KERNEL_CONFIG 0xE46D10
855
856#define mmTPC1_CFG_QM_SYNC_OBJECT_MESSAGE 0xE46D14
857
858#define mmTPC1_CFG_ARUSER 0xE46D18
859
860#define mmTPC1_CFG_AWUSER 0xE46D1C
861
862#define mmTPC1_CFG_FUNC_MBIST_CNTRL 0xE46E00
863
864#define mmTPC1_CFG_FUNC_MBIST_PAT 0xE46E04
865
866#define mmTPC1_CFG_FUNC_MBIST_MEM_0 0xE46E08
867
868#define mmTPC1_CFG_FUNC_MBIST_MEM_1 0xE46E0C
869
870#define mmTPC1_CFG_FUNC_MBIST_MEM_2 0xE46E10
871
872#define mmTPC1_CFG_FUNC_MBIST_MEM_3 0xE46E14
873
874#define mmTPC1_CFG_FUNC_MBIST_MEM_4 0xE46E18
875
876#define mmTPC1_CFG_FUNC_MBIST_MEM_5 0xE46E1C
877
878#define mmTPC1_CFG_FUNC_MBIST_MEM_6 0xE46E20
879
880#define mmTPC1_CFG_FUNC_MBIST_MEM_7 0xE46E24
881
882#define mmTPC1_CFG_FUNC_MBIST_MEM_8 0xE46E28
883
884#define mmTPC1_CFG_FUNC_MBIST_MEM_9 0xE46E2C
885
886#endif /* ASIC_REG_TPC1_CFG_REGS_H_ */
887
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
new file mode 100644
index 000000000000..9099ebd7ab23
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
@@ -0,0 +1,139 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC1_CMDQ_REGS_H_
14#define ASIC_REG_TPC1_CMDQ_REGS_H_
15
16/*
17 *****************************************
18 * TPC1_CMDQ (Prototype: CMDQ)
19 *****************************************
20 */
21
22#define mmTPC1_CMDQ_GLBL_CFG0 0xE49000
23
24#define mmTPC1_CMDQ_GLBL_CFG1 0xE49004
25
26#define mmTPC1_CMDQ_GLBL_PROT 0xE49008
27
28#define mmTPC1_CMDQ_GLBL_ERR_CFG 0xE4900C
29
30#define mmTPC1_CMDQ_GLBL_ERR_ADDR_LO 0xE49010
31
32#define mmTPC1_CMDQ_GLBL_ERR_ADDR_HI 0xE49014
33
34#define mmTPC1_CMDQ_GLBL_ERR_WDATA 0xE49018
35
36#define mmTPC1_CMDQ_GLBL_SECURE_PROPS 0xE4901C
37
38#define mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS 0xE49020
39
40#define mmTPC1_CMDQ_GLBL_STS0 0xE49024
41
42#define mmTPC1_CMDQ_GLBL_STS1 0xE49028
43
44#define mmTPC1_CMDQ_CQ_CFG0 0xE490B0
45
46#define mmTPC1_CMDQ_CQ_CFG1 0xE490B4
47
48#define mmTPC1_CMDQ_CQ_ARUSER 0xE490B8
49
50#define mmTPC1_CMDQ_CQ_PTR_LO 0xE490C0
51
52#define mmTPC1_CMDQ_CQ_PTR_HI 0xE490C4
53
54#define mmTPC1_CMDQ_CQ_TSIZE 0xE490C8
55
56#define mmTPC1_CMDQ_CQ_CTL 0xE490CC
57
58#define mmTPC1_CMDQ_CQ_PTR_LO_STS 0xE490D4
59
60#define mmTPC1_CMDQ_CQ_PTR_HI_STS 0xE490D8
61
62#define mmTPC1_CMDQ_CQ_TSIZE_STS 0xE490DC
63
64#define mmTPC1_CMDQ_CQ_CTL_STS 0xE490E0
65
66#define mmTPC1_CMDQ_CQ_STS0 0xE490E4
67
68#define mmTPC1_CMDQ_CQ_STS1 0xE490E8
69
70#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_EN 0xE490F0
71
72#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE490F4
73
74#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_SAT 0xE490F8
75
76#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE490FC
77
78#define mmTPC1_CMDQ_CQ_IFIFO_CNT 0xE49108
79
80#define mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE49120
81
82#define mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE49124
83
84#define mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE49128
85
86#define mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE4912C
87
88#define mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE49130
89
90#define mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE49134
91
92#define mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE49138
93
94#define mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE4913C
95
96#define mmTPC1_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE49140
97
98#define mmTPC1_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE49144
99
100#define mmTPC1_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE49148
101
102#define mmTPC1_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE4914C
103
104#define mmTPC1_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE49150
105
106#define mmTPC1_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE49154
107
108#define mmTPC1_CMDQ_CP_FENCE0_RDATA 0xE49158
109
110#define mmTPC1_CMDQ_CP_FENCE1_RDATA 0xE4915C
111
112#define mmTPC1_CMDQ_CP_FENCE2_RDATA 0xE49160
113
114#define mmTPC1_CMDQ_CP_FENCE3_RDATA 0xE49164
115
116#define mmTPC1_CMDQ_CP_FENCE0_CNT 0xE49168
117
118#define mmTPC1_CMDQ_CP_FENCE1_CNT 0xE4916C
119
120#define mmTPC1_CMDQ_CP_FENCE2_CNT 0xE49170
121
122#define mmTPC1_CMDQ_CP_FENCE3_CNT 0xE49174
123
124#define mmTPC1_CMDQ_CP_STS 0xE49178
125
126#define mmTPC1_CMDQ_CP_CURRENT_INST_LO 0xE4917C
127
128#define mmTPC1_CMDQ_CP_CURRENT_INST_HI 0xE49180
129
130#define mmTPC1_CMDQ_CP_BARRIER_CFG 0xE49184
131
132#define mmTPC1_CMDQ_CP_DBG_0 0xE49188
133
134#define mmTPC1_CMDQ_CQ_BUF_ADDR 0xE49308
135
136#define mmTPC1_CMDQ_CQ_BUF_RDATA 0xE4930C
137
138#endif /* ASIC_REG_TPC1_CMDQ_REGS_H_ */
139
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
new file mode 100644
index 000000000000..bc8b9a10391f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
@@ -0,0 +1,179 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC1_QM_REGS_H_
14#define ASIC_REG_TPC1_QM_REGS_H_
15
16/*
17 *****************************************
18 * TPC1_QM (Prototype: QMAN)
19 *****************************************
20 */
21
22#define mmTPC1_QM_GLBL_CFG0 0xE48000
23
24#define mmTPC1_QM_GLBL_CFG1 0xE48004
25
26#define mmTPC1_QM_GLBL_PROT 0xE48008
27
28#define mmTPC1_QM_GLBL_ERR_CFG 0xE4800C
29
30#define mmTPC1_QM_GLBL_ERR_ADDR_LO 0xE48010
31
32#define mmTPC1_QM_GLBL_ERR_ADDR_HI 0xE48014
33
34#define mmTPC1_QM_GLBL_ERR_WDATA 0xE48018
35
36#define mmTPC1_QM_GLBL_SECURE_PROPS 0xE4801C
37
38#define mmTPC1_QM_GLBL_NON_SECURE_PROPS 0xE48020
39
40#define mmTPC1_QM_GLBL_STS0 0xE48024
41
42#define mmTPC1_QM_GLBL_STS1 0xE48028
43
44#define mmTPC1_QM_PQ_BASE_LO 0xE48060
45
46#define mmTPC1_QM_PQ_BASE_HI 0xE48064
47
48#define mmTPC1_QM_PQ_SIZE 0xE48068
49
50#define mmTPC1_QM_PQ_PI 0xE4806C
51
52#define mmTPC1_QM_PQ_CI 0xE48070
53
54#define mmTPC1_QM_PQ_CFG0 0xE48074
55
56#define mmTPC1_QM_PQ_CFG1 0xE48078
57
58#define mmTPC1_QM_PQ_ARUSER 0xE4807C
59
60#define mmTPC1_QM_PQ_PUSH0 0xE48080
61
62#define mmTPC1_QM_PQ_PUSH1 0xE48084
63
64#define mmTPC1_QM_PQ_PUSH2 0xE48088
65
66#define mmTPC1_QM_PQ_PUSH3 0xE4808C
67
68#define mmTPC1_QM_PQ_STS0 0xE48090
69
70#define mmTPC1_QM_PQ_STS1 0xE48094
71
72#define mmTPC1_QM_PQ_RD_RATE_LIM_EN 0xE480A0
73
74#define mmTPC1_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE480A4
75
76#define mmTPC1_QM_PQ_RD_RATE_LIM_SAT 0xE480A8
77
78#define mmTPC1_QM_PQ_RD_RATE_LIM_TOUT 0xE480AC
79
80#define mmTPC1_QM_CQ_CFG0 0xE480B0
81
82#define mmTPC1_QM_CQ_CFG1 0xE480B4
83
84#define mmTPC1_QM_CQ_ARUSER 0xE480B8
85
86#define mmTPC1_QM_CQ_PTR_LO 0xE480C0
87
88#define mmTPC1_QM_CQ_PTR_HI 0xE480C4
89
90#define mmTPC1_QM_CQ_TSIZE 0xE480C8
91
92#define mmTPC1_QM_CQ_CTL 0xE480CC
93
94#define mmTPC1_QM_CQ_PTR_LO_STS 0xE480D4
95
96#define mmTPC1_QM_CQ_PTR_HI_STS 0xE480D8
97
98#define mmTPC1_QM_CQ_TSIZE_STS 0xE480DC
99
100#define mmTPC1_QM_CQ_CTL_STS 0xE480E0
101
102#define mmTPC1_QM_CQ_STS0 0xE480E4
103
104#define mmTPC1_QM_CQ_STS1 0xE480E8
105
106#define mmTPC1_QM_CQ_RD_RATE_LIM_EN 0xE480F0
107
108#define mmTPC1_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE480F4
109
110#define mmTPC1_QM_CQ_RD_RATE_LIM_SAT 0xE480F8
111
112#define mmTPC1_QM_CQ_RD_RATE_LIM_TOUT 0xE480FC
113
114#define mmTPC1_QM_CQ_IFIFO_CNT 0xE48108
115
116#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO 0xE48120
117
118#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI 0xE48124
119
120#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO 0xE48128
121
122#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI 0xE4812C
123
124#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO 0xE48130
125
126#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI 0xE48134
127
128#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO 0xE48138
129
130#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI 0xE4813C
131
132#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET 0xE48140
133
134#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE48144
135
136#define mmTPC1_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE48148
137
138#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE4814C
139
140#define mmTPC1_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE48150
141
142#define mmTPC1_QM_CP_LDMA_COMMIT_OFFSET 0xE48154
143
144#define mmTPC1_QM_CP_FENCE0_RDATA 0xE48158
145
146#define mmTPC1_QM_CP_FENCE1_RDATA 0xE4815C
147
148#define mmTPC1_QM_CP_FENCE2_RDATA 0xE48160
149
150#define mmTPC1_QM_CP_FENCE3_RDATA 0xE48164
151
152#define mmTPC1_QM_CP_FENCE0_CNT 0xE48168
153
154#define mmTPC1_QM_CP_FENCE1_CNT 0xE4816C
155
156#define mmTPC1_QM_CP_FENCE2_CNT 0xE48170
157
158#define mmTPC1_QM_CP_FENCE3_CNT 0xE48174
159
160#define mmTPC1_QM_CP_STS 0xE48178
161
162#define mmTPC1_QM_CP_CURRENT_INST_LO 0xE4817C
163
164#define mmTPC1_QM_CP_CURRENT_INST_HI 0xE48180
165
166#define mmTPC1_QM_CP_BARRIER_CFG 0xE48184
167
168#define mmTPC1_QM_CP_DBG_0 0xE48188
169
170#define mmTPC1_QM_PQ_BUF_ADDR 0xE48300
171
172#define mmTPC1_QM_PQ_BUF_RDATA 0xE48304
173
174#define mmTPC1_QM_CQ_BUF_ADDR 0xE48308
175
176#define mmTPC1_QM_CQ_BUF_RDATA 0xE4830C
177
178#endif /* ASIC_REG_TPC1_QM_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
new file mode 100644
index 000000000000..ae267f8f457e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
@@ -0,0 +1,323 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC1_RTR_REGS_H_
14#define ASIC_REG_TPC1_RTR_REGS_H_
15
16/*
17 *****************************************
18 * TPC1_RTR (Prototype: TPC_RTR)
19 *****************************************
20 */
21
22#define mmTPC1_RTR_HBW_RD_RQ_E_ARB 0xE40100
23
24#define mmTPC1_RTR_HBW_RD_RQ_W_ARB 0xE40104
25
26#define mmTPC1_RTR_HBW_RD_RQ_N_ARB 0xE40108
27
28#define mmTPC1_RTR_HBW_RD_RQ_S_ARB 0xE4010C
29
30#define mmTPC1_RTR_HBW_RD_RQ_L_ARB 0xE40110
31
32#define mmTPC1_RTR_HBW_E_ARB_MAX 0xE40120
33
34#define mmTPC1_RTR_HBW_W_ARB_MAX 0xE40124
35
36#define mmTPC1_RTR_HBW_N_ARB_MAX 0xE40128
37
38#define mmTPC1_RTR_HBW_S_ARB_MAX 0xE4012C
39
40#define mmTPC1_RTR_HBW_L_ARB_MAX 0xE40130
41
42#define mmTPC1_RTR_HBW_RD_RS_E_ARB 0xE40140
43
44#define mmTPC1_RTR_HBW_RD_RS_W_ARB 0xE40144
45
46#define mmTPC1_RTR_HBW_RD_RS_N_ARB 0xE40148
47
48#define mmTPC1_RTR_HBW_RD_RS_S_ARB 0xE4014C
49
50#define mmTPC1_RTR_HBW_RD_RS_L_ARB 0xE40150
51
52#define mmTPC1_RTR_HBW_WR_RQ_E_ARB 0xE40170
53
54#define mmTPC1_RTR_HBW_WR_RQ_W_ARB 0xE40174
55
56#define mmTPC1_RTR_HBW_WR_RQ_N_ARB 0xE40178
57
58#define mmTPC1_RTR_HBW_WR_RQ_S_ARB 0xE4017C
59
60#define mmTPC1_RTR_HBW_WR_RQ_L_ARB 0xE40180
61
62#define mmTPC1_RTR_HBW_WR_RS_E_ARB 0xE40190
63
64#define mmTPC1_RTR_HBW_WR_RS_W_ARB 0xE40194
65
66#define mmTPC1_RTR_HBW_WR_RS_N_ARB 0xE40198
67
68#define mmTPC1_RTR_HBW_WR_RS_S_ARB 0xE4019C
69
70#define mmTPC1_RTR_HBW_WR_RS_L_ARB 0xE401A0
71
72#define mmTPC1_RTR_LBW_RD_RQ_E_ARB 0xE40200
73
74#define mmTPC1_RTR_LBW_RD_RQ_W_ARB 0xE40204
75
76#define mmTPC1_RTR_LBW_RD_RQ_N_ARB 0xE40208
77
78#define mmTPC1_RTR_LBW_RD_RQ_S_ARB 0xE4020C
79
80#define mmTPC1_RTR_LBW_RD_RQ_L_ARB 0xE40210
81
82#define mmTPC1_RTR_LBW_E_ARB_MAX 0xE40220
83
84#define mmTPC1_RTR_LBW_W_ARB_MAX 0xE40224
85
86#define mmTPC1_RTR_LBW_N_ARB_MAX 0xE40228
87
88#define mmTPC1_RTR_LBW_S_ARB_MAX 0xE4022C
89
90#define mmTPC1_RTR_LBW_L_ARB_MAX 0xE40230
91
92#define mmTPC1_RTR_LBW_RD_RS_E_ARB 0xE40250
93
94#define mmTPC1_RTR_LBW_RD_RS_W_ARB 0xE40254
95
96#define mmTPC1_RTR_LBW_RD_RS_N_ARB 0xE40258
97
98#define mmTPC1_RTR_LBW_RD_RS_S_ARB 0xE4025C
99
100#define mmTPC1_RTR_LBW_RD_RS_L_ARB 0xE40260
101
102#define mmTPC1_RTR_LBW_WR_RQ_E_ARB 0xE40270
103
104#define mmTPC1_RTR_LBW_WR_RQ_W_ARB 0xE40274
105
106#define mmTPC1_RTR_LBW_WR_RQ_N_ARB 0xE40278
107
108#define mmTPC1_RTR_LBW_WR_RQ_S_ARB 0xE4027C
109
110#define mmTPC1_RTR_LBW_WR_RQ_L_ARB 0xE40280
111
112#define mmTPC1_RTR_LBW_WR_RS_E_ARB 0xE40290
113
114#define mmTPC1_RTR_LBW_WR_RS_W_ARB 0xE40294
115
116#define mmTPC1_RTR_LBW_WR_RS_N_ARB 0xE40298
117
118#define mmTPC1_RTR_LBW_WR_RS_S_ARB 0xE4029C
119
120#define mmTPC1_RTR_LBW_WR_RS_L_ARB 0xE402A0
121
122#define mmTPC1_RTR_DBG_E_ARB 0xE40300
123
124#define mmTPC1_RTR_DBG_W_ARB 0xE40304
125
126#define mmTPC1_RTR_DBG_N_ARB 0xE40308
127
128#define mmTPC1_RTR_DBG_S_ARB 0xE4030C
129
130#define mmTPC1_RTR_DBG_L_ARB 0xE40310
131
132#define mmTPC1_RTR_DBG_E_ARB_MAX 0xE40320
133
134#define mmTPC1_RTR_DBG_W_ARB_MAX 0xE40324
135
136#define mmTPC1_RTR_DBG_N_ARB_MAX 0xE40328
137
138#define mmTPC1_RTR_DBG_S_ARB_MAX 0xE4032C
139
140#define mmTPC1_RTR_DBG_L_ARB_MAX 0xE40330
141
142#define mmTPC1_RTR_SPLIT_COEF_0 0xE40400
143
144#define mmTPC1_RTR_SPLIT_COEF_1 0xE40404
145
146#define mmTPC1_RTR_SPLIT_COEF_2 0xE40408
147
148#define mmTPC1_RTR_SPLIT_COEF_3 0xE4040C
149
150#define mmTPC1_RTR_SPLIT_COEF_4 0xE40410
151
152#define mmTPC1_RTR_SPLIT_COEF_5 0xE40414
153
154#define mmTPC1_RTR_SPLIT_COEF_6 0xE40418
155
156#define mmTPC1_RTR_SPLIT_COEF_7 0xE4041C
157
158#define mmTPC1_RTR_SPLIT_COEF_8 0xE40420
159
160#define mmTPC1_RTR_SPLIT_COEF_9 0xE40424
161
162#define mmTPC1_RTR_SPLIT_CFG 0xE40440
163
164#define mmTPC1_RTR_SPLIT_RD_SAT 0xE40444
165
166#define mmTPC1_RTR_SPLIT_RD_RST_TOKEN 0xE40448
167
168#define mmTPC1_RTR_SPLIT_RD_TIMEOUT_0 0xE4044C
169
170#define mmTPC1_RTR_SPLIT_RD_TIMEOUT_1 0xE40450
171
172#define mmTPC1_RTR_SPLIT_WR_SAT 0xE40454
173
174#define mmTPC1_RTR_WPLIT_WR_TST_TOLEN 0xE40458
175
176#define mmTPC1_RTR_SPLIT_WR_TIMEOUT_0 0xE4045C
177
178#define mmTPC1_RTR_SPLIT_WR_TIMEOUT_1 0xE40460
179
180#define mmTPC1_RTR_HBW_RANGE_HIT 0xE40470
181
182#define mmTPC1_RTR_HBW_RANGE_MASK_L_0 0xE40480
183
184#define mmTPC1_RTR_HBW_RANGE_MASK_L_1 0xE40484
185
186#define mmTPC1_RTR_HBW_RANGE_MASK_L_2 0xE40488
187
188#define mmTPC1_RTR_HBW_RANGE_MASK_L_3 0xE4048C
189
190#define mmTPC1_RTR_HBW_RANGE_MASK_L_4 0xE40490
191
192#define mmTPC1_RTR_HBW_RANGE_MASK_L_5 0xE40494
193
194#define mmTPC1_RTR_HBW_RANGE_MASK_L_6 0xE40498
195
196#define mmTPC1_RTR_HBW_RANGE_MASK_L_7 0xE4049C
197
198#define mmTPC1_RTR_HBW_RANGE_MASK_H_0 0xE404A0
199
200#define mmTPC1_RTR_HBW_RANGE_MASK_H_1 0xE404A4
201
202#define mmTPC1_RTR_HBW_RANGE_MASK_H_2 0xE404A8
203
204#define mmTPC1_RTR_HBW_RANGE_MASK_H_3 0xE404AC
205
206#define mmTPC1_RTR_HBW_RANGE_MASK_H_4 0xE404B0
207
208#define mmTPC1_RTR_HBW_RANGE_MASK_H_5 0xE404B4
209
210#define mmTPC1_RTR_HBW_RANGE_MASK_H_6 0xE404B8
211
212#define mmTPC1_RTR_HBW_RANGE_MASK_H_7 0xE404BC
213
214#define mmTPC1_RTR_HBW_RANGE_BASE_L_0 0xE404C0
215
216#define mmTPC1_RTR_HBW_RANGE_BASE_L_1 0xE404C4
217
218#define mmTPC1_RTR_HBW_RANGE_BASE_L_2 0xE404C8
219
220#define mmTPC1_RTR_HBW_RANGE_BASE_L_3 0xE404CC
221
222#define mmTPC1_RTR_HBW_RANGE_BASE_L_4 0xE404D0
223
224#define mmTPC1_RTR_HBW_RANGE_BASE_L_5 0xE404D4
225
226#define mmTPC1_RTR_HBW_RANGE_BASE_L_6 0xE404D8
227
228#define mmTPC1_RTR_HBW_RANGE_BASE_L_7 0xE404DC
229
230#define mmTPC1_RTR_HBW_RANGE_BASE_H_0 0xE404E0
231
232#define mmTPC1_RTR_HBW_RANGE_BASE_H_1 0xE404E4
233
234#define mmTPC1_RTR_HBW_RANGE_BASE_H_2 0xE404E8
235
236#define mmTPC1_RTR_HBW_RANGE_BASE_H_3 0xE404EC
237
238#define mmTPC1_RTR_HBW_RANGE_BASE_H_4 0xE404F0
239
240#define mmTPC1_RTR_HBW_RANGE_BASE_H_5 0xE404F4
241
242#define mmTPC1_RTR_HBW_RANGE_BASE_H_6 0xE404F8
243
244#define mmTPC1_RTR_HBW_RANGE_BASE_H_7 0xE404FC
245
246#define mmTPC1_RTR_LBW_RANGE_HIT 0xE40500
247
248#define mmTPC1_RTR_LBW_RANGE_MASK_0 0xE40510
249
250#define mmTPC1_RTR_LBW_RANGE_MASK_1 0xE40514
251
252#define mmTPC1_RTR_LBW_RANGE_MASK_2 0xE40518
253
254#define mmTPC1_RTR_LBW_RANGE_MASK_3 0xE4051C
255
256#define mmTPC1_RTR_LBW_RANGE_MASK_4 0xE40520
257
258#define mmTPC1_RTR_LBW_RANGE_MASK_5 0xE40524
259
260#define mmTPC1_RTR_LBW_RANGE_MASK_6 0xE40528
261
262#define mmTPC1_RTR_LBW_RANGE_MASK_7 0xE4052C
263
264#define mmTPC1_RTR_LBW_RANGE_MASK_8 0xE40530
265
266#define mmTPC1_RTR_LBW_RANGE_MASK_9 0xE40534
267
268#define mmTPC1_RTR_LBW_RANGE_MASK_10 0xE40538
269
270#define mmTPC1_RTR_LBW_RANGE_MASK_11 0xE4053C
271
272#define mmTPC1_RTR_LBW_RANGE_MASK_12 0xE40540
273
274#define mmTPC1_RTR_LBW_RANGE_MASK_13 0xE40544
275
276#define mmTPC1_RTR_LBW_RANGE_MASK_14 0xE40548
277
278#define mmTPC1_RTR_LBW_RANGE_MASK_15 0xE4054C
279
280#define mmTPC1_RTR_LBW_RANGE_BASE_0 0xE40550
281
282#define mmTPC1_RTR_LBW_RANGE_BASE_1 0xE40554
283
284#define mmTPC1_RTR_LBW_RANGE_BASE_2 0xE40558
285
286#define mmTPC1_RTR_LBW_RANGE_BASE_3 0xE4055C
287
288#define mmTPC1_RTR_LBW_RANGE_BASE_4 0xE40560
289
290#define mmTPC1_RTR_LBW_RANGE_BASE_5 0xE40564
291
292#define mmTPC1_RTR_LBW_RANGE_BASE_6 0xE40568
293
294#define mmTPC1_RTR_LBW_RANGE_BASE_7 0xE4056C
295
296#define mmTPC1_RTR_LBW_RANGE_BASE_8 0xE40570
297
298#define mmTPC1_RTR_LBW_RANGE_BASE_9 0xE40574
299
300#define mmTPC1_RTR_LBW_RANGE_BASE_10 0xE40578
301
302#define mmTPC1_RTR_LBW_RANGE_BASE_11 0xE4057C
303
304#define mmTPC1_RTR_LBW_RANGE_BASE_12 0xE40580
305
306#define mmTPC1_RTR_LBW_RANGE_BASE_13 0xE40584
307
308#define mmTPC1_RTR_LBW_RANGE_BASE_14 0xE40588
309
310#define mmTPC1_RTR_LBW_RANGE_BASE_15 0xE4058C
311
312#define mmTPC1_RTR_RGLTR 0xE40590
313
314#define mmTPC1_RTR_RGLTR_WR_RESULT 0xE40594
315
316#define mmTPC1_RTR_RGLTR_RD_RESULT 0xE40598
317
318#define mmTPC1_RTR_SCRAMB_EN 0xE40600
319
320#define mmTPC1_RTR_NON_LIN_SCRAMB 0xE40604
321
322#endif /* ASIC_REG_TPC1_RTR_REGS_H_ */
323
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
new file mode 100644
index 000000000000..9c33fc039036
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
@@ -0,0 +1,887 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC2_CFG_REGS_H_
14#define ASIC_REG_TPC2_CFG_REGS_H_
15
16/*
17 *****************************************
18 * TPC2_CFG (Prototype: TPC)
19 *****************************************
20 */
21
22#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE86400
23
24#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE86404
25
26#define mmTPC2_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE86408
27
28#define mmTPC2_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE8640C
29
30#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE86410
31
32#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE86414
33
34#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE86418
35
36#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE8641C
37
38#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE86420
39
40#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE86424
41
42#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE86428
43
44#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE8642C
45
46#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE86430
47
48#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE86434
49
50#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE86438
51
52#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE8643C
53
54#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE86440
55
56#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE86444
57
58#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE86448
59
60#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE8644C
61
62#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE86450
63
64#define mmTPC2_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE86454
65
66#define mmTPC2_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE86458
67
68#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE8645C
69
70#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE86460
71
72#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE86464
73
74#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE86468
75
76#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE8646C
77
78#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE86470
79
80#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE86474
81
82#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE86478
83
84#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE8647C
85
86#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE86480
87
88#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE86484
89
90#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE86488
91
92#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE8648C
93
94#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE86490
95
96#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE86494
97
98#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE86498
99
100#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE8649C
101
102#define mmTPC2_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE864A0
103
104#define mmTPC2_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE864A4
105
106#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE864A8
107
108#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE864AC
109
110#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE864B0
111
112#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE864B4
113
114#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE864B8
115
116#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE864BC
117
118#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE864C0
119
120#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE864C4
121
122#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE864C8
123
124#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE864CC
125
126#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE864D0
127
128#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE864D4
129
130#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE864D8
131
132#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE864DC
133
134#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE864E0
135
136#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE864E4
137
138#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE864E8
139
140#define mmTPC2_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE864EC
141
142#define mmTPC2_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE864F0
143
144#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE864F4
145
146#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE864F8
147
148#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE864FC
149
150#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE86500
151
152#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE86504
153
154#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE86508
155
156#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE8650C
157
158#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE86510
159
160#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE86514
161
162#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE86518
163
164#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE8651C
165
166#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE86520
167
168#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE86524
169
170#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE86528
171
172#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE8652C
173
174#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE86530
175
176#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE86534
177
178#define mmTPC2_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE86538
179
180#define mmTPC2_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE8653C
181
182#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE86540
183
184#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE86544
185
186#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE86548
187
188#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE8654C
189
190#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE86550
191
192#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE86554
193
194#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE86558
195
196#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE8655C
197
198#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE86560
199
200#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE86564
201
202#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE86568
203
204#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE8656C
205
206#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE86570
207
208#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE86574
209
210#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE86578
211
212#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE8657C
213
214#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE86580
215
216#define mmTPC2_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE86584
217
218#define mmTPC2_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE86588
219
220#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE8658C
221
222#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE86590
223
224#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE86594
225
226#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE86598
227
228#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE8659C
229
230#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE865A0
231
232#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE865A4
233
234#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE865A8
235
236#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE865AC
237
238#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE865B0
239
240#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE865B4
241
242#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE865B8
243
244#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE865BC
245
246#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE865C0
247
248#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE865C4
249
250#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE865C8
251
252#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE865CC
253
254#define mmTPC2_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE865D0
255
256#define mmTPC2_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE865D4
257
258#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE865D8
259
260#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE865DC
261
262#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE865E0
263
264#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE865E4
265
266#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE865E8
267
268#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE865EC
269
270#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE865F0
271
272#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE865F4
273
274#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE865F8
275
276#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE865FC
277
278#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE86600
279
280#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE86604
281
282#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE86608
283
284#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE8660C
285
286#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE86610
287
288#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE86614
289
290#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE86618
291
292#define mmTPC2_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE8661C
293
294#define mmTPC2_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE86620
295
296#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE86624
297
298#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE86628
299
300#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE8662C
301
302#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE86630
303
304#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE86634
305
306#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE86638
307
308#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE8663C
309
310#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE86640
311
312#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE86644
313
314#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE86648
315
316#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE8664C
317
318#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE86650
319
320#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE86654
321
322#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE86658
323
324#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE8665C
325
326#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE86660
327
328#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE86664
329
330#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_0 0xE86668
331
332#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_0 0xE8666C
333
334#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_1 0xE86670
335
336#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_1 0xE86674
337
338#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_2 0xE86678
339
340#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_2 0xE8667C
341
342#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_3 0xE86680
343
344#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_3 0xE86684
345
346#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_4 0xE86688
347
348#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_4 0xE8668C
349
350#define mmTPC2_CFG_KERNEL_SRF_0 0xE86690
351
352#define mmTPC2_CFG_KERNEL_SRF_1 0xE86694
353
354#define mmTPC2_CFG_KERNEL_SRF_2 0xE86698
355
356#define mmTPC2_CFG_KERNEL_SRF_3 0xE8669C
357
358#define mmTPC2_CFG_KERNEL_SRF_4 0xE866A0
359
360#define mmTPC2_CFG_KERNEL_SRF_5 0xE866A4
361
362#define mmTPC2_CFG_KERNEL_SRF_6 0xE866A8
363
364#define mmTPC2_CFG_KERNEL_SRF_7 0xE866AC
365
366#define mmTPC2_CFG_KERNEL_SRF_8 0xE866B0
367
368#define mmTPC2_CFG_KERNEL_SRF_9 0xE866B4
369
370#define mmTPC2_CFG_KERNEL_SRF_10 0xE866B8
371
372#define mmTPC2_CFG_KERNEL_SRF_11 0xE866BC
373
374#define mmTPC2_CFG_KERNEL_SRF_12 0xE866C0
375
376#define mmTPC2_CFG_KERNEL_SRF_13 0xE866C4
377
378#define mmTPC2_CFG_KERNEL_SRF_14 0xE866C8
379
380#define mmTPC2_CFG_KERNEL_SRF_15 0xE866CC
381
382#define mmTPC2_CFG_KERNEL_SRF_16 0xE866D0
383
384#define mmTPC2_CFG_KERNEL_SRF_17 0xE866D4
385
386#define mmTPC2_CFG_KERNEL_SRF_18 0xE866D8
387
388#define mmTPC2_CFG_KERNEL_SRF_19 0xE866DC
389
390#define mmTPC2_CFG_KERNEL_SRF_20 0xE866E0
391
392#define mmTPC2_CFG_KERNEL_SRF_21 0xE866E4
393
394#define mmTPC2_CFG_KERNEL_SRF_22 0xE866E8
395
396#define mmTPC2_CFG_KERNEL_SRF_23 0xE866EC
397
398#define mmTPC2_CFG_KERNEL_SRF_24 0xE866F0
399
400#define mmTPC2_CFG_KERNEL_SRF_25 0xE866F4
401
402#define mmTPC2_CFG_KERNEL_SRF_26 0xE866F8
403
404#define mmTPC2_CFG_KERNEL_SRF_27 0xE866FC
405
406#define mmTPC2_CFG_KERNEL_SRF_28 0xE86700
407
408#define mmTPC2_CFG_KERNEL_SRF_29 0xE86704
409
410#define mmTPC2_CFG_KERNEL_SRF_30 0xE86708
411
412#define mmTPC2_CFG_KERNEL_SRF_31 0xE8670C
413
414#define mmTPC2_CFG_KERNEL_KERNEL_CONFIG 0xE86710
415
416#define mmTPC2_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE86714
417
418#define mmTPC2_CFG_RESERVED_DESC_END 0xE86738
419
420#define mmTPC2_CFG_ROUND_CSR 0xE867FC
421
422#define mmTPC2_CFG_TBUF_BASE_ADDR_LOW 0xE86800
423
424#define mmTPC2_CFG_TBUF_BASE_ADDR_HIGH 0xE86804
425
426#define mmTPC2_CFG_SEMAPHORE 0xE86808
427
428#define mmTPC2_CFG_VFLAGS 0xE8680C
429
430#define mmTPC2_CFG_SFLAGS 0xE86810
431
432#define mmTPC2_CFG_LFSR_POLYNOM 0xE86818
433
434#define mmTPC2_CFG_STATUS 0xE8681C
435
436#define mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH 0xE86820
437
438#define mmTPC2_CFG_CFG_SUBTRACT_VALUE 0xE86824
439
440#define mmTPC2_CFG_SM_BASE_ADDRESS_LOW 0xE86828
441
442#define mmTPC2_CFG_SM_BASE_ADDRESS_HIGH 0xE8682C
443
444#define mmTPC2_CFG_TPC_CMD 0xE86830
445
446#define mmTPC2_CFG_TPC_EXECUTE 0xE86838
447
448#define mmTPC2_CFG_TPC_STALL 0xE8683C
449
450#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_LOW 0xE86840
451
452#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE86844
453
454#define mmTPC2_CFG_MSS_CONFIG 0xE86854
455
456#define mmTPC2_CFG_TPC_INTR_CAUSE 0xE86858
457
458#define mmTPC2_CFG_TPC_INTR_MASK 0xE8685C
459
460#define mmTPC2_CFG_TSB_CONFIG 0xE86860
461
462#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE86A00
463
464#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE86A04
465
466#define mmTPC2_CFG_QM_TENSOR_0_PADDING_VALUE 0xE86A08
467
468#define mmTPC2_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE86A0C
469
470#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE86A10
471
472#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE86A14
473
474#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE86A18
475
476#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE86A1C
477
478#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE86A20
479
480#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE86A24
481
482#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE86A28
483
484#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE86A2C
485
486#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE86A30
487
488#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE86A34
489
490#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE86A38
491
492#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE86A3C
493
494#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE86A40
495
496#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE86A44
497
498#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE86A48
499
500#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE86A4C
501
502#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE86A50
503
504#define mmTPC2_CFG_QM_TENSOR_1_PADDING_VALUE 0xE86A54
505
506#define mmTPC2_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE86A58
507
508#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE86A5C
509
510#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE86A60
511
512#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE86A64
513
514#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE86A68
515
516#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE86A6C
517
518#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE86A70
519
520#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE86A74
521
522#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE86A78
523
524#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE86A7C
525
526#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE86A80
527
528#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE86A84
529
530#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE86A88
531
532#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE86A8C
533
534#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE86A90
535
536#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE86A94
537
538#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE86A98
539
540#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE86A9C
541
542#define mmTPC2_CFG_QM_TENSOR_2_PADDING_VALUE 0xE86AA0
543
544#define mmTPC2_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE86AA4
545
546#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE86AA8
547
548#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE86AAC
549
550#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE86AB0
551
552#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE86AB4
553
554#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE86AB8
555
556#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE86ABC
557
558#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE86AC0
559
560#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE86AC4
561
562#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE86AC8
563
564#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE86ACC
565
566#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE86AD0
567
568#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE86AD4
569
570#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE86AD8
571
572#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE86ADC
573
574#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE86AE0
575
576#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE86AE4
577
578#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE86AE8
579
580#define mmTPC2_CFG_QM_TENSOR_3_PADDING_VALUE 0xE86AEC
581
582#define mmTPC2_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE86AF0
583
584#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE86AF4
585
586#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE86AF8
587
588#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE86AFC
589
590#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE86B00
591
592#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE86B04
593
594#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE86B08
595
596#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE86B0C
597
598#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE86B10
599
600#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE86B14
601
602#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE86B18
603
604#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE86B1C
605
606#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE86B20
607
608#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE86B24
609
610#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE86B28
611
612#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE86B2C
613
614#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE86B30
615
616#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE86B34
617
618#define mmTPC2_CFG_QM_TENSOR_4_PADDING_VALUE 0xE86B38
619
620#define mmTPC2_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE86B3C
621
622#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE86B40
623
624#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE86B44
625
626#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE86B48
627
628#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE86B4C
629
630#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE86B50
631
632#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE86B54
633
634#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE86B58
635
636#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE86B5C
637
638#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE86B60
639
640#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE86B64
641
642#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE86B68
643
644#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE86B6C
645
646#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE86B70
647
648#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE86B74
649
650#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE86B78
651
652#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE86B7C
653
654#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE86B80
655
656#define mmTPC2_CFG_QM_TENSOR_5_PADDING_VALUE 0xE86B84
657
658#define mmTPC2_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE86B88
659
660#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE86B8C
661
662#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE86B90
663
664#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE86B94
665
666#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE86B98
667
668#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE86B9C
669
670#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE86BA0
671
672#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE86BA4
673
674#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE86BA8
675
676#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE86BAC
677
678#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE86BB0
679
680#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE86BB4
681
682#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE86BB8
683
684#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE86BBC
685
686#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE86BC0
687
688#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE86BC4
689
690#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE86BC8
691
692#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE86BCC
693
694#define mmTPC2_CFG_QM_TENSOR_6_PADDING_VALUE 0xE86BD0
695
696#define mmTPC2_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE86BD4
697
698#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE86BD8
699
700#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE86BDC
701
702#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE86BE0
703
704#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE86BE4
705
706#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE86BE8
707
708#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE86BEC
709
710#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE86BF0
711
712#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE86BF4
713
714#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE86BF8
715
716#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE86BFC
717
718#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE86C00
719
720#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE86C04
721
722#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE86C08
723
724#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE86C0C
725
726#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE86C10
727
728#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE86C14
729
730#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE86C18
731
732#define mmTPC2_CFG_QM_TENSOR_7_PADDING_VALUE 0xE86C1C
733
734#define mmTPC2_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE86C20
735
736#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE86C24
737
738#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE86C28
739
740#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE86C2C
741
742#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE86C30
743
744#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE86C34
745
746#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE86C38
747
748#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE86C3C
749
750#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE86C40
751
752#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE86C44
753
754#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE86C48
755
756#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE86C4C
757
758#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE86C50
759
760#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE86C54
761
762#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE86C58
763
764#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE86C5C
765
766#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE86C60
767
768#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE86C64
769
770#define mmTPC2_CFG_QM_TID_BASE_DIM_0 0xE86C68
771
772#define mmTPC2_CFG_QM_TID_SIZE_DIM_0 0xE86C6C
773
774#define mmTPC2_CFG_QM_TID_BASE_DIM_1 0xE86C70
775
776#define mmTPC2_CFG_QM_TID_SIZE_DIM_1 0xE86C74
777
778#define mmTPC2_CFG_QM_TID_BASE_DIM_2 0xE86C78
779
780#define mmTPC2_CFG_QM_TID_SIZE_DIM_2 0xE86C7C
781
782#define mmTPC2_CFG_QM_TID_BASE_DIM_3 0xE86C80
783
784#define mmTPC2_CFG_QM_TID_SIZE_DIM_3 0xE86C84
785
786#define mmTPC2_CFG_QM_TID_BASE_DIM_4 0xE86C88
787
788#define mmTPC2_CFG_QM_TID_SIZE_DIM_4 0xE86C8C
789
790#define mmTPC2_CFG_QM_SRF_0 0xE86C90
791
792#define mmTPC2_CFG_QM_SRF_1 0xE86C94
793
794#define mmTPC2_CFG_QM_SRF_2 0xE86C98
795
796#define mmTPC2_CFG_QM_SRF_3 0xE86C9C
797
798#define mmTPC2_CFG_QM_SRF_4 0xE86CA0
799
800#define mmTPC2_CFG_QM_SRF_5 0xE86CA4
801
802#define mmTPC2_CFG_QM_SRF_6 0xE86CA8
803
804#define mmTPC2_CFG_QM_SRF_7 0xE86CAC
805
806#define mmTPC2_CFG_QM_SRF_8 0xE86CB0
807
808#define mmTPC2_CFG_QM_SRF_9 0xE86CB4
809
810#define mmTPC2_CFG_QM_SRF_10 0xE86CB8
811
812#define mmTPC2_CFG_QM_SRF_11 0xE86CBC
813
814#define mmTPC2_CFG_QM_SRF_12 0xE86CC0
815
816#define mmTPC2_CFG_QM_SRF_13 0xE86CC4
817
818#define mmTPC2_CFG_QM_SRF_14 0xE86CC8
819
820#define mmTPC2_CFG_QM_SRF_15 0xE86CCC
821
822#define mmTPC2_CFG_QM_SRF_16 0xE86CD0
823
824#define mmTPC2_CFG_QM_SRF_17 0xE86CD4
825
826#define mmTPC2_CFG_QM_SRF_18 0xE86CD8
827
828#define mmTPC2_CFG_QM_SRF_19 0xE86CDC
829
830#define mmTPC2_CFG_QM_SRF_20 0xE86CE0
831
832#define mmTPC2_CFG_QM_SRF_21 0xE86CE4
833
834#define mmTPC2_CFG_QM_SRF_22 0xE86CE8
835
836#define mmTPC2_CFG_QM_SRF_23 0xE86CEC
837
838#define mmTPC2_CFG_QM_SRF_24 0xE86CF0
839
840#define mmTPC2_CFG_QM_SRF_25 0xE86CF4
841
842#define mmTPC2_CFG_QM_SRF_26 0xE86CF8
843
844#define mmTPC2_CFG_QM_SRF_27 0xE86CFC
845
846#define mmTPC2_CFG_QM_SRF_28 0xE86D00
847
848#define mmTPC2_CFG_QM_SRF_29 0xE86D04
849
850#define mmTPC2_CFG_QM_SRF_30 0xE86D08
851
852#define mmTPC2_CFG_QM_SRF_31 0xE86D0C
853
854#define mmTPC2_CFG_QM_KERNEL_CONFIG 0xE86D10
855
856#define mmTPC2_CFG_QM_SYNC_OBJECT_MESSAGE 0xE86D14
857
858#define mmTPC2_CFG_ARUSER 0xE86D18
859
860#define mmTPC2_CFG_AWUSER 0xE86D1C
861
862#define mmTPC2_CFG_FUNC_MBIST_CNTRL 0xE86E00
863
864#define mmTPC2_CFG_FUNC_MBIST_PAT 0xE86E04
865
866#define mmTPC2_CFG_FUNC_MBIST_MEM_0 0xE86E08
867
868#define mmTPC2_CFG_FUNC_MBIST_MEM_1 0xE86E0C
869
870#define mmTPC2_CFG_FUNC_MBIST_MEM_2 0xE86E10
871
872#define mmTPC2_CFG_FUNC_MBIST_MEM_3 0xE86E14
873
874#define mmTPC2_CFG_FUNC_MBIST_MEM_4 0xE86E18
875
876#define mmTPC2_CFG_FUNC_MBIST_MEM_5 0xE86E1C
877
878#define mmTPC2_CFG_FUNC_MBIST_MEM_6 0xE86E20
879
880#define mmTPC2_CFG_FUNC_MBIST_MEM_7 0xE86E24
881
882#define mmTPC2_CFG_FUNC_MBIST_MEM_8 0xE86E28
883
884#define mmTPC2_CFG_FUNC_MBIST_MEM_9 0xE86E2C
885
886#endif /* ASIC_REG_TPC2_CFG_REGS_H_ */
887
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
new file mode 100644
index 000000000000..7a643887d6e1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
@@ -0,0 +1,139 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC2_CMDQ_REGS_H_
14#define ASIC_REG_TPC2_CMDQ_REGS_H_
15
16/*
17 *****************************************
18 * TPC2_CMDQ (Prototype: CMDQ)
19 *****************************************
20 */
21
22#define mmTPC2_CMDQ_GLBL_CFG0 0xE89000
23
24#define mmTPC2_CMDQ_GLBL_CFG1 0xE89004
25
26#define mmTPC2_CMDQ_GLBL_PROT 0xE89008
27
28#define mmTPC2_CMDQ_GLBL_ERR_CFG 0xE8900C
29
30#define mmTPC2_CMDQ_GLBL_ERR_ADDR_LO 0xE89010
31
32#define mmTPC2_CMDQ_GLBL_ERR_ADDR_HI 0xE89014
33
34#define mmTPC2_CMDQ_GLBL_ERR_WDATA 0xE89018
35
36#define mmTPC2_CMDQ_GLBL_SECURE_PROPS 0xE8901C
37
38#define mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS 0xE89020
39
40#define mmTPC2_CMDQ_GLBL_STS0 0xE89024
41
42#define mmTPC2_CMDQ_GLBL_STS1 0xE89028
43
44#define mmTPC2_CMDQ_CQ_CFG0 0xE890B0
45
46#define mmTPC2_CMDQ_CQ_CFG1 0xE890B4
47
48#define mmTPC2_CMDQ_CQ_ARUSER 0xE890B8
49
50#define mmTPC2_CMDQ_CQ_PTR_LO 0xE890C0
51
52#define mmTPC2_CMDQ_CQ_PTR_HI 0xE890C4
53
54#define mmTPC2_CMDQ_CQ_TSIZE 0xE890C8
55
56#define mmTPC2_CMDQ_CQ_CTL 0xE890CC
57
58#define mmTPC2_CMDQ_CQ_PTR_LO_STS 0xE890D4
59
60#define mmTPC2_CMDQ_CQ_PTR_HI_STS 0xE890D8
61
62#define mmTPC2_CMDQ_CQ_TSIZE_STS 0xE890DC
63
64#define mmTPC2_CMDQ_CQ_CTL_STS 0xE890E0
65
66#define mmTPC2_CMDQ_CQ_STS0 0xE890E4
67
68#define mmTPC2_CMDQ_CQ_STS1 0xE890E8
69
70#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_EN 0xE890F0
71
72#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE890F4
73
74#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_SAT 0xE890F8
75
76#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE890FC
77
78#define mmTPC2_CMDQ_CQ_IFIFO_CNT 0xE89108
79
80#define mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE89120
81
82#define mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE89124
83
84#define mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE89128
85
86#define mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE8912C
87
88#define mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE89130
89
90#define mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE89134
91
92#define mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE89138
93
94#define mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE8913C
95
96#define mmTPC2_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE89140
97
98#define mmTPC2_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE89144
99
100#define mmTPC2_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE89148
101
102#define mmTPC2_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE8914C
103
104#define mmTPC2_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE89150
105
106#define mmTPC2_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE89154
107
108#define mmTPC2_CMDQ_CP_FENCE0_RDATA 0xE89158
109
110#define mmTPC2_CMDQ_CP_FENCE1_RDATA 0xE8915C
111
112#define mmTPC2_CMDQ_CP_FENCE2_RDATA 0xE89160
113
114#define mmTPC2_CMDQ_CP_FENCE3_RDATA 0xE89164
115
116#define mmTPC2_CMDQ_CP_FENCE0_CNT 0xE89168
117
118#define mmTPC2_CMDQ_CP_FENCE1_CNT 0xE8916C
119
120#define mmTPC2_CMDQ_CP_FENCE2_CNT 0xE89170
121
122#define mmTPC2_CMDQ_CP_FENCE3_CNT 0xE89174
123
124#define mmTPC2_CMDQ_CP_STS 0xE89178
125
126#define mmTPC2_CMDQ_CP_CURRENT_INST_LO 0xE8917C
127
128#define mmTPC2_CMDQ_CP_CURRENT_INST_HI 0xE89180
129
130#define mmTPC2_CMDQ_CP_BARRIER_CFG 0xE89184
131
132#define mmTPC2_CMDQ_CP_DBG_0 0xE89188
133
134#define mmTPC2_CMDQ_CQ_BUF_ADDR 0xE89308
135
136#define mmTPC2_CMDQ_CQ_BUF_RDATA 0xE8930C
137
138#endif /* ASIC_REG_TPC2_CMDQ_REGS_H_ */
139
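The CMDQ block above exposes each 64-bit pointer as a _LO/_HI register pair. As a minimal illustration only (not part of this patch), the sketch below shows how a driver could program one such pair through an ioremap()'d view of the device register space; cfg_base and tpc2_cmdq_set_msg_base0() are hypothetical names, and the habanalabs driver itself uses its own register-access helpers rather than raw writel() calls like this.

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/*
 * Hypothetical helper, for illustration: cfg_base is assumed to map the
 * device's register space starting at offset 0, so the mm* defines above
 * can be used directly as byte offsets from it.
 */
static void tpc2_cmdq_set_msg_base0(void __iomem *cfg_base, u64 addr)
{
	writel(lower_32_bits(addr), cfg_base + mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_LO);
	writel(upper_32_bits(addr), cfg_base + mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_HI);
}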
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
new file mode 100644
index 000000000000..f3e32c018064
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
@@ -0,0 +1,179 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC2_QM_REGS_H_
14#define ASIC_REG_TPC2_QM_REGS_H_
15
16/*
17 *****************************************
18 * TPC2_QM (Prototype: QMAN)
19 *****************************************
20 */
21
22#define mmTPC2_QM_GLBL_CFG0 0xE88000
23
24#define mmTPC2_QM_GLBL_CFG1 0xE88004
25
26#define mmTPC2_QM_GLBL_PROT 0xE88008
27
28#define mmTPC2_QM_GLBL_ERR_CFG 0xE8800C
29
30#define mmTPC2_QM_GLBL_ERR_ADDR_LO 0xE88010
31
32#define mmTPC2_QM_GLBL_ERR_ADDR_HI 0xE88014
33
34#define mmTPC2_QM_GLBL_ERR_WDATA 0xE88018
35
36#define mmTPC2_QM_GLBL_SECURE_PROPS 0xE8801C
37
38#define mmTPC2_QM_GLBL_NON_SECURE_PROPS 0xE88020
39
40#define mmTPC2_QM_GLBL_STS0 0xE88024
41
42#define mmTPC2_QM_GLBL_STS1 0xE88028
43
44#define mmTPC2_QM_PQ_BASE_LO 0xE88060
45
46#define mmTPC2_QM_PQ_BASE_HI 0xE88064
47
48#define mmTPC2_QM_PQ_SIZE 0xE88068
49
50#define mmTPC2_QM_PQ_PI 0xE8806C
51
52#define mmTPC2_QM_PQ_CI 0xE88070
53
54#define mmTPC2_QM_PQ_CFG0 0xE88074
55
56#define mmTPC2_QM_PQ_CFG1 0xE88078
57
58#define mmTPC2_QM_PQ_ARUSER 0xE8807C
59
60#define mmTPC2_QM_PQ_PUSH0 0xE88080
61
62#define mmTPC2_QM_PQ_PUSH1 0xE88084
63
64#define mmTPC2_QM_PQ_PUSH2 0xE88088
65
66#define mmTPC2_QM_PQ_PUSH3 0xE8808C
67
68#define mmTPC2_QM_PQ_STS0 0xE88090
69
70#define mmTPC2_QM_PQ_STS1 0xE88094
71
72#define mmTPC2_QM_PQ_RD_RATE_LIM_EN 0xE880A0
73
74#define mmTPC2_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE880A4
75
76#define mmTPC2_QM_PQ_RD_RATE_LIM_SAT 0xE880A8
77
78#define mmTPC2_QM_PQ_RD_RATE_LIM_TOUT 0xE880AC
79
80#define mmTPC2_QM_CQ_CFG0 0xE880B0
81
82#define mmTPC2_QM_CQ_CFG1 0xE880B4
83
84#define mmTPC2_QM_CQ_ARUSER 0xE880B8
85
86#define mmTPC2_QM_CQ_PTR_LO 0xE880C0
87
88#define mmTPC2_QM_CQ_PTR_HI 0xE880C4
89
90#define mmTPC2_QM_CQ_TSIZE 0xE880C8
91
92#define mmTPC2_QM_CQ_CTL 0xE880CC
93
94#define mmTPC2_QM_CQ_PTR_LO_STS 0xE880D4
95
96#define mmTPC2_QM_CQ_PTR_HI_STS 0xE880D8
97
98#define mmTPC2_QM_CQ_TSIZE_STS 0xE880DC
99
100#define mmTPC2_QM_CQ_CTL_STS 0xE880E0
101
102#define mmTPC2_QM_CQ_STS0 0xE880E4
103
104#define mmTPC2_QM_CQ_STS1 0xE880E8
105
106#define mmTPC2_QM_CQ_RD_RATE_LIM_EN 0xE880F0
107
108#define mmTPC2_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE880F4
109
110#define mmTPC2_QM_CQ_RD_RATE_LIM_SAT 0xE880F8
111
112#define mmTPC2_QM_CQ_RD_RATE_LIM_TOUT 0xE880FC
113
114#define mmTPC2_QM_CQ_IFIFO_CNT 0xE88108
115
116#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO 0xE88120
117
118#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI 0xE88124
119
120#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO 0xE88128
121
122#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI 0xE8812C
123
124#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO 0xE88130
125
126#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI 0xE88134
127
128#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO 0xE88138
129
130#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI 0xE8813C
131
132#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET 0xE88140
133
134#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE88144
135
136#define mmTPC2_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE88148
137
138#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE8814C
139
140#define mmTPC2_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE88150
141
142#define mmTPC2_QM_CP_LDMA_COMMIT_OFFSET 0xE88154
143
144#define mmTPC2_QM_CP_FENCE0_RDATA 0xE88158
145
146#define mmTPC2_QM_CP_FENCE1_RDATA 0xE8815C
147
148#define mmTPC2_QM_CP_FENCE2_RDATA 0xE88160
149
150#define mmTPC2_QM_CP_FENCE3_RDATA 0xE88164
151
152#define mmTPC2_QM_CP_FENCE0_CNT 0xE88168
153
154#define mmTPC2_QM_CP_FENCE1_CNT 0xE8816C
155
156#define mmTPC2_QM_CP_FENCE2_CNT 0xE88170
157
158#define mmTPC2_QM_CP_FENCE3_CNT 0xE88174
159
160#define mmTPC2_QM_CP_STS 0xE88178
161
162#define mmTPC2_QM_CP_CURRENT_INST_LO 0xE8817C
163
164#define mmTPC2_QM_CP_CURRENT_INST_HI 0xE88180
165
166#define mmTPC2_QM_CP_BARRIER_CFG 0xE88184
167
168#define mmTPC2_QM_CP_DBG_0 0xE88188
169
170#define mmTPC2_QM_PQ_BUF_ADDR 0xE88300
171
172#define mmTPC2_QM_PQ_BUF_RDATA 0xE88304
173
174#define mmTPC2_QM_CQ_BUF_ADDR 0xE88308
175
176#define mmTPC2_QM_CQ_BUF_RDATA 0xE8830C
177
178#endif /* ASIC_REG_TPC2_QM_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
new file mode 100644
index 000000000000..0eb0cd1fbd19
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
@@ -0,0 +1,323 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC2_RTR_REGS_H_
14#define ASIC_REG_TPC2_RTR_REGS_H_
15
16/*
17 *****************************************
18 * TPC2_RTR (Prototype: TPC_RTR)
19 *****************************************
20 */
21
22#define mmTPC2_RTR_HBW_RD_RQ_E_ARB 0xE80100
23
24#define mmTPC2_RTR_HBW_RD_RQ_W_ARB 0xE80104
25
26#define mmTPC2_RTR_HBW_RD_RQ_N_ARB 0xE80108
27
28#define mmTPC2_RTR_HBW_RD_RQ_S_ARB 0xE8010C
29
30#define mmTPC2_RTR_HBW_RD_RQ_L_ARB 0xE80110
31
32#define mmTPC2_RTR_HBW_E_ARB_MAX 0xE80120
33
34#define mmTPC2_RTR_HBW_W_ARB_MAX 0xE80124
35
36#define mmTPC2_RTR_HBW_N_ARB_MAX 0xE80128
37
38#define mmTPC2_RTR_HBW_S_ARB_MAX 0xE8012C
39
40#define mmTPC2_RTR_HBW_L_ARB_MAX 0xE80130
41
42#define mmTPC2_RTR_HBW_RD_RS_E_ARB 0xE80140
43
44#define mmTPC2_RTR_HBW_RD_RS_W_ARB 0xE80144
45
46#define mmTPC2_RTR_HBW_RD_RS_N_ARB 0xE80148
47
48#define mmTPC2_RTR_HBW_RD_RS_S_ARB 0xE8014C
49
50#define mmTPC2_RTR_HBW_RD_RS_L_ARB 0xE80150
51
52#define mmTPC2_RTR_HBW_WR_RQ_E_ARB 0xE80170
53
54#define mmTPC2_RTR_HBW_WR_RQ_W_ARB 0xE80174
55
56#define mmTPC2_RTR_HBW_WR_RQ_N_ARB 0xE80178
57
58#define mmTPC2_RTR_HBW_WR_RQ_S_ARB 0xE8017C
59
60#define mmTPC2_RTR_HBW_WR_RQ_L_ARB 0xE80180
61
62#define mmTPC2_RTR_HBW_WR_RS_E_ARB 0xE80190
63
64#define mmTPC2_RTR_HBW_WR_RS_W_ARB 0xE80194
65
66#define mmTPC2_RTR_HBW_WR_RS_N_ARB 0xE80198
67
68#define mmTPC2_RTR_HBW_WR_RS_S_ARB 0xE8019C
69
70#define mmTPC2_RTR_HBW_WR_RS_L_ARB 0xE801A0
71
72#define mmTPC2_RTR_LBW_RD_RQ_E_ARB 0xE80200
73
74#define mmTPC2_RTR_LBW_RD_RQ_W_ARB 0xE80204
75
76#define mmTPC2_RTR_LBW_RD_RQ_N_ARB 0xE80208
77
78#define mmTPC2_RTR_LBW_RD_RQ_S_ARB 0xE8020C
79
80#define mmTPC2_RTR_LBW_RD_RQ_L_ARB 0xE80210
81
82#define mmTPC2_RTR_LBW_E_ARB_MAX 0xE80220
83
84#define mmTPC2_RTR_LBW_W_ARB_MAX 0xE80224
85
86#define mmTPC2_RTR_LBW_N_ARB_MAX 0xE80228
87
88#define mmTPC2_RTR_LBW_S_ARB_MAX 0xE8022C
89
90#define mmTPC2_RTR_LBW_L_ARB_MAX 0xE80230
91
92#define mmTPC2_RTR_LBW_RD_RS_E_ARB 0xE80250
93
94#define mmTPC2_RTR_LBW_RD_RS_W_ARB 0xE80254
95
96#define mmTPC2_RTR_LBW_RD_RS_N_ARB 0xE80258
97
98#define mmTPC2_RTR_LBW_RD_RS_S_ARB 0xE8025C
99
100#define mmTPC2_RTR_LBW_RD_RS_L_ARB 0xE80260
101
102#define mmTPC2_RTR_LBW_WR_RQ_E_ARB 0xE80270
103
104#define mmTPC2_RTR_LBW_WR_RQ_W_ARB 0xE80274
105
106#define mmTPC2_RTR_LBW_WR_RQ_N_ARB 0xE80278
107
108#define mmTPC2_RTR_LBW_WR_RQ_S_ARB 0xE8027C
109
110#define mmTPC2_RTR_LBW_WR_RQ_L_ARB 0xE80280
111
112#define mmTPC2_RTR_LBW_WR_RS_E_ARB 0xE80290
113
114#define mmTPC2_RTR_LBW_WR_RS_W_ARB 0xE80294
115
116#define mmTPC2_RTR_LBW_WR_RS_N_ARB 0xE80298
117
118#define mmTPC2_RTR_LBW_WR_RS_S_ARB 0xE8029C
119
120#define mmTPC2_RTR_LBW_WR_RS_L_ARB 0xE802A0
121
122#define mmTPC2_RTR_DBG_E_ARB 0xE80300
123
124#define mmTPC2_RTR_DBG_W_ARB 0xE80304
125
126#define mmTPC2_RTR_DBG_N_ARB 0xE80308
127
128#define mmTPC2_RTR_DBG_S_ARB 0xE8030C
129
130#define mmTPC2_RTR_DBG_L_ARB 0xE80310
131
132#define mmTPC2_RTR_DBG_E_ARB_MAX 0xE80320
133
134#define mmTPC2_RTR_DBG_W_ARB_MAX 0xE80324
135
136#define mmTPC2_RTR_DBG_N_ARB_MAX 0xE80328
137
138#define mmTPC2_RTR_DBG_S_ARB_MAX 0xE8032C
139
140#define mmTPC2_RTR_DBG_L_ARB_MAX 0xE80330
141
142#define mmTPC2_RTR_SPLIT_COEF_0 0xE80400
143
144#define mmTPC2_RTR_SPLIT_COEF_1 0xE80404
145
146#define mmTPC2_RTR_SPLIT_COEF_2 0xE80408
147
148#define mmTPC2_RTR_SPLIT_COEF_3 0xE8040C
149
150#define mmTPC2_RTR_SPLIT_COEF_4 0xE80410
151
152#define mmTPC2_RTR_SPLIT_COEF_5 0xE80414
153
154#define mmTPC2_RTR_SPLIT_COEF_6 0xE80418
155
156#define mmTPC2_RTR_SPLIT_COEF_7 0xE8041C
157
158#define mmTPC2_RTR_SPLIT_COEF_8 0xE80420
159
160#define mmTPC2_RTR_SPLIT_COEF_9 0xE80424
161
162#define mmTPC2_RTR_SPLIT_CFG 0xE80440
163
164#define mmTPC2_RTR_SPLIT_RD_SAT 0xE80444
165
166#define mmTPC2_RTR_SPLIT_RD_RST_TOKEN 0xE80448
167
168#define mmTPC2_RTR_SPLIT_RD_TIMEOUT_0 0xE8044C
169
170#define mmTPC2_RTR_SPLIT_RD_TIMEOUT_1 0xE80450
171
172#define mmTPC2_RTR_SPLIT_WR_SAT 0xE80454
173
174#define mmTPC2_RTR_WPLIT_WR_TST_TOLEN 0xE80458
175
176#define mmTPC2_RTR_SPLIT_WR_TIMEOUT_0 0xE8045C
177
178#define mmTPC2_RTR_SPLIT_WR_TIMEOUT_1 0xE80460
179
180#define mmTPC2_RTR_HBW_RANGE_HIT 0xE80470
181
182#define mmTPC2_RTR_HBW_RANGE_MASK_L_0 0xE80480
183
184#define mmTPC2_RTR_HBW_RANGE_MASK_L_1 0xE80484
185
186#define mmTPC2_RTR_HBW_RANGE_MASK_L_2 0xE80488
187
188#define mmTPC2_RTR_HBW_RANGE_MASK_L_3 0xE8048C
189
190#define mmTPC2_RTR_HBW_RANGE_MASK_L_4 0xE80490
191
192#define mmTPC2_RTR_HBW_RANGE_MASK_L_5 0xE80494
193
194#define mmTPC2_RTR_HBW_RANGE_MASK_L_6 0xE80498
195
196#define mmTPC2_RTR_HBW_RANGE_MASK_L_7 0xE8049C
197
198#define mmTPC2_RTR_HBW_RANGE_MASK_H_0 0xE804A0
199
200#define mmTPC2_RTR_HBW_RANGE_MASK_H_1 0xE804A4
201
202#define mmTPC2_RTR_HBW_RANGE_MASK_H_2 0xE804A8
203
204#define mmTPC2_RTR_HBW_RANGE_MASK_H_3 0xE804AC
205
206#define mmTPC2_RTR_HBW_RANGE_MASK_H_4 0xE804B0
207
208#define mmTPC2_RTR_HBW_RANGE_MASK_H_5 0xE804B4
209
210#define mmTPC2_RTR_HBW_RANGE_MASK_H_6 0xE804B8
211
212#define mmTPC2_RTR_HBW_RANGE_MASK_H_7 0xE804BC
213
214#define mmTPC2_RTR_HBW_RANGE_BASE_L_0 0xE804C0
215
216#define mmTPC2_RTR_HBW_RANGE_BASE_L_1 0xE804C4
217
218#define mmTPC2_RTR_HBW_RANGE_BASE_L_2 0xE804C8
219
220#define mmTPC2_RTR_HBW_RANGE_BASE_L_3 0xE804CC
221
222#define mmTPC2_RTR_HBW_RANGE_BASE_L_4 0xE804D0
223
224#define mmTPC2_RTR_HBW_RANGE_BASE_L_5 0xE804D4
225
226#define mmTPC2_RTR_HBW_RANGE_BASE_L_6 0xE804D8
227
228#define mmTPC2_RTR_HBW_RANGE_BASE_L_7 0xE804DC
229
230#define mmTPC2_RTR_HBW_RANGE_BASE_H_0 0xE804E0
231
232#define mmTPC2_RTR_HBW_RANGE_BASE_H_1 0xE804E4
233
234#define mmTPC2_RTR_HBW_RANGE_BASE_H_2 0xE804E8
235
236#define mmTPC2_RTR_HBW_RANGE_BASE_H_3 0xE804EC
237
238#define mmTPC2_RTR_HBW_RANGE_BASE_H_4 0xE804F0
239
240#define mmTPC2_RTR_HBW_RANGE_BASE_H_5 0xE804F4
241
242#define mmTPC2_RTR_HBW_RANGE_BASE_H_6 0xE804F8
243
244#define mmTPC2_RTR_HBW_RANGE_BASE_H_7 0xE804FC
245
246#define mmTPC2_RTR_LBW_RANGE_HIT 0xE80500
247
248#define mmTPC2_RTR_LBW_RANGE_MASK_0 0xE80510
249
250#define mmTPC2_RTR_LBW_RANGE_MASK_1 0xE80514
251
252#define mmTPC2_RTR_LBW_RANGE_MASK_2 0xE80518
253
254#define mmTPC2_RTR_LBW_RANGE_MASK_3 0xE8051C
255
256#define mmTPC2_RTR_LBW_RANGE_MASK_4 0xE80520
257
258#define mmTPC2_RTR_LBW_RANGE_MASK_5 0xE80524
259
260#define mmTPC2_RTR_LBW_RANGE_MASK_6 0xE80528
261
262#define mmTPC2_RTR_LBW_RANGE_MASK_7 0xE8052C
263
264#define mmTPC2_RTR_LBW_RANGE_MASK_8 0xE80530
265
266#define mmTPC2_RTR_LBW_RANGE_MASK_9 0xE80534
267
268#define mmTPC2_RTR_LBW_RANGE_MASK_10 0xE80538
269
270#define mmTPC2_RTR_LBW_RANGE_MASK_11 0xE8053C
271
272#define mmTPC2_RTR_LBW_RANGE_MASK_12 0xE80540
273
274#define mmTPC2_RTR_LBW_RANGE_MASK_13 0xE80544
275
276#define mmTPC2_RTR_LBW_RANGE_MASK_14 0xE80548
277
278#define mmTPC2_RTR_LBW_RANGE_MASK_15 0xE8054C
279
280#define mmTPC2_RTR_LBW_RANGE_BASE_0 0xE80550
281
282#define mmTPC2_RTR_LBW_RANGE_BASE_1 0xE80554
283
284#define mmTPC2_RTR_LBW_RANGE_BASE_2 0xE80558
285
286#define mmTPC2_RTR_LBW_RANGE_BASE_3 0xE8055C
287
288#define mmTPC2_RTR_LBW_RANGE_BASE_4 0xE80560
289
290#define mmTPC2_RTR_LBW_RANGE_BASE_5 0xE80564
291
292#define mmTPC2_RTR_LBW_RANGE_BASE_6 0xE80568
293
294#define mmTPC2_RTR_LBW_RANGE_BASE_7 0xE8056C
295
296#define mmTPC2_RTR_LBW_RANGE_BASE_8 0xE80570
297
298#define mmTPC2_RTR_LBW_RANGE_BASE_9 0xE80574
299
300#define mmTPC2_RTR_LBW_RANGE_BASE_10 0xE80578
301
302#define mmTPC2_RTR_LBW_RANGE_BASE_11 0xE8057C
303
304#define mmTPC2_RTR_LBW_RANGE_BASE_12 0xE80580
305
306#define mmTPC2_RTR_LBW_RANGE_BASE_13 0xE80584
307
308#define mmTPC2_RTR_LBW_RANGE_BASE_14 0xE80588
309
310#define mmTPC2_RTR_LBW_RANGE_BASE_15 0xE8058C
311
312#define mmTPC2_RTR_RGLTR 0xE80590
313
314#define mmTPC2_RTR_RGLTR_WR_RESULT 0xE80594
315
316#define mmTPC2_RTR_RGLTR_RD_RESULT 0xE80598
317
318#define mmTPC2_RTR_SCRAMB_EN 0xE80600
319
320#define mmTPC2_RTR_NON_LIN_SCRAMB 0xE80604
321
322#endif /* ASIC_REG_TPC2_RTR_REGS_H_ */
323
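One observation before the TPC3 file that follows: the TPC2_CFG and TPC3_CFG blocks sit 0x40000 apart (0xE86400 vs 0xEC6400), so a per-instance CFG register address can be derived from the TPC2 define plus an index-scaled stride. The TPC_CFG_STRIDE and tpc_cfg_reg() names below are a hypothetical sketch, not helpers from this patch, and the same stride is only assumed (not shown in this hunk) to hold for the other per-TPC blocks.

#include <linux/types.h>

/* Spacing between consecutive TPCn_CFG blocks as seen in these headers. */
#define TPC_CFG_STRIDE	0x40000

/* Hypothetical helper: derive a TPCn_CFG register address from its TPC2 define. */
static inline u32 tpc_cfg_reg(u32 tpc2_reg, int tpc_id)
{
	return tpc2_reg + (tpc_id - 2) * TPC_CFG_STRIDE;
}

/* e.g. tpc_cfg_reg(mmTPC2_CFG_TPC_CMD, 3) == 0xEC6830 == mmTPC3_CFG_TPC_CMD */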
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
new file mode 100644
index 000000000000..0baf63c69b25
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
@@ -0,0 +1,887 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC3_CFG_REGS_H_
14#define ASIC_REG_TPC3_CFG_REGS_H_
15
16/*
17 *****************************************
18 * TPC3_CFG (Prototype: TPC)
19 *****************************************
20 */
21
22#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xEC6400
23
24#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xEC6404
25
26#define mmTPC3_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xEC6408
27
28#define mmTPC3_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xEC640C
29
30#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xEC6410
31
32#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xEC6414
33
34#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xEC6418
35
36#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xEC641C
37
38#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xEC6420
39
40#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xEC6424
41
42#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xEC6428
43
44#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xEC642C
45
46#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xEC6430
47
48#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xEC6434
49
50#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xEC6438
51
52#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xEC643C
53
54#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xEC6440
55
56#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xEC6444
57
58#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xEC6448
59
60#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xEC644C
61
62#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xEC6450
63
64#define mmTPC3_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xEC6454
65
66#define mmTPC3_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xEC6458
67
68#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xEC645C
69
70#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xEC6460
71
72#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xEC6464
73
74#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xEC6468
75
76#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xEC646C
77
78#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xEC6470
79
80#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xEC6474
81
82#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xEC6478
83
84#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xEC647C
85
86#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xEC6480
87
88#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xEC6484
89
90#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xEC6488
91
92#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xEC648C
93
94#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xEC6490
95
96#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xEC6494
97
98#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xEC6498
99
100#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xEC649C
101
102#define mmTPC3_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xEC64A0
103
104#define mmTPC3_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xEC64A4
105
106#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xEC64A8
107
108#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xEC64AC
109
110#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xEC64B0
111
112#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xEC64B4
113
114#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xEC64B8
115
116#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xEC64BC
117
118#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xEC64C0
119
120#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xEC64C4
121
122#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xEC64C8
123
124#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xEC64CC
125
126#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xEC64D0
127
128#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xEC64D4
129
130#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xEC64D8
131
132#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xEC64DC
133
134#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xEC64E0
135
136#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xEC64E4
137
138#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xEC64E8
139
140#define mmTPC3_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xEC64EC
141
142#define mmTPC3_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xEC64F0
143
144#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xEC64F4
145
146#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xEC64F8
147
148#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xEC64FC
149
150#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xEC6500
151
152#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xEC6504
153
154#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xEC6508
155
156#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xEC650C
157
158#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xEC6510
159
160#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xEC6514
161
162#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xEC6518
163
164#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xEC651C
165
166#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xEC6520
167
168#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xEC6524
169
170#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xEC6528
171
172#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xEC652C
173
174#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xEC6530
175
176#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xEC6534
177
178#define mmTPC3_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xEC6538
179
180#define mmTPC3_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xEC653C
181
182#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xEC6540
183
184#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xEC6544
185
186#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xEC6548
187
188#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xEC654C
189
190#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xEC6550
191
192#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xEC6554
193
194#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xEC6558
195
196#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xEC655C
197
198#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xEC6560
199
200#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xEC6564
201
202#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xEC6568
203
204#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xEC656C
205
206#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xEC6570
207
208#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xEC6574
209
210#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xEC6578
211
212#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xEC657C
213
214#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xEC6580
215
216#define mmTPC3_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xEC6584
217
218#define mmTPC3_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xEC6588
219
220#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xEC658C
221
222#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xEC6590
223
224#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xEC6594
225
226#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xEC6598
227
228#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xEC659C
229
230#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xEC65A0
231
232#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xEC65A4
233
234#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xEC65A8
235
236#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xEC65AC
237
238#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xEC65B0
239
240#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xEC65B4
241
242#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xEC65B8
243
244#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xEC65BC
245
246#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xEC65C0
247
248#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xEC65C4
249
250#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xEC65C8
251
252#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xEC65CC
253
254#define mmTPC3_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xEC65D0
255
256#define mmTPC3_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xEC65D4
257
258#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xEC65D8
259
260#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xEC65DC
261
262#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xEC65E0
263
264#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xEC65E4
265
266#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xEC65E8
267
268#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xEC65EC
269
270#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xEC65F0
271
272#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xEC65F4
273
274#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xEC65F8
275
276#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xEC65FC
277
278#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xEC6600
279
280#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xEC6604
281
282#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xEC6608
283
284#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xEC660C
285
286#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xEC6610
287
288#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xEC6614
289
290#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xEC6618
291
292#define mmTPC3_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xEC661C
293
294#define mmTPC3_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xEC6620
295
296#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xEC6624
297
298#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xEC6628
299
300#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xEC662C
301
302#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xEC6630
303
304#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xEC6634
305
306#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xEC6638
307
308#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xEC663C
309
310#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xEC6640
311
312#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xEC6644
313
314#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xEC6648
315
316#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xEC664C
317
318#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xEC6650
319
320#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xEC6654
321
322#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xEC6658
323
324#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xEC665C
325
326#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xEC6660
327
328#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xEC6664
329
330#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_0 0xEC6668
331
332#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_0 0xEC666C
333
334#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_1 0xEC6670
335
336#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_1 0xEC6674
337
338#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_2 0xEC6678
339
340#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_2 0xEC667C
341
342#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_3 0xEC6680
343
344#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_3 0xEC6684
345
346#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_4 0xEC6688
347
348#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_4 0xEC668C
349
350#define mmTPC3_CFG_KERNEL_SRF_0 0xEC6690
351
352#define mmTPC3_CFG_KERNEL_SRF_1 0xEC6694
353
354#define mmTPC3_CFG_KERNEL_SRF_2 0xEC6698
355
356#define mmTPC3_CFG_KERNEL_SRF_3 0xEC669C
357
358#define mmTPC3_CFG_KERNEL_SRF_4 0xEC66A0
359
360#define mmTPC3_CFG_KERNEL_SRF_5 0xEC66A4
361
362#define mmTPC3_CFG_KERNEL_SRF_6 0xEC66A8
363
364#define mmTPC3_CFG_KERNEL_SRF_7 0xEC66AC
365
366#define mmTPC3_CFG_KERNEL_SRF_8 0xEC66B0
367
368#define mmTPC3_CFG_KERNEL_SRF_9 0xEC66B4
369
370#define mmTPC3_CFG_KERNEL_SRF_10 0xEC66B8
371
372#define mmTPC3_CFG_KERNEL_SRF_11 0xEC66BC
373
374#define mmTPC3_CFG_KERNEL_SRF_12 0xEC66C0
375
376#define mmTPC3_CFG_KERNEL_SRF_13 0xEC66C4
377
378#define mmTPC3_CFG_KERNEL_SRF_14 0xEC66C8
379
380#define mmTPC3_CFG_KERNEL_SRF_15 0xEC66CC
381
382#define mmTPC3_CFG_KERNEL_SRF_16 0xEC66D0
383
384#define mmTPC3_CFG_KERNEL_SRF_17 0xEC66D4
385
386#define mmTPC3_CFG_KERNEL_SRF_18 0xEC66D8
387
388#define mmTPC3_CFG_KERNEL_SRF_19 0xEC66DC
389
390#define mmTPC3_CFG_KERNEL_SRF_20 0xEC66E0
391
392#define mmTPC3_CFG_KERNEL_SRF_21 0xEC66E4
393
394#define mmTPC3_CFG_KERNEL_SRF_22 0xEC66E8
395
396#define mmTPC3_CFG_KERNEL_SRF_23 0xEC66EC
397
398#define mmTPC3_CFG_KERNEL_SRF_24 0xEC66F0
399
400#define mmTPC3_CFG_KERNEL_SRF_25 0xEC66F4
401
402#define mmTPC3_CFG_KERNEL_SRF_26 0xEC66F8
403
404#define mmTPC3_CFG_KERNEL_SRF_27 0xEC66FC
405
406#define mmTPC3_CFG_KERNEL_SRF_28 0xEC6700
407
408#define mmTPC3_CFG_KERNEL_SRF_29 0xEC6704
409
410#define mmTPC3_CFG_KERNEL_SRF_30 0xEC6708
411
412#define mmTPC3_CFG_KERNEL_SRF_31 0xEC670C
413
414#define mmTPC3_CFG_KERNEL_KERNEL_CONFIG 0xEC6710
415
416#define mmTPC3_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xEC6714
417
418#define mmTPC3_CFG_RESERVED_DESC_END 0xEC6738
419
420#define mmTPC3_CFG_ROUND_CSR 0xEC67FC
421
422#define mmTPC3_CFG_TBUF_BASE_ADDR_LOW 0xEC6800
423
424#define mmTPC3_CFG_TBUF_BASE_ADDR_HIGH 0xEC6804
425
426#define mmTPC3_CFG_SEMAPHORE 0xEC6808
427
428#define mmTPC3_CFG_VFLAGS 0xEC680C
429
430#define mmTPC3_CFG_SFLAGS 0xEC6810
431
432#define mmTPC3_CFG_LFSR_POLYNOM 0xEC6818
433
434#define mmTPC3_CFG_STATUS 0xEC681C
435
436#define mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH 0xEC6820
437
438#define mmTPC3_CFG_CFG_SUBTRACT_VALUE 0xEC6824
439
440#define mmTPC3_CFG_SM_BASE_ADDRESS_LOW 0xEC6828
441
442#define mmTPC3_CFG_SM_BASE_ADDRESS_HIGH 0xEC682C
443
444#define mmTPC3_CFG_TPC_CMD 0xEC6830
445
446#define mmTPC3_CFG_TPC_EXECUTE 0xEC6838
447
448#define mmTPC3_CFG_TPC_STALL 0xEC683C
449
450#define mmTPC3_CFG_ICACHE_BASE_ADDERESS_LOW 0xEC6840
451
452#define mmTPC3_CFG_ICACHE_BASE_ADDERESS_HIGH 0xEC6844
453
454#define mmTPC3_CFG_MSS_CONFIG 0xEC6854
455
456#define mmTPC3_CFG_TPC_INTR_CAUSE 0xEC6858
457
458#define mmTPC3_CFG_TPC_INTR_MASK 0xEC685C
459
460#define mmTPC3_CFG_TSB_CONFIG 0xEC6860
461
462#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xEC6A00
463
464#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xEC6A04
465
466#define mmTPC3_CFG_QM_TENSOR_0_PADDING_VALUE 0xEC6A08
467
468#define mmTPC3_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xEC6A0C
469
470#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_SIZE 0xEC6A10
471
472#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xEC6A14
473
474#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xEC6A18
475
476#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_SIZE 0xEC6A1C
477
478#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xEC6A20
479
480#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xEC6A24
481
482#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_SIZE 0xEC6A28
483
484#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xEC6A2C
485
486#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xEC6A30
487
488#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_SIZE 0xEC6A34
489
490#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xEC6A38
491
492#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xEC6A3C
493
494#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_SIZE 0xEC6A40
495
496#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xEC6A44
497
498#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xEC6A48
499
500#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xEC6A4C
501
502#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xEC6A50
503
504#define mmTPC3_CFG_QM_TENSOR_1_PADDING_VALUE 0xEC6A54
505
506#define mmTPC3_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xEC6A58
507
508#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_SIZE 0xEC6A5C
509
510#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xEC6A60
511
512#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xEC6A64
513
514#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_SIZE 0xEC6A68
515
516#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xEC6A6C
517
518#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xEC6A70
519
520#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_SIZE 0xEC6A74
521
522#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xEC6A78
523
524#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xEC6A7C
525
526#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_SIZE 0xEC6A80
527
528#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xEC6A84
529
530#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xEC6A88
531
532#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_SIZE 0xEC6A8C
533
534#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xEC6A90
535
536#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xEC6A94
537
538#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xEC6A98
539
540#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xEC6A9C
541
542#define mmTPC3_CFG_QM_TENSOR_2_PADDING_VALUE 0xEC6AA0
543
544#define mmTPC3_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xEC6AA4
545
546#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_SIZE 0xEC6AA8
547
548#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xEC6AAC
549
550#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xEC6AB0
551
552#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_SIZE 0xEC6AB4
553
554#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xEC6AB8
555
556#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xEC6ABC
557
558#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_SIZE 0xEC6AC0
559
560#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xEC6AC4
561
562#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xEC6AC8
563
564#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_SIZE 0xEC6ACC
565
566#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xEC6AD0
567
568#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xEC6AD4
569
570#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_SIZE 0xEC6AD8
571
572#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xEC6ADC
573
574#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xEC6AE0
575
576#define mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xEC6AE4
577
578#define mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xEC6AE8
579
580#define mmTPC3_CFG_QM_TENSOR_3_PADDING_VALUE 0xEC6AEC
581
582#define mmTPC3_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xEC6AF0
583
584#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_SIZE 0xEC6AF4
585
586#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xEC6AF8
587
588#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xEC6AFC
589
590#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_SIZE 0xEC6B00
591
592#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xEC6B04
593
594#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xEC6B08
595
596#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_SIZE 0xEC6B0C
597
598#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xEC6B10
599
600#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xEC6B14
601
602#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_SIZE 0xEC6B18
603
604#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xEC6B1C
605
606#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xEC6B20
607
608#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_SIZE 0xEC6B24
609
610#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xEC6B28
611
612#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xEC6B2C
613
614#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xEC6B30
615
616#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xEC6B34
617
618#define mmTPC3_CFG_QM_TENSOR_4_PADDING_VALUE 0xEC6B38
619
620#define mmTPC3_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xEC6B3C
621
622#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_SIZE 0xEC6B40
623
624#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xEC6B44
625
626#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xEC6B48
627
628#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_SIZE 0xEC6B4C
629
630#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xEC6B50
631
632#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xEC6B54
633
634#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_SIZE 0xEC6B58
635
636#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xEC6B5C
637
638#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xEC6B60
639
640#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_SIZE 0xEC6B64
641
642#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xEC6B68
643
644#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xEC6B6C
645
646#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_SIZE 0xEC6B70
647
648#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xEC6B74
649
650#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xEC6B78
651
652#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xEC6B7C
653
654#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xEC6B80
655
656#define mmTPC3_CFG_QM_TENSOR_5_PADDING_VALUE 0xEC6B84
657
658#define mmTPC3_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xEC6B88
659
660#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_SIZE 0xEC6B8C
661
662#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xEC6B90
663
664#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xEC6B94
665
666#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_SIZE 0xEC6B98
667
668#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xEC6B9C
669
670#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xEC6BA0
671
672#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_SIZE 0xEC6BA4
673
674#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xEC6BA8
675
676#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xEC6BAC
677
678#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_SIZE 0xEC6BB0
679
680#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xEC6BB4
681
682#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xEC6BB8
683
684#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_SIZE 0xEC6BBC
685
686#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xEC6BC0
687
688#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xEC6BC4
689
690#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xEC6BC8
691
692#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xEC6BCC
693
694#define mmTPC3_CFG_QM_TENSOR_6_PADDING_VALUE 0xEC6BD0
695
696#define mmTPC3_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xEC6BD4
697
698#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_SIZE 0xEC6BD8
699
700#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xEC6BDC
701
702#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xEC6BE0
703
704#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_SIZE 0xEC6BE4
705
706#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xEC6BE8
707
708#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xEC6BEC
709
710#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_SIZE 0xEC6BF0
711
712#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xEC6BF4
713
714#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xEC6BF8
715
716#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_SIZE 0xEC6BFC
717
718#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xEC6C00
719
720#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xEC6C04
721
722#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_SIZE 0xEC6C08
723
724#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xEC6C0C
725
726#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xEC6C10
727
728#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xEC6C14
729
730#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xEC6C18
731
732#define mmTPC3_CFG_QM_TENSOR_7_PADDING_VALUE 0xEC6C1C
733
734#define mmTPC3_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xEC6C20
735
736#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_SIZE 0xEC6C24
737
738#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xEC6C28
739
740#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xEC6C2C
741
742#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_SIZE 0xEC6C30
743
744#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xEC6C34
745
746#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xEC6C38
747
748#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_SIZE 0xEC6C3C
749
750#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xEC6C40
751
752#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xEC6C44
753
754#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_SIZE 0xEC6C48
755
756#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xEC6C4C
757
758#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xEC6C50
759
760#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_SIZE 0xEC6C54
761
762#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xEC6C58
763
764#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xEC6C5C
765
766#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xEC6C60
767
768#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xEC6C64
769
770#define mmTPC3_CFG_QM_TID_BASE_DIM_0 0xEC6C68
771
772#define mmTPC3_CFG_QM_TID_SIZE_DIM_0 0xEC6C6C
773
774#define mmTPC3_CFG_QM_TID_BASE_DIM_1 0xEC6C70
775
776#define mmTPC3_CFG_QM_TID_SIZE_DIM_1 0xEC6C74
777
778#define mmTPC3_CFG_QM_TID_BASE_DIM_2 0xEC6C78
779
780#define mmTPC3_CFG_QM_TID_SIZE_DIM_2 0xEC6C7C
781
782#define mmTPC3_CFG_QM_TID_BASE_DIM_3 0xEC6C80
783
784#define mmTPC3_CFG_QM_TID_SIZE_DIM_3 0xEC6C84
785
786#define mmTPC3_CFG_QM_TID_BASE_DIM_4 0xEC6C88
787
788#define mmTPC3_CFG_QM_TID_SIZE_DIM_4 0xEC6C8C
789
790#define mmTPC3_CFG_QM_SRF_0 0xEC6C90
791
792#define mmTPC3_CFG_QM_SRF_1 0xEC6C94
793
794#define mmTPC3_CFG_QM_SRF_2 0xEC6C98
795
796#define mmTPC3_CFG_QM_SRF_3 0xEC6C9C
797
798#define mmTPC3_CFG_QM_SRF_4 0xEC6CA0
799
800#define mmTPC3_CFG_QM_SRF_5 0xEC6CA4
801
802#define mmTPC3_CFG_QM_SRF_6 0xEC6CA8
803
804#define mmTPC3_CFG_QM_SRF_7 0xEC6CAC
805
806#define mmTPC3_CFG_QM_SRF_8 0xEC6CB0
807
808#define mmTPC3_CFG_QM_SRF_9 0xEC6CB4
809
810#define mmTPC3_CFG_QM_SRF_10 0xEC6CB8
811
812#define mmTPC3_CFG_QM_SRF_11 0xEC6CBC
813
814#define mmTPC3_CFG_QM_SRF_12 0xEC6CC0
815
816#define mmTPC3_CFG_QM_SRF_13 0xEC6CC4
817
818#define mmTPC3_CFG_QM_SRF_14 0xEC6CC8
819
820#define mmTPC3_CFG_QM_SRF_15 0xEC6CCC
821
822#define mmTPC3_CFG_QM_SRF_16 0xEC6CD0
823
824#define mmTPC3_CFG_QM_SRF_17 0xEC6CD4
825
826#define mmTPC3_CFG_QM_SRF_18 0xEC6CD8
827
828#define mmTPC3_CFG_QM_SRF_19 0xEC6CDC
829
830#define mmTPC3_CFG_QM_SRF_20 0xEC6CE0
831
832#define mmTPC3_CFG_QM_SRF_21 0xEC6CE4
833
834#define mmTPC3_CFG_QM_SRF_22 0xEC6CE8
835
836#define mmTPC3_CFG_QM_SRF_23 0xEC6CEC
837
838#define mmTPC3_CFG_QM_SRF_24 0xEC6CF0
839
840#define mmTPC3_CFG_QM_SRF_25 0xEC6CF4
841
842#define mmTPC3_CFG_QM_SRF_26 0xEC6CF8
843
844#define mmTPC3_CFG_QM_SRF_27 0xEC6CFC
845
846#define mmTPC3_CFG_QM_SRF_28 0xEC6D00
847
848#define mmTPC3_CFG_QM_SRF_29 0xEC6D04
849
850#define mmTPC3_CFG_QM_SRF_30 0xEC6D08
851
852#define mmTPC3_CFG_QM_SRF_31 0xEC6D0C
853
854#define mmTPC3_CFG_QM_KERNEL_CONFIG 0xEC6D10
855
856#define mmTPC3_CFG_QM_SYNC_OBJECT_MESSAGE 0xEC6D14
857
858#define mmTPC3_CFG_ARUSER 0xEC6D18
859
860#define mmTPC3_CFG_AWUSER 0xEC6D1C
861
862#define mmTPC3_CFG_FUNC_MBIST_CNTRL 0xEC6E00
863
864#define mmTPC3_CFG_FUNC_MBIST_PAT 0xEC6E04
865
866#define mmTPC3_CFG_FUNC_MBIST_MEM_0 0xEC6E08
867
868#define mmTPC3_CFG_FUNC_MBIST_MEM_1 0xEC6E0C
869
870#define mmTPC3_CFG_FUNC_MBIST_MEM_2 0xEC6E10
871
872#define mmTPC3_CFG_FUNC_MBIST_MEM_3 0xEC6E14
873
874#define mmTPC3_CFG_FUNC_MBIST_MEM_4 0xEC6E18
875
876#define mmTPC3_CFG_FUNC_MBIST_MEM_5 0xEC6E1C
877
878#define mmTPC3_CFG_FUNC_MBIST_MEM_6 0xEC6E20
879
880#define mmTPC3_CFG_FUNC_MBIST_MEM_7 0xEC6E24
881
882#define mmTPC3_CFG_FUNC_MBIST_MEM_8 0xEC6E28
883
884#define mmTPC3_CFG_FUNC_MBIST_MEM_9 0xEC6E2C
885
886#endif /* ASIC_REG_TPC3_CFG_REGS_H_ */
887
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
new file mode 100644
index 000000000000..82a5261e852f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
@@ -0,0 +1,139 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC3_CMDQ_REGS_H_
14#define ASIC_REG_TPC3_CMDQ_REGS_H_
15
16/*
17 *****************************************
18 * TPC3_CMDQ (Prototype: CMDQ)
19 *****************************************
20 */
21
22#define mmTPC3_CMDQ_GLBL_CFG0 0xEC9000
23
24#define mmTPC3_CMDQ_GLBL_CFG1 0xEC9004
25
26#define mmTPC3_CMDQ_GLBL_PROT 0xEC9008
27
28#define mmTPC3_CMDQ_GLBL_ERR_CFG 0xEC900C
29
30#define mmTPC3_CMDQ_GLBL_ERR_ADDR_LO 0xEC9010
31
32#define mmTPC3_CMDQ_GLBL_ERR_ADDR_HI 0xEC9014
33
34#define mmTPC3_CMDQ_GLBL_ERR_WDATA 0xEC9018
35
36#define mmTPC3_CMDQ_GLBL_SECURE_PROPS 0xEC901C
37
38#define mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS 0xEC9020
39
40#define mmTPC3_CMDQ_GLBL_STS0 0xEC9024
41
42#define mmTPC3_CMDQ_GLBL_STS1 0xEC9028
43
44#define mmTPC3_CMDQ_CQ_CFG0 0xEC90B0
45
46#define mmTPC3_CMDQ_CQ_CFG1 0xEC90B4
47
48#define mmTPC3_CMDQ_CQ_ARUSER 0xEC90B8
49
50#define mmTPC3_CMDQ_CQ_PTR_LO 0xEC90C0
51
52#define mmTPC3_CMDQ_CQ_PTR_HI 0xEC90C4
53
54#define mmTPC3_CMDQ_CQ_TSIZE 0xEC90C8
55
56#define mmTPC3_CMDQ_CQ_CTL 0xEC90CC
57
58#define mmTPC3_CMDQ_CQ_PTR_LO_STS 0xEC90D4
59
60#define mmTPC3_CMDQ_CQ_PTR_HI_STS 0xEC90D8
61
62#define mmTPC3_CMDQ_CQ_TSIZE_STS 0xEC90DC
63
64#define mmTPC3_CMDQ_CQ_CTL_STS 0xEC90E0
65
66#define mmTPC3_CMDQ_CQ_STS0 0xEC90E4
67
68#define mmTPC3_CMDQ_CQ_STS1 0xEC90E8
69
70#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_EN 0xEC90F0
71
72#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xEC90F4
73
74#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_SAT 0xEC90F8
75
76#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_TOUT 0xEC90FC
77
78#define mmTPC3_CMDQ_CQ_IFIFO_CNT 0xEC9108
79
80#define mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_LO 0xEC9120
81
82#define mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_HI 0xEC9124
83
84#define mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_LO 0xEC9128
85
86#define mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_HI 0xEC912C
87
88#define mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_LO 0xEC9130
89
90#define mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_HI 0xEC9134
91
92#define mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_LO 0xEC9138
93
94#define mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_HI 0xEC913C
95
96#define mmTPC3_CMDQ_CP_LDMA_TSIZE_OFFSET 0xEC9140
97
98#define mmTPC3_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xEC9144
99
100#define mmTPC3_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xEC9148
101
102#define mmTPC3_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xEC914C
103
104#define mmTPC3_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xEC9150
105
106#define mmTPC3_CMDQ_CP_LDMA_COMMIT_OFFSET 0xEC9154
107
108#define mmTPC3_CMDQ_CP_FENCE0_RDATA 0xEC9158
109
110#define mmTPC3_CMDQ_CP_FENCE1_RDATA 0xEC915C
111
112#define mmTPC3_CMDQ_CP_FENCE2_RDATA 0xEC9160
113
114#define mmTPC3_CMDQ_CP_FENCE3_RDATA 0xEC9164
115
116#define mmTPC3_CMDQ_CP_FENCE0_CNT 0xEC9168
117
118#define mmTPC3_CMDQ_CP_FENCE1_CNT 0xEC916C
119
120#define mmTPC3_CMDQ_CP_FENCE2_CNT 0xEC9170
121
122#define mmTPC3_CMDQ_CP_FENCE3_CNT 0xEC9174
123
124#define mmTPC3_CMDQ_CP_STS 0xEC9178
125
126#define mmTPC3_CMDQ_CP_CURRENT_INST_LO 0xEC917C
127
128#define mmTPC3_CMDQ_CP_CURRENT_INST_HI 0xEC9180
129
130#define mmTPC3_CMDQ_CP_BARRIER_CFG 0xEC9184
131
132#define mmTPC3_CMDQ_CP_DBG_0 0xEC9188
133
134#define mmTPC3_CMDQ_CQ_BUF_ADDR 0xEC9308
135
136#define mmTPC3_CMDQ_CQ_BUF_RDATA 0xEC930C
137
138#endif /* ASIC_REG_TPC3_CMDQ_REGS_H_ */
139
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
new file mode 100644
index 000000000000..b05b1e18e664
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
@@ -0,0 +1,179 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC3_QM_REGS_H_
14#define ASIC_REG_TPC3_QM_REGS_H_
15
16/*
17 *****************************************
18 * TPC3_QM (Prototype: QMAN)
19 *****************************************
20 */
21
22#define mmTPC3_QM_GLBL_CFG0 0xEC8000
23
24#define mmTPC3_QM_GLBL_CFG1 0xEC8004
25
26#define mmTPC3_QM_GLBL_PROT 0xEC8008
27
28#define mmTPC3_QM_GLBL_ERR_CFG 0xEC800C
29
30#define mmTPC3_QM_GLBL_ERR_ADDR_LO 0xEC8010
31
32#define mmTPC3_QM_GLBL_ERR_ADDR_HI 0xEC8014
33
34#define mmTPC3_QM_GLBL_ERR_WDATA 0xEC8018
35
36#define mmTPC3_QM_GLBL_SECURE_PROPS 0xEC801C
37
38#define mmTPC3_QM_GLBL_NON_SECURE_PROPS 0xEC8020
39
40#define mmTPC3_QM_GLBL_STS0 0xEC8024
41
42#define mmTPC3_QM_GLBL_STS1 0xEC8028
43
44#define mmTPC3_QM_PQ_BASE_LO 0xEC8060
45
46#define mmTPC3_QM_PQ_BASE_HI 0xEC8064
47
48#define mmTPC3_QM_PQ_SIZE 0xEC8068
49
50#define mmTPC3_QM_PQ_PI 0xEC806C
51
52#define mmTPC3_QM_PQ_CI 0xEC8070
53
54#define mmTPC3_QM_PQ_CFG0 0xEC8074
55
56#define mmTPC3_QM_PQ_CFG1 0xEC8078
57
58#define mmTPC3_QM_PQ_ARUSER 0xEC807C
59
60#define mmTPC3_QM_PQ_PUSH0 0xEC8080
61
62#define mmTPC3_QM_PQ_PUSH1 0xEC8084
63
64#define mmTPC3_QM_PQ_PUSH2 0xEC8088
65
66#define mmTPC3_QM_PQ_PUSH3 0xEC808C
67
68#define mmTPC3_QM_PQ_STS0 0xEC8090
69
70#define mmTPC3_QM_PQ_STS1 0xEC8094
71
72#define mmTPC3_QM_PQ_RD_RATE_LIM_EN 0xEC80A0
73
74#define mmTPC3_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xEC80A4
75
76#define mmTPC3_QM_PQ_RD_RATE_LIM_SAT 0xEC80A8
77
78#define mmTPC3_QM_PQ_RD_RATE_LIM_TOUT 0xEC80AC
79
80#define mmTPC3_QM_CQ_CFG0 0xEC80B0
81
82#define mmTPC3_QM_CQ_CFG1 0xEC80B4
83
84#define mmTPC3_QM_CQ_ARUSER 0xEC80B8
85
86#define mmTPC3_QM_CQ_PTR_LO 0xEC80C0
87
88#define mmTPC3_QM_CQ_PTR_HI 0xEC80C4
89
90#define mmTPC3_QM_CQ_TSIZE 0xEC80C8
91
92#define mmTPC3_QM_CQ_CTL 0xEC80CC
93
94#define mmTPC3_QM_CQ_PTR_LO_STS 0xEC80D4
95
96#define mmTPC3_QM_CQ_PTR_HI_STS 0xEC80D8
97
98#define mmTPC3_QM_CQ_TSIZE_STS 0xEC80DC
99
100#define mmTPC3_QM_CQ_CTL_STS 0xEC80E0
101
102#define mmTPC3_QM_CQ_STS0 0xEC80E4
103
104#define mmTPC3_QM_CQ_STS1 0xEC80E8
105
106#define mmTPC3_QM_CQ_RD_RATE_LIM_EN 0xEC80F0
107
108#define mmTPC3_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xEC80F4
109
110#define mmTPC3_QM_CQ_RD_RATE_LIM_SAT 0xEC80F8
111
112#define mmTPC3_QM_CQ_RD_RATE_LIM_TOUT 0xEC80FC
113
114#define mmTPC3_QM_CQ_IFIFO_CNT 0xEC8108
115
116#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO 0xEC8120
117
118#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI 0xEC8124
119
120#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO 0xEC8128
121
122#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI 0xEC812C
123
124#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO 0xEC8130
125
126#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI 0xEC8134
127
128#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO 0xEC8138
129
130#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI 0xEC813C
131
132#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET 0xEC8140
133
134#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xEC8144
135
136#define mmTPC3_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xEC8148
137
138#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xEC814C
139
140#define mmTPC3_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xEC8150
141
142#define mmTPC3_QM_CP_LDMA_COMMIT_OFFSET 0xEC8154
143
144#define mmTPC3_QM_CP_FENCE0_RDATA 0xEC8158
145
146#define mmTPC3_QM_CP_FENCE1_RDATA 0xEC815C
147
148#define mmTPC3_QM_CP_FENCE2_RDATA 0xEC8160
149
150#define mmTPC3_QM_CP_FENCE3_RDATA 0xEC8164
151
152#define mmTPC3_QM_CP_FENCE0_CNT 0xEC8168
153
154#define mmTPC3_QM_CP_FENCE1_CNT 0xEC816C
155
156#define mmTPC3_QM_CP_FENCE2_CNT 0xEC8170
157
158#define mmTPC3_QM_CP_FENCE3_CNT 0xEC8174
159
160#define mmTPC3_QM_CP_STS 0xEC8178
161
162#define mmTPC3_QM_CP_CURRENT_INST_LO 0xEC817C
163
164#define mmTPC3_QM_CP_CURRENT_INST_HI 0xEC8180
165
166#define mmTPC3_QM_CP_BARRIER_CFG 0xEC8184
167
168#define mmTPC3_QM_CP_DBG_0 0xEC8188
169
170#define mmTPC3_QM_PQ_BUF_ADDR 0xEC8300
171
172#define mmTPC3_QM_PQ_BUF_RDATA 0xEC8304
173
174#define mmTPC3_QM_CQ_BUF_ADDR 0xEC8308
175
176#define mmTPC3_QM_CQ_BUF_RDATA 0xEC830C
177
178#endif /* ASIC_REG_TPC3_QM_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
new file mode 100644
index 000000000000..5a2fd7652650
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
@@ -0,0 +1,323 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC3_RTR_REGS_H_
14#define ASIC_REG_TPC3_RTR_REGS_H_
15
16/*
17 *****************************************
18 * TPC3_RTR (Prototype: TPC_RTR)
19 *****************************************
20 */
21
22#define mmTPC3_RTR_HBW_RD_RQ_E_ARB 0xEC0100
23
24#define mmTPC3_RTR_HBW_RD_RQ_W_ARB 0xEC0104
25
26#define mmTPC3_RTR_HBW_RD_RQ_N_ARB 0xEC0108
27
28#define mmTPC3_RTR_HBW_RD_RQ_S_ARB 0xEC010C
29
30#define mmTPC3_RTR_HBW_RD_RQ_L_ARB 0xEC0110
31
32#define mmTPC3_RTR_HBW_E_ARB_MAX 0xEC0120
33
34#define mmTPC3_RTR_HBW_W_ARB_MAX 0xEC0124
35
36#define mmTPC3_RTR_HBW_N_ARB_MAX 0xEC0128
37
38#define mmTPC3_RTR_HBW_S_ARB_MAX 0xEC012C
39
40#define mmTPC3_RTR_HBW_L_ARB_MAX 0xEC0130
41
42#define mmTPC3_RTR_HBW_RD_RS_E_ARB 0xEC0140
43
44#define mmTPC3_RTR_HBW_RD_RS_W_ARB 0xEC0144
45
46#define mmTPC3_RTR_HBW_RD_RS_N_ARB 0xEC0148
47
48#define mmTPC3_RTR_HBW_RD_RS_S_ARB 0xEC014C
49
50#define mmTPC3_RTR_HBW_RD_RS_L_ARB 0xEC0150
51
52#define mmTPC3_RTR_HBW_WR_RQ_E_ARB 0xEC0170
53
54#define mmTPC3_RTR_HBW_WR_RQ_W_ARB 0xEC0174
55
56#define mmTPC3_RTR_HBW_WR_RQ_N_ARB 0xEC0178
57
58#define mmTPC3_RTR_HBW_WR_RQ_S_ARB 0xEC017C
59
60#define mmTPC3_RTR_HBW_WR_RQ_L_ARB 0xEC0180
61
62#define mmTPC3_RTR_HBW_WR_RS_E_ARB 0xEC0190
63
64#define mmTPC3_RTR_HBW_WR_RS_W_ARB 0xEC0194
65
66#define mmTPC3_RTR_HBW_WR_RS_N_ARB 0xEC0198
67
68#define mmTPC3_RTR_HBW_WR_RS_S_ARB 0xEC019C
69
70#define mmTPC3_RTR_HBW_WR_RS_L_ARB 0xEC01A0
71
72#define mmTPC3_RTR_LBW_RD_RQ_E_ARB 0xEC0200
73
74#define mmTPC3_RTR_LBW_RD_RQ_W_ARB 0xEC0204
75
76#define mmTPC3_RTR_LBW_RD_RQ_N_ARB 0xEC0208
77
78#define mmTPC3_RTR_LBW_RD_RQ_S_ARB 0xEC020C
79
80#define mmTPC3_RTR_LBW_RD_RQ_L_ARB 0xEC0210
81
82#define mmTPC3_RTR_LBW_E_ARB_MAX 0xEC0220
83
84#define mmTPC3_RTR_LBW_W_ARB_MAX 0xEC0224
85
86#define mmTPC3_RTR_LBW_N_ARB_MAX 0xEC0228
87
88#define mmTPC3_RTR_LBW_S_ARB_MAX 0xEC022C
89
90#define mmTPC3_RTR_LBW_L_ARB_MAX 0xEC0230
91
92#define mmTPC3_RTR_LBW_RD_RS_E_ARB 0xEC0250
93
94#define mmTPC3_RTR_LBW_RD_RS_W_ARB 0xEC0254
95
96#define mmTPC3_RTR_LBW_RD_RS_N_ARB 0xEC0258
97
98#define mmTPC3_RTR_LBW_RD_RS_S_ARB 0xEC025C
99
100#define mmTPC3_RTR_LBW_RD_RS_L_ARB 0xEC0260
101
102#define mmTPC3_RTR_LBW_WR_RQ_E_ARB 0xEC0270
103
104#define mmTPC3_RTR_LBW_WR_RQ_W_ARB 0xEC0274
105
106#define mmTPC3_RTR_LBW_WR_RQ_N_ARB 0xEC0278
107
108#define mmTPC3_RTR_LBW_WR_RQ_S_ARB 0xEC027C
109
110#define mmTPC3_RTR_LBW_WR_RQ_L_ARB 0xEC0280
111
112#define mmTPC3_RTR_LBW_WR_RS_E_ARB 0xEC0290
113
114#define mmTPC3_RTR_LBW_WR_RS_W_ARB 0xEC0294
115
116#define mmTPC3_RTR_LBW_WR_RS_N_ARB 0xEC0298
117
118#define mmTPC3_RTR_LBW_WR_RS_S_ARB 0xEC029C
119
120#define mmTPC3_RTR_LBW_WR_RS_L_ARB 0xEC02A0
121
122#define mmTPC3_RTR_DBG_E_ARB 0xEC0300
123
124#define mmTPC3_RTR_DBG_W_ARB 0xEC0304
125
126#define mmTPC3_RTR_DBG_N_ARB 0xEC0308
127
128#define mmTPC3_RTR_DBG_S_ARB 0xEC030C
129
130#define mmTPC3_RTR_DBG_L_ARB 0xEC0310
131
132#define mmTPC3_RTR_DBG_E_ARB_MAX 0xEC0320
133
134#define mmTPC3_RTR_DBG_W_ARB_MAX 0xEC0324
135
136#define mmTPC3_RTR_DBG_N_ARB_MAX 0xEC0328
137
138#define mmTPC3_RTR_DBG_S_ARB_MAX 0xEC032C
139
140#define mmTPC3_RTR_DBG_L_ARB_MAX 0xEC0330
141
142#define mmTPC3_RTR_SPLIT_COEF_0 0xEC0400
143
144#define mmTPC3_RTR_SPLIT_COEF_1 0xEC0404
145
146#define mmTPC3_RTR_SPLIT_COEF_2 0xEC0408
147
148#define mmTPC3_RTR_SPLIT_COEF_3 0xEC040C
149
150#define mmTPC3_RTR_SPLIT_COEF_4 0xEC0410
151
152#define mmTPC3_RTR_SPLIT_COEF_5 0xEC0414
153
154#define mmTPC3_RTR_SPLIT_COEF_6 0xEC0418
155
156#define mmTPC3_RTR_SPLIT_COEF_7 0xEC041C
157
158#define mmTPC3_RTR_SPLIT_COEF_8 0xEC0420
159
160#define mmTPC3_RTR_SPLIT_COEF_9 0xEC0424
161
162#define mmTPC3_RTR_SPLIT_CFG 0xEC0440
163
164#define mmTPC3_RTR_SPLIT_RD_SAT 0xEC0444
165
166#define mmTPC3_RTR_SPLIT_RD_RST_TOKEN 0xEC0448
167
168#define mmTPC3_RTR_SPLIT_RD_TIMEOUT_0 0xEC044C
169
170#define mmTPC3_RTR_SPLIT_RD_TIMEOUT_1 0xEC0450
171
172#define mmTPC3_RTR_SPLIT_WR_SAT 0xEC0454
173
174#define mmTPC3_RTR_WPLIT_WR_TST_TOLEN 0xEC0458
175
176#define mmTPC3_RTR_SPLIT_WR_TIMEOUT_0 0xEC045C
177
178#define mmTPC3_RTR_SPLIT_WR_TIMEOUT_1 0xEC0460
179
180#define mmTPC3_RTR_HBW_RANGE_HIT 0xEC0470
181
182#define mmTPC3_RTR_HBW_RANGE_MASK_L_0 0xEC0480
183
184#define mmTPC3_RTR_HBW_RANGE_MASK_L_1 0xEC0484
185
186#define mmTPC3_RTR_HBW_RANGE_MASK_L_2 0xEC0488
187
188#define mmTPC3_RTR_HBW_RANGE_MASK_L_3 0xEC048C
189
190#define mmTPC3_RTR_HBW_RANGE_MASK_L_4 0xEC0490
191
192#define mmTPC3_RTR_HBW_RANGE_MASK_L_5 0xEC0494
193
194#define mmTPC3_RTR_HBW_RANGE_MASK_L_6 0xEC0498
195
196#define mmTPC3_RTR_HBW_RANGE_MASK_L_7 0xEC049C
197
198#define mmTPC3_RTR_HBW_RANGE_MASK_H_0 0xEC04A0
199
200#define mmTPC3_RTR_HBW_RANGE_MASK_H_1 0xEC04A4
201
202#define mmTPC3_RTR_HBW_RANGE_MASK_H_2 0xEC04A8
203
204#define mmTPC3_RTR_HBW_RANGE_MASK_H_3 0xEC04AC
205
206#define mmTPC3_RTR_HBW_RANGE_MASK_H_4 0xEC04B0
207
208#define mmTPC3_RTR_HBW_RANGE_MASK_H_5 0xEC04B4
209
210#define mmTPC3_RTR_HBW_RANGE_MASK_H_6 0xEC04B8
211
212#define mmTPC3_RTR_HBW_RANGE_MASK_H_7 0xEC04BC
213
214#define mmTPC3_RTR_HBW_RANGE_BASE_L_0 0xEC04C0
215
216#define mmTPC3_RTR_HBW_RANGE_BASE_L_1 0xEC04C4
217
218#define mmTPC3_RTR_HBW_RANGE_BASE_L_2 0xEC04C8
219
220#define mmTPC3_RTR_HBW_RANGE_BASE_L_3 0xEC04CC
221
222#define mmTPC3_RTR_HBW_RANGE_BASE_L_4 0xEC04D0
223
224#define mmTPC3_RTR_HBW_RANGE_BASE_L_5 0xEC04D4
225
226#define mmTPC3_RTR_HBW_RANGE_BASE_L_6 0xEC04D8
227
228#define mmTPC3_RTR_HBW_RANGE_BASE_L_7 0xEC04DC
229
230#define mmTPC3_RTR_HBW_RANGE_BASE_H_0 0xEC04E0
231
232#define mmTPC3_RTR_HBW_RANGE_BASE_H_1 0xEC04E4
233
234#define mmTPC3_RTR_HBW_RANGE_BASE_H_2 0xEC04E8
235
236#define mmTPC3_RTR_HBW_RANGE_BASE_H_3 0xEC04EC
237
238#define mmTPC3_RTR_HBW_RANGE_BASE_H_4 0xEC04F0
239
240#define mmTPC3_RTR_HBW_RANGE_BASE_H_5 0xEC04F4
241
242#define mmTPC3_RTR_HBW_RANGE_BASE_H_6 0xEC04F8
243
244#define mmTPC3_RTR_HBW_RANGE_BASE_H_7 0xEC04FC
245
246#define mmTPC3_RTR_LBW_RANGE_HIT 0xEC0500
247
248#define mmTPC3_RTR_LBW_RANGE_MASK_0 0xEC0510
249
250#define mmTPC3_RTR_LBW_RANGE_MASK_1 0xEC0514
251
252#define mmTPC3_RTR_LBW_RANGE_MASK_2 0xEC0518
253
254#define mmTPC3_RTR_LBW_RANGE_MASK_3 0xEC051C
255
256#define mmTPC3_RTR_LBW_RANGE_MASK_4 0xEC0520
257
258#define mmTPC3_RTR_LBW_RANGE_MASK_5 0xEC0524
259
260#define mmTPC3_RTR_LBW_RANGE_MASK_6 0xEC0528
261
262#define mmTPC3_RTR_LBW_RANGE_MASK_7 0xEC052C
263
264#define mmTPC3_RTR_LBW_RANGE_MASK_8 0xEC0530
265
266#define mmTPC3_RTR_LBW_RANGE_MASK_9 0xEC0534
267
268#define mmTPC3_RTR_LBW_RANGE_MASK_10 0xEC0538
269
270#define mmTPC3_RTR_LBW_RANGE_MASK_11 0xEC053C
271
272#define mmTPC3_RTR_LBW_RANGE_MASK_12 0xEC0540
273
274#define mmTPC3_RTR_LBW_RANGE_MASK_13 0xEC0544
275
276#define mmTPC3_RTR_LBW_RANGE_MASK_14 0xEC0548
277
278#define mmTPC3_RTR_LBW_RANGE_MASK_15 0xEC054C
279
280#define mmTPC3_RTR_LBW_RANGE_BASE_0 0xEC0550
281
282#define mmTPC3_RTR_LBW_RANGE_BASE_1 0xEC0554
283
284#define mmTPC3_RTR_LBW_RANGE_BASE_2 0xEC0558
285
286#define mmTPC3_RTR_LBW_RANGE_BASE_3 0xEC055C
287
288#define mmTPC3_RTR_LBW_RANGE_BASE_4 0xEC0560
289
290#define mmTPC3_RTR_LBW_RANGE_BASE_5 0xEC0564
291
292#define mmTPC3_RTR_LBW_RANGE_BASE_6 0xEC0568
293
294#define mmTPC3_RTR_LBW_RANGE_BASE_7 0xEC056C
295
296#define mmTPC3_RTR_LBW_RANGE_BASE_8 0xEC0570
297
298#define mmTPC3_RTR_LBW_RANGE_BASE_9 0xEC0574
299
300#define mmTPC3_RTR_LBW_RANGE_BASE_10 0xEC0578
301
302#define mmTPC3_RTR_LBW_RANGE_BASE_11 0xEC057C
303
304#define mmTPC3_RTR_LBW_RANGE_BASE_12 0xEC0580
305
306#define mmTPC3_RTR_LBW_RANGE_BASE_13 0xEC0584
307
308#define mmTPC3_RTR_LBW_RANGE_BASE_14 0xEC0588
309
310#define mmTPC3_RTR_LBW_RANGE_BASE_15 0xEC058C
311
312#define mmTPC3_RTR_RGLTR 0xEC0590
313
314#define mmTPC3_RTR_RGLTR_WR_RESULT 0xEC0594
315
316#define mmTPC3_RTR_RGLTR_RD_RESULT 0xEC0598
317
318#define mmTPC3_RTR_SCRAMB_EN 0xEC0600
319
320#define mmTPC3_RTR_NON_LIN_SCRAMB 0xEC0604
321
322#endif /* ASIC_REG_TPC3_RTR_REGS_H_ */
323
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
new file mode 100644
index 000000000000..d64a100075f2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
@@ -0,0 +1,887 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC4_CFG_REGS_H_
14#define ASIC_REG_TPC4_CFG_REGS_H_
15
16/*
17 *****************************************
18 * TPC4_CFG (Prototype: TPC)
19 *****************************************
20 */
21
22#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF06400
23
24#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF06404
25
26#define mmTPC4_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF06408
27
28#define mmTPC4_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF0640C
29
30#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF06410
31
32#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF06414
33
34#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF06418
35
36#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF0641C
37
38#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF06420
39
40#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF06424
41
42#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF06428
43
44#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF0642C
45
46#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF06430
47
48#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF06434
49
50#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF06438
51
52#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF0643C
53
54#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF06440
55
56#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF06444
57
58#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF06448
59
60#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF0644C
61
62#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF06450
63
64#define mmTPC4_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF06454
65
66#define mmTPC4_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF06458
67
68#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF0645C
69
70#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF06460
71
72#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF06464
73
74#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF06468
75
76#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF0646C
77
78#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF06470
79
80#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF06474
81
82#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF06478
83
84#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF0647C
85
86#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF06480
87
88#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF06484
89
90#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF06488
91
92#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF0648C
93
94#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF06490
95
96#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF06494
97
98#define mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF06498
99
100#define mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF0649C
101
102#define mmTPC4_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF064A0
103
104#define mmTPC4_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF064A4
105
106#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF064A8
107
108#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF064AC
109
110#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF064B0
111
112#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF064B4
113
114#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF064B8
115
116#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF064BC
117
118#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF064C0
119
120#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF064C4
121
122#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF064C8
123
124#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF064CC
125
126#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF064D0
127
128#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF064D4
129
130#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF064D8
131
132#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF064DC
133
134#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF064E0
135
136#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF064E4
137
138#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF064E8
139
140#define mmTPC4_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF064EC
141
142#define mmTPC4_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF064F0
143
144#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF064F4
145
146#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF064F8
147
148#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF064FC
149
150#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF06500
151
152#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF06504
153
154#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF06508
155
156#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF0650C
157
158#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF06510
159
160#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF06514
161
162#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF06518
163
164#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF0651C
165
166#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF06520
167
168#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF06524
169
170#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF06528
171
172#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF0652C
173
174#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF06530
175
176#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF06534
177
178#define mmTPC4_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF06538
179
180#define mmTPC4_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF0653C
181
182#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF06540
183
184#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF06544
185
186#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF06548
187
188#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF0654C
189
190#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF06550
191
192#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF06554
193
194#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF06558
195
196#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF0655C
197
198#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF06560
199
200#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF06564
201
202#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF06568
203
204#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF0656C
205
206#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF06570
207
208#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF06574
209
210#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF06578
211
212#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF0657C
213
214#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF06580
215
216#define mmTPC4_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF06584
217
218#define mmTPC4_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF06588
219
220#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF0658C
221
222#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF06590
223
224#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF06594
225
226#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF06598
227
228#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF0659C
229
230#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF065A0
231
232#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF065A4
233
234#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF065A8
235
236#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF065AC
237
238#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF065B0
239
240#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF065B4
241
242#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF065B8
243
244#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF065BC
245
246#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF065C0
247
248#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF065C4
249
250#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF065C8
251
252#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF065CC
253
254#define mmTPC4_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF065D0
255
256#define mmTPC4_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF065D4
257
258#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF065D8
259
260#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF065DC
261
262#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF065E0
263
264#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF065E4
265
266#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF065E8
267
268#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF065EC
269
270#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF065F0
271
272#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF065F4
273
274#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF065F8
275
276#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF065FC
277
278#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF06600
279
280#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF06604
281
282#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF06608
283
284#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF0660C
285
286#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF06610
287
288#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF06614
289
290#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF06618
291
292#define mmTPC4_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF0661C
293
294#define mmTPC4_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF06620
295
296#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF06624
297
298#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF06628
299
300#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF0662C
301
302#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF06630
303
304#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF06634
305
306#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF06638
307
308#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF0663C
309
310#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF06640
311
312#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF06644
313
314#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF06648
315
316#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF0664C
317
318#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF06650
319
320#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF06654
321
322#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF06658
323
324#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF0665C
325
326#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF06660
327
328#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF06664
329
330#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_0 0xF06668
331
332#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_0 0xF0666C
333
334#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_1 0xF06670
335
336#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_1 0xF06674
337
338#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_2 0xF06678
339
340#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_2 0xF0667C
341
342#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_3 0xF06680
343
344#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_3 0xF06684
345
346#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_4 0xF06688
347
348#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_4 0xF0668C
349
350#define mmTPC4_CFG_KERNEL_SRF_0 0xF06690
351
352#define mmTPC4_CFG_KERNEL_SRF_1 0xF06694
353
354#define mmTPC4_CFG_KERNEL_SRF_2 0xF06698
355
356#define mmTPC4_CFG_KERNEL_SRF_3 0xF0669C
357
358#define mmTPC4_CFG_KERNEL_SRF_4 0xF066A0
359
360#define mmTPC4_CFG_KERNEL_SRF_5 0xF066A4
361
362#define mmTPC4_CFG_KERNEL_SRF_6 0xF066A8
363
364#define mmTPC4_CFG_KERNEL_SRF_7 0xF066AC
365
366#define mmTPC4_CFG_KERNEL_SRF_8 0xF066B0
367
368#define mmTPC4_CFG_KERNEL_SRF_9 0xF066B4
369
370#define mmTPC4_CFG_KERNEL_SRF_10 0xF066B8
371
372#define mmTPC4_CFG_KERNEL_SRF_11 0xF066BC
373
374#define mmTPC4_CFG_KERNEL_SRF_12 0xF066C0
375
376#define mmTPC4_CFG_KERNEL_SRF_13 0xF066C4
377
378#define mmTPC4_CFG_KERNEL_SRF_14 0xF066C8
379
380#define mmTPC4_CFG_KERNEL_SRF_15 0xF066CC
381
382#define mmTPC4_CFG_KERNEL_SRF_16 0xF066D0
383
384#define mmTPC4_CFG_KERNEL_SRF_17 0xF066D4
385
386#define mmTPC4_CFG_KERNEL_SRF_18 0xF066D8
387
388#define mmTPC4_CFG_KERNEL_SRF_19 0xF066DC
389
390#define mmTPC4_CFG_KERNEL_SRF_20 0xF066E0
391
392#define mmTPC4_CFG_KERNEL_SRF_21 0xF066E4
393
394#define mmTPC4_CFG_KERNEL_SRF_22 0xF066E8
395
396#define mmTPC4_CFG_KERNEL_SRF_23 0xF066EC
397
398#define mmTPC4_CFG_KERNEL_SRF_24 0xF066F0
399
400#define mmTPC4_CFG_KERNEL_SRF_25 0xF066F4
401
402#define mmTPC4_CFG_KERNEL_SRF_26 0xF066F8
403
404#define mmTPC4_CFG_KERNEL_SRF_27 0xF066FC
405
406#define mmTPC4_CFG_KERNEL_SRF_28 0xF06700
407
408#define mmTPC4_CFG_KERNEL_SRF_29 0xF06704
409
410#define mmTPC4_CFG_KERNEL_SRF_30 0xF06708
411
412#define mmTPC4_CFG_KERNEL_SRF_31 0xF0670C
413
414#define mmTPC4_CFG_KERNEL_KERNEL_CONFIG 0xF06710
415
416#define mmTPC4_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF06714
417
418#define mmTPC4_CFG_RESERVED_DESC_END 0xF06738
419
420#define mmTPC4_CFG_ROUND_CSR 0xF067FC
421
422#define mmTPC4_CFG_TBUF_BASE_ADDR_LOW 0xF06800
423
424#define mmTPC4_CFG_TBUF_BASE_ADDR_HIGH 0xF06804
425
426#define mmTPC4_CFG_SEMAPHORE 0xF06808
427
428#define mmTPC4_CFG_VFLAGS 0xF0680C
429
430#define mmTPC4_CFG_SFLAGS 0xF06810
431
432#define mmTPC4_CFG_LFSR_POLYNOM 0xF06818
433
434#define mmTPC4_CFG_STATUS 0xF0681C
435
436#define mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH 0xF06820
437
438#define mmTPC4_CFG_CFG_SUBTRACT_VALUE 0xF06824
439
440#define mmTPC4_CFG_SM_BASE_ADDRESS_LOW 0xF06828
441
442#define mmTPC4_CFG_SM_BASE_ADDRESS_HIGH 0xF0682C
443
444#define mmTPC4_CFG_TPC_CMD 0xF06830
445
446#define mmTPC4_CFG_TPC_EXECUTE 0xF06838
447
448#define mmTPC4_CFG_TPC_STALL 0xF0683C
449
450#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_LOW 0xF06840
451
452#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF06844
453
454#define mmTPC4_CFG_MSS_CONFIG 0xF06854
455
456#define mmTPC4_CFG_TPC_INTR_CAUSE 0xF06858
457
458#define mmTPC4_CFG_TPC_INTR_MASK 0xF0685C
459
460#define mmTPC4_CFG_TSB_CONFIG 0xF06860
461
462#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF06A00
463
464#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF06A04
465
466#define mmTPC4_CFG_QM_TENSOR_0_PADDING_VALUE 0xF06A08
467
468#define mmTPC4_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF06A0C
469
470#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF06A10
471
472#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF06A14
473
474#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF06A18
475
476#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF06A1C
477
478#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF06A20
479
480#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF06A24
481
482#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF06A28
483
484#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF06A2C
485
486#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF06A30
487
488#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF06A34
489
490#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF06A38
491
492#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF06A3C
493
494#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF06A40
495
496#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF06A44
497
498#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF06A48
499
500#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF06A4C
501
502#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF06A50
503
504#define mmTPC4_CFG_QM_TENSOR_1_PADDING_VALUE 0xF06A54
505
506#define mmTPC4_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF06A58
507
508#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF06A5C
509
510#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF06A60
511
512#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF06A64
513
514#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF06A68
515
516#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF06A6C
517
518#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF06A70
519
520#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF06A74
521
522#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF06A78
523
524#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF06A7C
525
526#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF06A80
527
528#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF06A84
529
530#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF06A88
531
532#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF06A8C
533
534#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF06A90
535
536#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF06A94
537
538#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF06A98
539
540#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF06A9C
541
542#define mmTPC4_CFG_QM_TENSOR_2_PADDING_VALUE 0xF06AA0
543
544#define mmTPC4_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF06AA4
545
546#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF06AA8
547
548#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF06AAC
549
550#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF06AB0
551
552#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF06AB4
553
554#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF06AB8
555
556#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF06ABC
557
558#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF06AC0
559
560#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF06AC4
561
562#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF06AC8
563
564#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF06ACC
565
566#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF06AD0
567
568#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF06AD4
569
570#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF06AD8
571
572#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF06ADC
573
574#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF06AE0
575
576#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF06AE4
577
578#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF06AE8
579
580#define mmTPC4_CFG_QM_TENSOR_3_PADDING_VALUE 0xF06AEC
581
582#define mmTPC4_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF06AF0
583
584#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF06AF4
585
586#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF06AF8
587
588#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF06AFC
589
590#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF06B00
591
592#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF06B04
593
594#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF06B08
595
596#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF06B0C
597
598#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF06B10
599
600#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF06B14
601
602#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF06B18
603
604#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF06B1C
605
606#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF06B20
607
608#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF06B24
609
610#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF06B28
611
612#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF06B2C
613
614#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF06B30
615
616#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF06B34
617
618#define mmTPC4_CFG_QM_TENSOR_4_PADDING_VALUE 0xF06B38
619
620#define mmTPC4_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF06B3C
621
622#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF06B40
623
624#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF06B44
625
626#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF06B48
627
628#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF06B4C
629
630#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF06B50
631
632#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF06B54
633
634#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF06B58
635
636#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF06B5C
637
638#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF06B60
639
640#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF06B64
641
642#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF06B68
643
644#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF06B6C
645
646#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF06B70
647
648#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF06B74
649
650#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF06B78
651
652#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF06B7C
653
654#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF06B80
655
656#define mmTPC4_CFG_QM_TENSOR_5_PADDING_VALUE 0xF06B84
657
658#define mmTPC4_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF06B88
659
660#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF06B8C
661
662#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF06B90
663
664#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF06B94
665
666#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF06B98
667
668#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF06B9C
669
670#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF06BA0
671
672#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF06BA4
673
674#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF06BA8
675
676#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF06BAC
677
678#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF06BB0
679
680#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF06BB4
681
682#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF06BB8
683
684#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF06BBC
685
686#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF06BC0
687
688#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF06BC4
689
690#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF06BC8
691
692#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF06BCC
693
694#define mmTPC4_CFG_QM_TENSOR_6_PADDING_VALUE 0xF06BD0
695
696#define mmTPC4_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF06BD4
697
698#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF06BD8
699
700#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF06BDC
701
702#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF06BE0
703
704#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF06BE4
705
706#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF06BE8
707
708#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF06BEC
709
710#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF06BF0
711
712#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF06BF4
713
714#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF06BF8
715
716#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF06BFC
717
718#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF06C00
719
720#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF06C04
721
722#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF06C08
723
724#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF06C0C
725
726#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF06C10
727
728#define mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF06C14
729
730#define mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF06C18
731
732#define mmTPC4_CFG_QM_TENSOR_7_PADDING_VALUE 0xF06C1C
733
734#define mmTPC4_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF06C20
735
736#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF06C24
737
738#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF06C28
739
740#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF06C2C
741
742#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF06C30
743
744#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF06C34
745
746#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF06C38
747
748#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF06C3C
749
750#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF06C40
751
752#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF06C44
753
754#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF06C48
755
756#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF06C4C
757
758#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF06C50
759
760#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF06C54
761
762#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF06C58
763
764#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF06C5C
765
766#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF06C60
767
768#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF06C64
769
770#define mmTPC4_CFG_QM_TID_BASE_DIM_0 0xF06C68
771
772#define mmTPC4_CFG_QM_TID_SIZE_DIM_0 0xF06C6C
773
774#define mmTPC4_CFG_QM_TID_BASE_DIM_1 0xF06C70
775
776#define mmTPC4_CFG_QM_TID_SIZE_DIM_1 0xF06C74
777
778#define mmTPC4_CFG_QM_TID_BASE_DIM_2 0xF06C78
779
780#define mmTPC4_CFG_QM_TID_SIZE_DIM_2 0xF06C7C
781
782#define mmTPC4_CFG_QM_TID_BASE_DIM_3 0xF06C80
783
784#define mmTPC4_CFG_QM_TID_SIZE_DIM_3 0xF06C84
785
786#define mmTPC4_CFG_QM_TID_BASE_DIM_4 0xF06C88
787
788#define mmTPC4_CFG_QM_TID_SIZE_DIM_4 0xF06C8C
789
790#define mmTPC4_CFG_QM_SRF_0 0xF06C90
791
792#define mmTPC4_CFG_QM_SRF_1 0xF06C94
793
794#define mmTPC4_CFG_QM_SRF_2 0xF06C98
795
796#define mmTPC4_CFG_QM_SRF_3 0xF06C9C
797
798#define mmTPC4_CFG_QM_SRF_4 0xF06CA0
799
800#define mmTPC4_CFG_QM_SRF_5 0xF06CA4
801
802#define mmTPC4_CFG_QM_SRF_6 0xF06CA8
803
804#define mmTPC4_CFG_QM_SRF_7 0xF06CAC
805
806#define mmTPC4_CFG_QM_SRF_8 0xF06CB0
807
808#define mmTPC4_CFG_QM_SRF_9 0xF06CB4
809
810#define mmTPC4_CFG_QM_SRF_10 0xF06CB8
811
812#define mmTPC4_CFG_QM_SRF_11 0xF06CBC
813
814#define mmTPC4_CFG_QM_SRF_12 0xF06CC0
815
816#define mmTPC4_CFG_QM_SRF_13 0xF06CC4
817
818#define mmTPC4_CFG_QM_SRF_14 0xF06CC8
819
820#define mmTPC4_CFG_QM_SRF_15 0xF06CCC
821
822#define mmTPC4_CFG_QM_SRF_16 0xF06CD0
823
824#define mmTPC4_CFG_QM_SRF_17 0xF06CD4
825
826#define mmTPC4_CFG_QM_SRF_18 0xF06CD8
827
828#define mmTPC4_CFG_QM_SRF_19 0xF06CDC
829
830#define mmTPC4_CFG_QM_SRF_20 0xF06CE0
831
832#define mmTPC4_CFG_QM_SRF_21 0xF06CE4
833
834#define mmTPC4_CFG_QM_SRF_22 0xF06CE8
835
836#define mmTPC4_CFG_QM_SRF_23 0xF06CEC
837
838#define mmTPC4_CFG_QM_SRF_24 0xF06CF0
839
840#define mmTPC4_CFG_QM_SRF_25 0xF06CF4
841
842#define mmTPC4_CFG_QM_SRF_26 0xF06CF8
843
844#define mmTPC4_CFG_QM_SRF_27 0xF06CFC
845
846#define mmTPC4_CFG_QM_SRF_28 0xF06D00
847
848#define mmTPC4_CFG_QM_SRF_29 0xF06D04
849
850#define mmTPC4_CFG_QM_SRF_30 0xF06D08
851
852#define mmTPC4_CFG_QM_SRF_31 0xF06D0C
853
854#define mmTPC4_CFG_QM_KERNEL_CONFIG 0xF06D10
855
856#define mmTPC4_CFG_QM_SYNC_OBJECT_MESSAGE 0xF06D14
857
858#define mmTPC4_CFG_ARUSER 0xF06D18
859
860#define mmTPC4_CFG_AWUSER 0xF06D1C
861
862#define mmTPC4_CFG_FUNC_MBIST_CNTRL 0xF06E00
863
864#define mmTPC4_CFG_FUNC_MBIST_PAT 0xF06E04
865
866#define mmTPC4_CFG_FUNC_MBIST_MEM_0 0xF06E08
867
868#define mmTPC4_CFG_FUNC_MBIST_MEM_1 0xF06E0C
869
870#define mmTPC4_CFG_FUNC_MBIST_MEM_2 0xF06E10
871
872#define mmTPC4_CFG_FUNC_MBIST_MEM_3 0xF06E14
873
874#define mmTPC4_CFG_FUNC_MBIST_MEM_4 0xF06E18
875
876#define mmTPC4_CFG_FUNC_MBIST_MEM_5 0xF06E1C
877
878#define mmTPC4_CFG_FUNC_MBIST_MEM_6 0xF06E20
879
880#define mmTPC4_CFG_FUNC_MBIST_MEM_7 0xF06E24
881
882#define mmTPC4_CFG_FUNC_MBIST_MEM_8 0xF06E28
883
884#define mmTPC4_CFG_FUNC_MBIST_MEM_9 0xF06E2C
885
886#endif /* ASIC_REG_TPC4_CFG_REGS_H_ */
887
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
new file mode 100644
index 000000000000..565b42885b0d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
@@ -0,0 +1,139 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC4_CMDQ_REGS_H_
14#define ASIC_REG_TPC4_CMDQ_REGS_H_
15
16/*
17 *****************************************
18 * TPC4_CMDQ (Prototype: CMDQ)
19 *****************************************
20 */
21
22#define mmTPC4_CMDQ_GLBL_CFG0 0xF09000
23
24#define mmTPC4_CMDQ_GLBL_CFG1 0xF09004
25
26#define mmTPC4_CMDQ_GLBL_PROT 0xF09008
27
28#define mmTPC4_CMDQ_GLBL_ERR_CFG 0xF0900C
29
30#define mmTPC4_CMDQ_GLBL_ERR_ADDR_LO 0xF09010
31
32#define mmTPC4_CMDQ_GLBL_ERR_ADDR_HI 0xF09014
33
34#define mmTPC4_CMDQ_GLBL_ERR_WDATA 0xF09018
35
36#define mmTPC4_CMDQ_GLBL_SECURE_PROPS 0xF0901C
37
38#define mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS 0xF09020
39
40#define mmTPC4_CMDQ_GLBL_STS0 0xF09024
41
42#define mmTPC4_CMDQ_GLBL_STS1 0xF09028
43
44#define mmTPC4_CMDQ_CQ_CFG0 0xF090B0
45
46#define mmTPC4_CMDQ_CQ_CFG1 0xF090B4
47
48#define mmTPC4_CMDQ_CQ_ARUSER 0xF090B8
49
50#define mmTPC4_CMDQ_CQ_PTR_LO 0xF090C0
51
52#define mmTPC4_CMDQ_CQ_PTR_HI 0xF090C4
53
54#define mmTPC4_CMDQ_CQ_TSIZE 0xF090C8
55
56#define mmTPC4_CMDQ_CQ_CTL 0xF090CC
57
58#define mmTPC4_CMDQ_CQ_PTR_LO_STS 0xF090D4
59
60#define mmTPC4_CMDQ_CQ_PTR_HI_STS 0xF090D8
61
62#define mmTPC4_CMDQ_CQ_TSIZE_STS 0xF090DC
63
64#define mmTPC4_CMDQ_CQ_CTL_STS 0xF090E0
65
66#define mmTPC4_CMDQ_CQ_STS0 0xF090E4
67
68#define mmTPC4_CMDQ_CQ_STS1 0xF090E8
69
70#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_EN 0xF090F0
71
72#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF090F4
73
74#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_SAT 0xF090F8
75
76#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF090FC
77
78#define mmTPC4_CMDQ_CQ_IFIFO_CNT 0xF09108
79
80#define mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF09120
81
82#define mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF09124
83
84#define mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF09128
85
86#define mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF0912C
87
88#define mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF09130
89
90#define mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF09134
91
92#define mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF09138
93
94#define mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF0913C
95
96#define mmTPC4_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF09140
97
98#define mmTPC4_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF09144
99
100#define mmTPC4_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF09148
101
102#define mmTPC4_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF0914C
103
104#define mmTPC4_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF09150
105
106#define mmTPC4_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF09154
107
108#define mmTPC4_CMDQ_CP_FENCE0_RDATA 0xF09158
109
110#define mmTPC4_CMDQ_CP_FENCE1_RDATA 0xF0915C
111
112#define mmTPC4_CMDQ_CP_FENCE2_RDATA 0xF09160
113
114#define mmTPC4_CMDQ_CP_FENCE3_RDATA 0xF09164
115
116#define mmTPC4_CMDQ_CP_FENCE0_CNT 0xF09168
117
118#define mmTPC4_CMDQ_CP_FENCE1_CNT 0xF0916C
119
120#define mmTPC4_CMDQ_CP_FENCE2_CNT 0xF09170
121
122#define mmTPC4_CMDQ_CP_FENCE3_CNT 0xF09174
123
124#define mmTPC4_CMDQ_CP_STS 0xF09178
125
126#define mmTPC4_CMDQ_CP_CURRENT_INST_LO 0xF0917C
127
128#define mmTPC4_CMDQ_CP_CURRENT_INST_HI 0xF09180
129
130#define mmTPC4_CMDQ_CP_BARRIER_CFG 0xF09184
131
132#define mmTPC4_CMDQ_CP_DBG_0 0xF09188
133
134#define mmTPC4_CMDQ_CQ_BUF_ADDR 0xF09308
135
136#define mmTPC4_CMDQ_CQ_BUF_RDATA 0xF0930C
137
138#endif /* ASIC_REG_TPC4_CMDQ_REGS_H_ */
139
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
new file mode 100644
index 000000000000..196da3f12710
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
@@ -0,0 +1,179 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC4_QM_REGS_H_
14#define ASIC_REG_TPC4_QM_REGS_H_
15
16/*
17 *****************************************
18 * TPC4_QM (Prototype: QMAN)
19 *****************************************
20 */
21
22#define mmTPC4_QM_GLBL_CFG0 0xF08000
23
24#define mmTPC4_QM_GLBL_CFG1 0xF08004
25
26#define mmTPC4_QM_GLBL_PROT 0xF08008
27
28#define mmTPC4_QM_GLBL_ERR_CFG 0xF0800C
29
30#define mmTPC4_QM_GLBL_ERR_ADDR_LO 0xF08010
31
32#define mmTPC4_QM_GLBL_ERR_ADDR_HI 0xF08014
33
34#define mmTPC4_QM_GLBL_ERR_WDATA 0xF08018
35
36#define mmTPC4_QM_GLBL_SECURE_PROPS 0xF0801C
37
38#define mmTPC4_QM_GLBL_NON_SECURE_PROPS 0xF08020
39
40#define mmTPC4_QM_GLBL_STS0 0xF08024
41
42#define mmTPC4_QM_GLBL_STS1 0xF08028
43
44#define mmTPC4_QM_PQ_BASE_LO 0xF08060
45
46#define mmTPC4_QM_PQ_BASE_HI 0xF08064
47
48#define mmTPC4_QM_PQ_SIZE 0xF08068
49
50#define mmTPC4_QM_PQ_PI 0xF0806C
51
52#define mmTPC4_QM_PQ_CI 0xF08070
53
54#define mmTPC4_QM_PQ_CFG0 0xF08074
55
56#define mmTPC4_QM_PQ_CFG1 0xF08078
57
58#define mmTPC4_QM_PQ_ARUSER 0xF0807C
59
60#define mmTPC4_QM_PQ_PUSH0 0xF08080
61
62#define mmTPC4_QM_PQ_PUSH1 0xF08084
63
64#define mmTPC4_QM_PQ_PUSH2 0xF08088
65
66#define mmTPC4_QM_PQ_PUSH3 0xF0808C
67
68#define mmTPC4_QM_PQ_STS0 0xF08090
69
70#define mmTPC4_QM_PQ_STS1 0xF08094
71
72#define mmTPC4_QM_PQ_RD_RATE_LIM_EN 0xF080A0
73
74#define mmTPC4_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF080A4
75
76#define mmTPC4_QM_PQ_RD_RATE_LIM_SAT 0xF080A8
77
78#define mmTPC4_QM_PQ_RD_RATE_LIM_TOUT 0xF080AC
79
80#define mmTPC4_QM_CQ_CFG0 0xF080B0
81
82#define mmTPC4_QM_CQ_CFG1 0xF080B4
83
84#define mmTPC4_QM_CQ_ARUSER 0xF080B8
85
86#define mmTPC4_QM_CQ_PTR_LO 0xF080C0
87
88#define mmTPC4_QM_CQ_PTR_HI 0xF080C4
89
90#define mmTPC4_QM_CQ_TSIZE 0xF080C8
91
92#define mmTPC4_QM_CQ_CTL 0xF080CC
93
94#define mmTPC4_QM_CQ_PTR_LO_STS 0xF080D4
95
96#define mmTPC4_QM_CQ_PTR_HI_STS 0xF080D8
97
98#define mmTPC4_QM_CQ_TSIZE_STS 0xF080DC
99
100#define mmTPC4_QM_CQ_CTL_STS 0xF080E0
101
102#define mmTPC4_QM_CQ_STS0 0xF080E4
103
104#define mmTPC4_QM_CQ_STS1 0xF080E8
105
106#define mmTPC4_QM_CQ_RD_RATE_LIM_EN 0xF080F0
107
108#define mmTPC4_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF080F4
109
110#define mmTPC4_QM_CQ_RD_RATE_LIM_SAT 0xF080F8
111
112#define mmTPC4_QM_CQ_RD_RATE_LIM_TOUT 0xF080FC
113
114#define mmTPC4_QM_CQ_IFIFO_CNT 0xF08108
115
116#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO 0xF08120
117
118#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI 0xF08124
119
120#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO 0xF08128
121
122#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI 0xF0812C
123
124#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO 0xF08130
125
126#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI 0xF08134
127
128#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO 0xF08138
129
130#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI 0xF0813C
131
132#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET 0xF08140
133
134#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF08144
135
136#define mmTPC4_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF08148
137
138#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF0814C
139
140#define mmTPC4_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF08150
141
142#define mmTPC4_QM_CP_LDMA_COMMIT_OFFSET 0xF08154
143
144#define mmTPC4_QM_CP_FENCE0_RDATA 0xF08158
145
146#define mmTPC4_QM_CP_FENCE1_RDATA 0xF0815C
147
148#define mmTPC4_QM_CP_FENCE2_RDATA 0xF08160
149
150#define mmTPC4_QM_CP_FENCE3_RDATA 0xF08164
151
152#define mmTPC4_QM_CP_FENCE0_CNT 0xF08168
153
154#define mmTPC4_QM_CP_FENCE1_CNT 0xF0816C
155
156#define mmTPC4_QM_CP_FENCE2_CNT 0xF08170
157
158#define mmTPC4_QM_CP_FENCE3_CNT 0xF08174
159
160#define mmTPC4_QM_CP_STS 0xF08178
161
162#define mmTPC4_QM_CP_CURRENT_INST_LO 0xF0817C
163
164#define mmTPC4_QM_CP_CURRENT_INST_HI 0xF08180
165
166#define mmTPC4_QM_CP_BARRIER_CFG 0xF08184
167
168#define mmTPC4_QM_CP_DBG_0 0xF08188
169
170#define mmTPC4_QM_PQ_BUF_ADDR 0xF08300
171
172#define mmTPC4_QM_PQ_BUF_RDATA 0xF08304
173
174#define mmTPC4_QM_CQ_BUF_ADDR 0xF08308
175
176#define mmTPC4_QM_CQ_BUF_RDATA 0xF0830C
177
178#endif /* ASIC_REG_TPC4_QM_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
new file mode 100644
index 000000000000..8b54041d144a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
@@ -0,0 +1,323 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC4_RTR_REGS_H_
14#define ASIC_REG_TPC4_RTR_REGS_H_
15
16/*
17 *****************************************
18 * TPC4_RTR (Prototype: TPC_RTR)
19 *****************************************
20 */
21
22#define mmTPC4_RTR_HBW_RD_RQ_E_ARB 0xF00100
23
24#define mmTPC4_RTR_HBW_RD_RQ_W_ARB 0xF00104
25
26#define mmTPC4_RTR_HBW_RD_RQ_N_ARB 0xF00108
27
28#define mmTPC4_RTR_HBW_RD_RQ_S_ARB 0xF0010C
29
30#define mmTPC4_RTR_HBW_RD_RQ_L_ARB 0xF00110
31
32#define mmTPC4_RTR_HBW_E_ARB_MAX 0xF00120
33
34#define mmTPC4_RTR_HBW_W_ARB_MAX 0xF00124
35
36#define mmTPC4_RTR_HBW_N_ARB_MAX 0xF00128
37
38#define mmTPC4_RTR_HBW_S_ARB_MAX 0xF0012C
39
40#define mmTPC4_RTR_HBW_L_ARB_MAX 0xF00130
41
42#define mmTPC4_RTR_HBW_RD_RS_E_ARB 0xF00140
43
44#define mmTPC4_RTR_HBW_RD_RS_W_ARB 0xF00144
45
46#define mmTPC4_RTR_HBW_RD_RS_N_ARB 0xF00148
47
48#define mmTPC4_RTR_HBW_RD_RS_S_ARB 0xF0014C
49
50#define mmTPC4_RTR_HBW_RD_RS_L_ARB 0xF00150
51
52#define mmTPC4_RTR_HBW_WR_RQ_E_ARB 0xF00170
53
54#define mmTPC4_RTR_HBW_WR_RQ_W_ARB 0xF00174
55
56#define mmTPC4_RTR_HBW_WR_RQ_N_ARB 0xF00178
57
58#define mmTPC4_RTR_HBW_WR_RQ_S_ARB 0xF0017C
59
60#define mmTPC4_RTR_HBW_WR_RQ_L_ARB 0xF00180
61
62#define mmTPC4_RTR_HBW_WR_RS_E_ARB 0xF00190
63
64#define mmTPC4_RTR_HBW_WR_RS_W_ARB 0xF00194
65
66#define mmTPC4_RTR_HBW_WR_RS_N_ARB 0xF00198
67
68#define mmTPC4_RTR_HBW_WR_RS_S_ARB 0xF0019C
69
70#define mmTPC4_RTR_HBW_WR_RS_L_ARB 0xF001A0
71
72#define mmTPC4_RTR_LBW_RD_RQ_E_ARB 0xF00200
73
74#define mmTPC4_RTR_LBW_RD_RQ_W_ARB 0xF00204
75
76#define mmTPC4_RTR_LBW_RD_RQ_N_ARB 0xF00208
77
78#define mmTPC4_RTR_LBW_RD_RQ_S_ARB 0xF0020C
79
80#define mmTPC4_RTR_LBW_RD_RQ_L_ARB 0xF00210
81
82#define mmTPC4_RTR_LBW_E_ARB_MAX 0xF00220
83
84#define mmTPC4_RTR_LBW_W_ARB_MAX 0xF00224
85
86#define mmTPC4_RTR_LBW_N_ARB_MAX 0xF00228
87
88#define mmTPC4_RTR_LBW_S_ARB_MAX 0xF0022C
89
90#define mmTPC4_RTR_LBW_L_ARB_MAX 0xF00230
91
92#define mmTPC4_RTR_LBW_RD_RS_E_ARB 0xF00250
93
94#define mmTPC4_RTR_LBW_RD_RS_W_ARB 0xF00254
95
96#define mmTPC4_RTR_LBW_RD_RS_N_ARB 0xF00258
97
98#define mmTPC4_RTR_LBW_RD_RS_S_ARB 0xF0025C
99
100#define mmTPC4_RTR_LBW_RD_RS_L_ARB 0xF00260
101
102#define mmTPC4_RTR_LBW_WR_RQ_E_ARB 0xF00270
103
104#define mmTPC4_RTR_LBW_WR_RQ_W_ARB 0xF00274
105
106#define mmTPC4_RTR_LBW_WR_RQ_N_ARB 0xF00278
107
108#define mmTPC4_RTR_LBW_WR_RQ_S_ARB 0xF0027C
109
110#define mmTPC4_RTR_LBW_WR_RQ_L_ARB 0xF00280
111
112#define mmTPC4_RTR_LBW_WR_RS_E_ARB 0xF00290
113
114#define mmTPC4_RTR_LBW_WR_RS_W_ARB 0xF00294
115
116#define mmTPC4_RTR_LBW_WR_RS_N_ARB 0xF00298
117
118#define mmTPC4_RTR_LBW_WR_RS_S_ARB 0xF0029C
119
120#define mmTPC4_RTR_LBW_WR_RS_L_ARB 0xF002A0
121
122#define mmTPC4_RTR_DBG_E_ARB 0xF00300
123
124#define mmTPC4_RTR_DBG_W_ARB 0xF00304
125
126#define mmTPC4_RTR_DBG_N_ARB 0xF00308
127
128#define mmTPC4_RTR_DBG_S_ARB 0xF0030C
129
130#define mmTPC4_RTR_DBG_L_ARB 0xF00310
131
132#define mmTPC4_RTR_DBG_E_ARB_MAX 0xF00320
133
134#define mmTPC4_RTR_DBG_W_ARB_MAX 0xF00324
135
136#define mmTPC4_RTR_DBG_N_ARB_MAX 0xF00328
137
138#define mmTPC4_RTR_DBG_S_ARB_MAX 0xF0032C
139
140#define mmTPC4_RTR_DBG_L_ARB_MAX 0xF00330
141
142#define mmTPC4_RTR_SPLIT_COEF_0 0xF00400
143
144#define mmTPC4_RTR_SPLIT_COEF_1 0xF00404
145
146#define mmTPC4_RTR_SPLIT_COEF_2 0xF00408
147
148#define mmTPC4_RTR_SPLIT_COEF_3 0xF0040C
149
150#define mmTPC4_RTR_SPLIT_COEF_4 0xF00410
151
152#define mmTPC4_RTR_SPLIT_COEF_5 0xF00414
153
154#define mmTPC4_RTR_SPLIT_COEF_6 0xF00418
155
156#define mmTPC4_RTR_SPLIT_COEF_7 0xF0041C
157
158#define mmTPC4_RTR_SPLIT_COEF_8 0xF00420
159
160#define mmTPC4_RTR_SPLIT_COEF_9 0xF00424
161
162#define mmTPC4_RTR_SPLIT_CFG 0xF00440
163
164#define mmTPC4_RTR_SPLIT_RD_SAT 0xF00444
165
166#define mmTPC4_RTR_SPLIT_RD_RST_TOKEN 0xF00448
167
168#define mmTPC4_RTR_SPLIT_RD_TIMEOUT_0 0xF0044C
169
170#define mmTPC4_RTR_SPLIT_RD_TIMEOUT_1 0xF00450
171
172#define mmTPC4_RTR_SPLIT_WR_SAT 0xF00454
173
174#define mmTPC4_RTR_WPLIT_WR_TST_TOLEN 0xF00458
175
176#define mmTPC4_RTR_SPLIT_WR_TIMEOUT_0 0xF0045C
177
178#define mmTPC4_RTR_SPLIT_WR_TIMEOUT_1 0xF00460
179
180#define mmTPC4_RTR_HBW_RANGE_HIT 0xF00470
181
182#define mmTPC4_RTR_HBW_RANGE_MASK_L_0 0xF00480
183
184#define mmTPC4_RTR_HBW_RANGE_MASK_L_1 0xF00484
185
186#define mmTPC4_RTR_HBW_RANGE_MASK_L_2 0xF00488
187
188#define mmTPC4_RTR_HBW_RANGE_MASK_L_3 0xF0048C
189
190#define mmTPC4_RTR_HBW_RANGE_MASK_L_4 0xF00490
191
192#define mmTPC4_RTR_HBW_RANGE_MASK_L_5 0xF00494
193
194#define mmTPC4_RTR_HBW_RANGE_MASK_L_6 0xF00498
195
196#define mmTPC4_RTR_HBW_RANGE_MASK_L_7 0xF0049C
197
198#define mmTPC4_RTR_HBW_RANGE_MASK_H_0 0xF004A0
199
200#define mmTPC4_RTR_HBW_RANGE_MASK_H_1 0xF004A4
201
202#define mmTPC4_RTR_HBW_RANGE_MASK_H_2 0xF004A8
203
204#define mmTPC4_RTR_HBW_RANGE_MASK_H_3 0xF004AC
205
206#define mmTPC4_RTR_HBW_RANGE_MASK_H_4 0xF004B0
207
208#define mmTPC4_RTR_HBW_RANGE_MASK_H_5 0xF004B4
209
210#define mmTPC4_RTR_HBW_RANGE_MASK_H_6 0xF004B8
211
212#define mmTPC4_RTR_HBW_RANGE_MASK_H_7 0xF004BC
213
214#define mmTPC4_RTR_HBW_RANGE_BASE_L_0 0xF004C0
215
216#define mmTPC4_RTR_HBW_RANGE_BASE_L_1 0xF004C4
217
218#define mmTPC4_RTR_HBW_RANGE_BASE_L_2 0xF004C8
219
220#define mmTPC4_RTR_HBW_RANGE_BASE_L_3 0xF004CC
221
222#define mmTPC4_RTR_HBW_RANGE_BASE_L_4 0xF004D0
223
224#define mmTPC4_RTR_HBW_RANGE_BASE_L_5 0xF004D4
225
226#define mmTPC4_RTR_HBW_RANGE_BASE_L_6 0xF004D8
227
228#define mmTPC4_RTR_HBW_RANGE_BASE_L_7 0xF004DC
229
230#define mmTPC4_RTR_HBW_RANGE_BASE_H_0 0xF004E0
231
232#define mmTPC4_RTR_HBW_RANGE_BASE_H_1 0xF004E4
233
234#define mmTPC4_RTR_HBW_RANGE_BASE_H_2 0xF004E8
235
236#define mmTPC4_RTR_HBW_RANGE_BASE_H_3 0xF004EC
237
238#define mmTPC4_RTR_HBW_RANGE_BASE_H_4 0xF004F0
239
240#define mmTPC4_RTR_HBW_RANGE_BASE_H_5 0xF004F4
241
242#define mmTPC4_RTR_HBW_RANGE_BASE_H_6 0xF004F8
243
244#define mmTPC4_RTR_HBW_RANGE_BASE_H_7 0xF004FC
245
246#define mmTPC4_RTR_LBW_RANGE_HIT 0xF00500
247
248#define mmTPC4_RTR_LBW_RANGE_MASK_0 0xF00510
249
250#define mmTPC4_RTR_LBW_RANGE_MASK_1 0xF00514
251
252#define mmTPC4_RTR_LBW_RANGE_MASK_2 0xF00518
253
254#define mmTPC4_RTR_LBW_RANGE_MASK_3 0xF0051C
255
256#define mmTPC4_RTR_LBW_RANGE_MASK_4 0xF00520
257
258#define mmTPC4_RTR_LBW_RANGE_MASK_5 0xF00524
259
260#define mmTPC4_RTR_LBW_RANGE_MASK_6 0xF00528
261
262#define mmTPC4_RTR_LBW_RANGE_MASK_7 0xF0052C
263
264#define mmTPC4_RTR_LBW_RANGE_MASK_8 0xF00530
265
266#define mmTPC4_RTR_LBW_RANGE_MASK_9 0xF00534
267
268#define mmTPC4_RTR_LBW_RANGE_MASK_10 0xF00538
269
270#define mmTPC4_RTR_LBW_RANGE_MASK_11 0xF0053C
271
272#define mmTPC4_RTR_LBW_RANGE_MASK_12 0xF00540
273
274#define mmTPC4_RTR_LBW_RANGE_MASK_13 0xF00544
275
276#define mmTPC4_RTR_LBW_RANGE_MASK_14 0xF00548
277
278#define mmTPC4_RTR_LBW_RANGE_MASK_15 0xF0054C
279
280#define mmTPC4_RTR_LBW_RANGE_BASE_0 0xF00550
281
282#define mmTPC4_RTR_LBW_RANGE_BASE_1 0xF00554
283
284#define mmTPC4_RTR_LBW_RANGE_BASE_2 0xF00558
285
286#define mmTPC4_RTR_LBW_RANGE_BASE_3 0xF0055C
287
288#define mmTPC4_RTR_LBW_RANGE_BASE_4 0xF00560
289
290#define mmTPC4_RTR_LBW_RANGE_BASE_5 0xF00564
291
292#define mmTPC4_RTR_LBW_RANGE_BASE_6 0xF00568
293
294#define mmTPC4_RTR_LBW_RANGE_BASE_7 0xF0056C
295
296#define mmTPC4_RTR_LBW_RANGE_BASE_8 0xF00570
297
298#define mmTPC4_RTR_LBW_RANGE_BASE_9 0xF00574
299
300#define mmTPC4_RTR_LBW_RANGE_BASE_10 0xF00578
301
302#define mmTPC4_RTR_LBW_RANGE_BASE_11 0xF0057C
303
304#define mmTPC4_RTR_LBW_RANGE_BASE_12 0xF00580
305
306#define mmTPC4_RTR_LBW_RANGE_BASE_13 0xF00584
307
308#define mmTPC4_RTR_LBW_RANGE_BASE_14 0xF00588
309
310#define mmTPC4_RTR_LBW_RANGE_BASE_15 0xF0058C
311
312#define mmTPC4_RTR_RGLTR 0xF00590
313
314#define mmTPC4_RTR_RGLTR_WR_RESULT 0xF00594
315
316#define mmTPC4_RTR_RGLTR_RD_RESULT 0xF00598
317
318#define mmTPC4_RTR_SCRAMB_EN 0xF00600
319
320#define mmTPC4_RTR_NON_LIN_SCRAMB 0xF00604
321
322#endif /* ASIC_REG_TPC4_RTR_REGS_H_ */
323
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
new file mode 100644
index 000000000000..3f00954fcdba
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
@@ -0,0 +1,887 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC5_CFG_REGS_H_
14#define ASIC_REG_TPC5_CFG_REGS_H_
15
16/*
17 *****************************************
18 * TPC5_CFG (Prototype: TPC)
19 *****************************************
20 */
21
22#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF46400
23
24#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF46404
25
26#define mmTPC5_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF46408
27
28#define mmTPC5_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF4640C
29
30#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF46410
31
32#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF46414
33
34#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF46418
35
36#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF4641C
37
38#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF46420
39
40#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF46424
41
42#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF46428
43
44#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF4642C
45
46#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF46430
47
48#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF46434
49
50#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF46438
51
52#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF4643C
53
54#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF46440
55
56#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF46444
57
58#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF46448
59
60#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF4644C
61
62#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF46450
63
64#define mmTPC5_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF46454
65
66#define mmTPC5_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF46458
67
68#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF4645C
69
70#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF46460
71
72#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF46464
73
74#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF46468
75
76#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF4646C
77
78#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF46470
79
80#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF46474
81
82#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF46478
83
84#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF4647C
85
86#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF46480
87
88#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF46484
89
90#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF46488
91
92#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF4648C
93
94#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF46490
95
96#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF46494
97
98#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF46498
99
100#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF4649C
101
102#define mmTPC5_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF464A0
103
104#define mmTPC5_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF464A4
105
106#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF464A8
107
108#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF464AC
109
110#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF464B0
111
112#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF464B4
113
114#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF464B8
115
116#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF464BC
117
118#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF464C0
119
120#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF464C4
121
122#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF464C8
123
124#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF464CC
125
126#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF464D0
127
128#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF464D4
129
130#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF464D8
131
132#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF464DC
133
134#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF464E0
135
136#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF464E4
137
138#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF464E8
139
140#define mmTPC5_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF464EC
141
142#define mmTPC5_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF464F0
143
144#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF464F4
145
146#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF464F8
147
148#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF464FC
149
150#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF46500
151
152#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF46504
153
154#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF46508
155
156#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF4650C
157
158#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF46510
159
160#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF46514
161
162#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF46518
163
164#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF4651C
165
166#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF46520
167
168#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF46524
169
170#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF46528
171
172#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF4652C
173
174#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF46530
175
176#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF46534
177
178#define mmTPC5_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF46538
179
180#define mmTPC5_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF4653C
181
182#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF46540
183
184#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF46544
185
186#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF46548
187
188#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF4654C
189
190#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF46550
191
192#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF46554
193
194#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF46558
195
196#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF4655C
197
198#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF46560
199
200#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF46564
201
202#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF46568
203
204#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF4656C
205
206#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF46570
207
208#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF46574
209
210#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF46578
211
212#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF4657C
213
214#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF46580
215
216#define mmTPC5_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF46584
217
218#define mmTPC5_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF46588
219
220#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF4658C
221
222#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF46590
223
224#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF46594
225
226#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF46598
227
228#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF4659C
229
230#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF465A0
231
232#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF465A4
233
234#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF465A8
235
236#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF465AC
237
238#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF465B0
239
240#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF465B4
241
242#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF465B8
243
244#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF465BC
245
246#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF465C0
247
248#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF465C4
249
250#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF465C8
251
252#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF465CC
253
254#define mmTPC5_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF465D0
255
256#define mmTPC5_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF465D4
257
258#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF465D8
259
260#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF465DC
261
262#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF465E0
263
264#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF465E4
265
266#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF465E8
267
268#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF465EC
269
270#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF465F0
271
272#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF465F4
273
274#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF465F8
275
276#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF465FC
277
278#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF46600
279
280#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF46604
281
282#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF46608
283
284#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF4660C
285
286#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF46610
287
288#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF46614
289
290#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF46618
291
292#define mmTPC5_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF4661C
293
294#define mmTPC5_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF46620
295
296#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF46624
297
298#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF46628
299
300#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF4662C
301
302#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF46630
303
304#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF46634
305
306#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF46638
307
308#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF4663C
309
310#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF46640
311
312#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF46644
313
314#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF46648
315
316#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF4664C
317
318#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF46650
319
320#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF46654
321
322#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF46658
323
324#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF4665C
325
326#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF46660
327
328#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF46664
329
330#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_0 0xF46668
331
332#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_0 0xF4666C
333
334#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_1 0xF46670
335
336#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_1 0xF46674
337
338#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_2 0xF46678
339
340#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_2 0xF4667C
341
342#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_3 0xF46680
343
344#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_3 0xF46684
345
346#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_4 0xF46688
347
348#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_4 0xF4668C
349
350#define mmTPC5_CFG_KERNEL_SRF_0 0xF46690
351
352#define mmTPC5_CFG_KERNEL_SRF_1 0xF46694
353
354#define mmTPC5_CFG_KERNEL_SRF_2 0xF46698
355
356#define mmTPC5_CFG_KERNEL_SRF_3 0xF4669C
357
358#define mmTPC5_CFG_KERNEL_SRF_4 0xF466A0
359
360#define mmTPC5_CFG_KERNEL_SRF_5 0xF466A4
361
362#define mmTPC5_CFG_KERNEL_SRF_6 0xF466A8
363
364#define mmTPC5_CFG_KERNEL_SRF_7 0xF466AC
365
366#define mmTPC5_CFG_KERNEL_SRF_8 0xF466B0
367
368#define mmTPC5_CFG_KERNEL_SRF_9 0xF466B4
369
370#define mmTPC5_CFG_KERNEL_SRF_10 0xF466B8
371
372#define mmTPC5_CFG_KERNEL_SRF_11 0xF466BC
373
374#define mmTPC5_CFG_KERNEL_SRF_12 0xF466C0
375
376#define mmTPC5_CFG_KERNEL_SRF_13 0xF466C4
377
378#define mmTPC5_CFG_KERNEL_SRF_14 0xF466C8
379
380#define mmTPC5_CFG_KERNEL_SRF_15 0xF466CC
381
382#define mmTPC5_CFG_KERNEL_SRF_16 0xF466D0
383
384#define mmTPC5_CFG_KERNEL_SRF_17 0xF466D4
385
386#define mmTPC5_CFG_KERNEL_SRF_18 0xF466D8
387
388#define mmTPC5_CFG_KERNEL_SRF_19 0xF466DC
389
390#define mmTPC5_CFG_KERNEL_SRF_20 0xF466E0
391
392#define mmTPC5_CFG_KERNEL_SRF_21 0xF466E4
393
394#define mmTPC5_CFG_KERNEL_SRF_22 0xF466E8
395
396#define mmTPC5_CFG_KERNEL_SRF_23 0xF466EC
397
398#define mmTPC5_CFG_KERNEL_SRF_24 0xF466F0
399
400#define mmTPC5_CFG_KERNEL_SRF_25 0xF466F4
401
402#define mmTPC5_CFG_KERNEL_SRF_26 0xF466F8
403
404#define mmTPC5_CFG_KERNEL_SRF_27 0xF466FC
405
406#define mmTPC5_CFG_KERNEL_SRF_28 0xF46700
407
408#define mmTPC5_CFG_KERNEL_SRF_29 0xF46704
409
410#define mmTPC5_CFG_KERNEL_SRF_30 0xF46708
411
412#define mmTPC5_CFG_KERNEL_SRF_31 0xF4670C
413
414#define mmTPC5_CFG_KERNEL_KERNEL_CONFIG 0xF46710
415
416#define mmTPC5_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF46714
417
418#define mmTPC5_CFG_RESERVED_DESC_END 0xF46738
419
420#define mmTPC5_CFG_ROUND_CSR 0xF467FC
421
422#define mmTPC5_CFG_TBUF_BASE_ADDR_LOW 0xF46800
423
424#define mmTPC5_CFG_TBUF_BASE_ADDR_HIGH 0xF46804
425
426#define mmTPC5_CFG_SEMAPHORE 0xF46808
427
428#define mmTPC5_CFG_VFLAGS 0xF4680C
429
430#define mmTPC5_CFG_SFLAGS 0xF46810
431
432#define mmTPC5_CFG_LFSR_POLYNOM 0xF46818
433
434#define mmTPC5_CFG_STATUS 0xF4681C
435
436#define mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH 0xF46820
437
438#define mmTPC5_CFG_CFG_SUBTRACT_VALUE 0xF46824
439
440#define mmTPC5_CFG_SM_BASE_ADDRESS_LOW 0xF46828
441
442#define mmTPC5_CFG_SM_BASE_ADDRESS_HIGH 0xF4682C
443
444#define mmTPC5_CFG_TPC_CMD 0xF46830
445
446#define mmTPC5_CFG_TPC_EXECUTE 0xF46838
447
448#define mmTPC5_CFG_TPC_STALL 0xF4683C
449
450#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_LOW 0xF46840
451
452#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF46844
453
454#define mmTPC5_CFG_MSS_CONFIG 0xF46854
455
456#define mmTPC5_CFG_TPC_INTR_CAUSE 0xF46858
457
458#define mmTPC5_CFG_TPC_INTR_MASK 0xF4685C
459
460#define mmTPC5_CFG_TSB_CONFIG 0xF46860
461
462#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF46A00
463
464#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF46A04
465
466#define mmTPC5_CFG_QM_TENSOR_0_PADDING_VALUE 0xF46A08
467
468#define mmTPC5_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF46A0C
469
470#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF46A10
471
472#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF46A14
473
474#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF46A18
475
476#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF46A1C
477
478#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF46A20
479
480#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF46A24
481
482#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF46A28
483
484#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF46A2C
485
486#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF46A30
487
488#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF46A34
489
490#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF46A38
491
492#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF46A3C
493
494#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF46A40
495
496#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF46A44
497
498#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF46A48
499
500#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF46A4C
501
502#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF46A50
503
504#define mmTPC5_CFG_QM_TENSOR_1_PADDING_VALUE 0xF46A54
505
506#define mmTPC5_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF46A58
507
508#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF46A5C
509
510#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF46A60
511
512#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF46A64
513
514#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF46A68
515
516#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF46A6C
517
518#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF46A70
519
520#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF46A74
521
522#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF46A78
523
524#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF46A7C
525
526#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF46A80
527
528#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF46A84
529
530#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF46A88
531
532#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF46A8C
533
534#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF46A90
535
536#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF46A94
537
538#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF46A98
539
540#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF46A9C
541
542#define mmTPC5_CFG_QM_TENSOR_2_PADDING_VALUE 0xF46AA0
543
544#define mmTPC5_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF46AA4
545
546#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF46AA8
547
548#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF46AAC
549
550#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF46AB0
551
552#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF46AB4
553
554#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF46AB8
555
556#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF46ABC
557
558#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF46AC0
559
560#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF46AC4
561
562#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF46AC8
563
564#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF46ACC
565
566#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF46AD0
567
568#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF46AD4
569
570#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF46AD8
571
572#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF46ADC
573
574#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF46AE0
575
576#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF46AE4
577
578#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF46AE8
579
580#define mmTPC5_CFG_QM_TENSOR_3_PADDING_VALUE 0xF46AEC
581
582#define mmTPC5_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF46AF0
583
584#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF46AF4
585
586#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF46AF8
587
588#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF46AFC
589
590#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF46B00
591
592#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF46B04
593
594#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF46B08
595
596#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF46B0C
597
598#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF46B10
599
600#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF46B14
601
602#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF46B18
603
604#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF46B1C
605
606#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF46B20
607
608#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF46B24
609
610#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF46B28
611
612#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF46B2C
613
614#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF46B30
615
616#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF46B34
617
618#define mmTPC5_CFG_QM_TENSOR_4_PADDING_VALUE 0xF46B38
619
620#define mmTPC5_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF46B3C
621
622#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF46B40
623
624#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF46B44
625
626#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF46B48
627
628#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF46B4C
629
630#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF46B50
631
632#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF46B54
633
634#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF46B58
635
636#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF46B5C
637
638#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF46B60
639
640#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF46B64
641
642#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF46B68
643
644#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF46B6C
645
646#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF46B70
647
648#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF46B74
649
650#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF46B78
651
652#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF46B7C
653
654#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF46B80
655
656#define mmTPC5_CFG_QM_TENSOR_5_PADDING_VALUE 0xF46B84
657
658#define mmTPC5_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF46B88
659
660#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF46B8C
661
662#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF46B90
663
664#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF46B94
665
666#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF46B98
667
668#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF46B9C
669
670#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF46BA0
671
672#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF46BA4
673
674#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF46BA8
675
676#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF46BAC
677
678#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF46BB0
679
680#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF46BB4
681
682#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF46BB8
683
684#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF46BBC
685
686#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF46BC0
687
688#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF46BC4
689
690#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF46BC8
691
692#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF46BCC
693
694#define mmTPC5_CFG_QM_TENSOR_6_PADDING_VALUE 0xF46BD0
695
696#define mmTPC5_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF46BD4
697
698#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF46BD8
699
700#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF46BDC
701
702#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF46BE0
703
704#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF46BE4
705
706#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF46BE8
707
708#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF46BEC
709
710#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF46BF0
711
712#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF46BF4
713
714#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF46BF8
715
716#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF46BFC
717
718#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF46C00
719
720#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF46C04
721
722#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF46C08
723
724#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF46C0C
725
726#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF46C10
727
728#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF46C14
729
730#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF46C18
731
732#define mmTPC5_CFG_QM_TENSOR_7_PADDING_VALUE 0xF46C1C
733
734#define mmTPC5_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF46C20
735
736#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF46C24
737
738#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF46C28
739
740#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF46C2C
741
742#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF46C30
743
744#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF46C34
745
746#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF46C38
747
748#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF46C3C
749
750#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF46C40
751
752#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF46C44
753
754#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF46C48
755
756#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF46C4C
757
758#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF46C50
759
760#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF46C54
761
762#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF46C58
763
764#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF46C5C
765
766#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF46C60
767
768#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF46C64
769
770#define mmTPC5_CFG_QM_TID_BASE_DIM_0 0xF46C68
771
772#define mmTPC5_CFG_QM_TID_SIZE_DIM_0 0xF46C6C
773
774#define mmTPC5_CFG_QM_TID_BASE_DIM_1 0xF46C70
775
776#define mmTPC5_CFG_QM_TID_SIZE_DIM_1 0xF46C74
777
778#define mmTPC5_CFG_QM_TID_BASE_DIM_2 0xF46C78
779
780#define mmTPC5_CFG_QM_TID_SIZE_DIM_2 0xF46C7C
781
782#define mmTPC5_CFG_QM_TID_BASE_DIM_3 0xF46C80
783
784#define mmTPC5_CFG_QM_TID_SIZE_DIM_3 0xF46C84
785
786#define mmTPC5_CFG_QM_TID_BASE_DIM_4 0xF46C88
787
788#define mmTPC5_CFG_QM_TID_SIZE_DIM_4 0xF46C8C
789
790#define mmTPC5_CFG_QM_SRF_0 0xF46C90
791
792#define mmTPC5_CFG_QM_SRF_1 0xF46C94
793
794#define mmTPC5_CFG_QM_SRF_2 0xF46C98
795
796#define mmTPC5_CFG_QM_SRF_3 0xF46C9C
797
798#define mmTPC5_CFG_QM_SRF_4 0xF46CA0
799
800#define mmTPC5_CFG_QM_SRF_5 0xF46CA4
801
802#define mmTPC5_CFG_QM_SRF_6 0xF46CA8
803
804#define mmTPC5_CFG_QM_SRF_7 0xF46CAC
805
806#define mmTPC5_CFG_QM_SRF_8 0xF46CB0
807
808#define mmTPC5_CFG_QM_SRF_9 0xF46CB4
809
810#define mmTPC5_CFG_QM_SRF_10 0xF46CB8
811
812#define mmTPC5_CFG_QM_SRF_11 0xF46CBC
813
814#define mmTPC5_CFG_QM_SRF_12 0xF46CC0
815
816#define mmTPC5_CFG_QM_SRF_13 0xF46CC4
817
818#define mmTPC5_CFG_QM_SRF_14 0xF46CC8
819
820#define mmTPC5_CFG_QM_SRF_15 0xF46CCC
821
822#define mmTPC5_CFG_QM_SRF_16 0xF46CD0
823
824#define mmTPC5_CFG_QM_SRF_17 0xF46CD4
825
826#define mmTPC5_CFG_QM_SRF_18 0xF46CD8
827
828#define mmTPC5_CFG_QM_SRF_19 0xF46CDC
829
830#define mmTPC5_CFG_QM_SRF_20 0xF46CE0
831
832#define mmTPC5_CFG_QM_SRF_21 0xF46CE4
833
834#define mmTPC5_CFG_QM_SRF_22 0xF46CE8
835
836#define mmTPC5_CFG_QM_SRF_23 0xF46CEC
837
838#define mmTPC5_CFG_QM_SRF_24 0xF46CF0
839
840#define mmTPC5_CFG_QM_SRF_25 0xF46CF4
841
842#define mmTPC5_CFG_QM_SRF_26 0xF46CF8
843
844#define mmTPC5_CFG_QM_SRF_27 0xF46CFC
845
846#define mmTPC5_CFG_QM_SRF_28 0xF46D00
847
848#define mmTPC5_CFG_QM_SRF_29 0xF46D04
849
850#define mmTPC5_CFG_QM_SRF_30 0xF46D08
851
852#define mmTPC5_CFG_QM_SRF_31 0xF46D0C
853
854#define mmTPC5_CFG_QM_KERNEL_CONFIG 0xF46D10
855
856#define mmTPC5_CFG_QM_SYNC_OBJECT_MESSAGE 0xF46D14
857
858#define mmTPC5_CFG_ARUSER 0xF46D18
859
860#define mmTPC5_CFG_AWUSER 0xF46D1C
861
862#define mmTPC5_CFG_FUNC_MBIST_CNTRL 0xF46E00
863
864#define mmTPC5_CFG_FUNC_MBIST_PAT 0xF46E04
865
866#define mmTPC5_CFG_FUNC_MBIST_MEM_0 0xF46E08
867
868#define mmTPC5_CFG_FUNC_MBIST_MEM_1 0xF46E0C
869
870#define mmTPC5_CFG_FUNC_MBIST_MEM_2 0xF46E10
871
872#define mmTPC5_CFG_FUNC_MBIST_MEM_3 0xF46E14
873
874#define mmTPC5_CFG_FUNC_MBIST_MEM_4 0xF46E18
875
876#define mmTPC5_CFG_FUNC_MBIST_MEM_5 0xF46E1C
877
878#define mmTPC5_CFG_FUNC_MBIST_MEM_6 0xF46E20
879
880#define mmTPC5_CFG_FUNC_MBIST_MEM_7 0xF46E24
881
882#define mmTPC5_CFG_FUNC_MBIST_MEM_8 0xF46E28
883
884#define mmTPC5_CFG_FUNC_MBIST_MEM_9 0xF46E2C
885
886#endif /* ASIC_REG_TPC5_CFG_REGS_H_ */
887
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
new file mode 100644
index 000000000000..d8e72a8e18d7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
@@ -0,0 +1,139 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC5_CMDQ_REGS_H_
14#define ASIC_REG_TPC5_CMDQ_REGS_H_
15
16/*
17 *****************************************
18 * TPC5_CMDQ (Prototype: CMDQ)
19 *****************************************
20 */
21
22#define mmTPC5_CMDQ_GLBL_CFG0 0xF49000
23
24#define mmTPC5_CMDQ_GLBL_CFG1 0xF49004
25
26#define mmTPC5_CMDQ_GLBL_PROT 0xF49008
27
28#define mmTPC5_CMDQ_GLBL_ERR_CFG 0xF4900C
29
30#define mmTPC5_CMDQ_GLBL_ERR_ADDR_LO 0xF49010
31
32#define mmTPC5_CMDQ_GLBL_ERR_ADDR_HI 0xF49014
33
34#define mmTPC5_CMDQ_GLBL_ERR_WDATA 0xF49018
35
36#define mmTPC5_CMDQ_GLBL_SECURE_PROPS 0xF4901C
37
38#define mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS 0xF49020
39
40#define mmTPC5_CMDQ_GLBL_STS0 0xF49024
41
42#define mmTPC5_CMDQ_GLBL_STS1 0xF49028
43
44#define mmTPC5_CMDQ_CQ_CFG0 0xF490B0
45
46#define mmTPC5_CMDQ_CQ_CFG1 0xF490B4
47
48#define mmTPC5_CMDQ_CQ_ARUSER 0xF490B8
49
50#define mmTPC5_CMDQ_CQ_PTR_LO 0xF490C0
51
52#define mmTPC5_CMDQ_CQ_PTR_HI 0xF490C4
53
54#define mmTPC5_CMDQ_CQ_TSIZE 0xF490C8
55
56#define mmTPC5_CMDQ_CQ_CTL 0xF490CC
57
58#define mmTPC5_CMDQ_CQ_PTR_LO_STS 0xF490D4
59
60#define mmTPC5_CMDQ_CQ_PTR_HI_STS 0xF490D8
61
62#define mmTPC5_CMDQ_CQ_TSIZE_STS 0xF490DC
63
64#define mmTPC5_CMDQ_CQ_CTL_STS 0xF490E0
65
66#define mmTPC5_CMDQ_CQ_STS0 0xF490E4
67
68#define mmTPC5_CMDQ_CQ_STS1 0xF490E8
69
70#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_EN 0xF490F0
71
72#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF490F4
73
74#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_SAT 0xF490F8
75
76#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF490FC
77
78#define mmTPC5_CMDQ_CQ_IFIFO_CNT 0xF49108
79
80#define mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF49120
81
82#define mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF49124
83
84#define mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF49128
85
86#define mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF4912C
87
88#define mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF49130
89
90#define mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF49134
91
92#define mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF49138
93
94#define mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF4913C
95
96#define mmTPC5_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF49140
97
98#define mmTPC5_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF49144
99
100#define mmTPC5_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF49148
101
102#define mmTPC5_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF4914C
103
104#define mmTPC5_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF49150
105
106#define mmTPC5_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF49154
107
108#define mmTPC5_CMDQ_CP_FENCE0_RDATA 0xF49158
109
110#define mmTPC5_CMDQ_CP_FENCE1_RDATA 0xF4915C
111
112#define mmTPC5_CMDQ_CP_FENCE2_RDATA 0xF49160
113
114#define mmTPC5_CMDQ_CP_FENCE3_RDATA 0xF49164
115
116#define mmTPC5_CMDQ_CP_FENCE0_CNT 0xF49168
117
118#define mmTPC5_CMDQ_CP_FENCE1_CNT 0xF4916C
119
120#define mmTPC5_CMDQ_CP_FENCE2_CNT 0xF49170
121
122#define mmTPC5_CMDQ_CP_FENCE3_CNT 0xF49174
123
124#define mmTPC5_CMDQ_CP_STS 0xF49178
125
126#define mmTPC5_CMDQ_CP_CURRENT_INST_LO 0xF4917C
127
128#define mmTPC5_CMDQ_CP_CURRENT_INST_HI 0xF49180
129
130#define mmTPC5_CMDQ_CP_BARRIER_CFG 0xF49184
131
132#define mmTPC5_CMDQ_CP_DBG_0 0xF49188
133
134#define mmTPC5_CMDQ_CQ_BUF_ADDR 0xF49308
135
136#define mmTPC5_CMDQ_CQ_BUF_RDATA 0xF4930C
137
138#endif /* ASIC_REG_TPC5_CMDQ_REGS_H_ */
139
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
new file mode 100644
index 000000000000..be2e68624709
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
@@ -0,0 +1,179 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC5_QM_REGS_H_
14#define ASIC_REG_TPC5_QM_REGS_H_
15
16/*
17 *****************************************
18 * TPC5_QM (Prototype: QMAN)
19 *****************************************
20 */
21
22#define mmTPC5_QM_GLBL_CFG0 0xF48000
23
24#define mmTPC5_QM_GLBL_CFG1 0xF48004
25
26#define mmTPC5_QM_GLBL_PROT 0xF48008
27
28#define mmTPC5_QM_GLBL_ERR_CFG 0xF4800C
29
30#define mmTPC5_QM_GLBL_ERR_ADDR_LO 0xF48010
31
32#define mmTPC5_QM_GLBL_ERR_ADDR_HI 0xF48014
33
34#define mmTPC5_QM_GLBL_ERR_WDATA 0xF48018
35
36#define mmTPC5_QM_GLBL_SECURE_PROPS 0xF4801C
37
38#define mmTPC5_QM_GLBL_NON_SECURE_PROPS 0xF48020
39
40#define mmTPC5_QM_GLBL_STS0 0xF48024
41
42#define mmTPC5_QM_GLBL_STS1 0xF48028
43
44#define mmTPC5_QM_PQ_BASE_LO 0xF48060
45
46#define mmTPC5_QM_PQ_BASE_HI 0xF48064
47
48#define mmTPC5_QM_PQ_SIZE 0xF48068
49
50#define mmTPC5_QM_PQ_PI 0xF4806C
51
52#define mmTPC5_QM_PQ_CI 0xF48070
53
54#define mmTPC5_QM_PQ_CFG0 0xF48074
55
56#define mmTPC5_QM_PQ_CFG1 0xF48078
57
58#define mmTPC5_QM_PQ_ARUSER 0xF4807C
59
60#define mmTPC5_QM_PQ_PUSH0 0xF48080
61
62#define mmTPC5_QM_PQ_PUSH1 0xF48084
63
64#define mmTPC5_QM_PQ_PUSH2 0xF48088
65
66#define mmTPC5_QM_PQ_PUSH3 0xF4808C
67
68#define mmTPC5_QM_PQ_STS0 0xF48090
69
70#define mmTPC5_QM_PQ_STS1 0xF48094
71
72#define mmTPC5_QM_PQ_RD_RATE_LIM_EN 0xF480A0
73
74#define mmTPC5_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF480A4
75
76#define mmTPC5_QM_PQ_RD_RATE_LIM_SAT 0xF480A8
77
78#define mmTPC5_QM_PQ_RD_RATE_LIM_TOUT 0xF480AC
79
80#define mmTPC5_QM_CQ_CFG0 0xF480B0
81
82#define mmTPC5_QM_CQ_CFG1 0xF480B4
83
84#define mmTPC5_QM_CQ_ARUSER 0xF480B8
85
86#define mmTPC5_QM_CQ_PTR_LO 0xF480C0
87
88#define mmTPC5_QM_CQ_PTR_HI 0xF480C4
89
90#define mmTPC5_QM_CQ_TSIZE 0xF480C8
91
92#define mmTPC5_QM_CQ_CTL 0xF480CC
93
94#define mmTPC5_QM_CQ_PTR_LO_STS 0xF480D4
95
96#define mmTPC5_QM_CQ_PTR_HI_STS 0xF480D8
97
98#define mmTPC5_QM_CQ_TSIZE_STS 0xF480DC
99
100#define mmTPC5_QM_CQ_CTL_STS 0xF480E0
101
102#define mmTPC5_QM_CQ_STS0 0xF480E4
103
104#define mmTPC5_QM_CQ_STS1 0xF480E8
105
106#define mmTPC5_QM_CQ_RD_RATE_LIM_EN 0xF480F0
107
108#define mmTPC5_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF480F4
109
110#define mmTPC5_QM_CQ_RD_RATE_LIM_SAT 0xF480F8
111
112#define mmTPC5_QM_CQ_RD_RATE_LIM_TOUT 0xF480FC
113
114#define mmTPC5_QM_CQ_IFIFO_CNT 0xF48108
115
116#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO 0xF48120
117
118#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI 0xF48124
119
120#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO 0xF48128
121
122#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI 0xF4812C
123
124#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO 0xF48130
125
126#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI 0xF48134
127
128#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO 0xF48138
129
130#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI 0xF4813C
131
132#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET 0xF48140
133
134#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF48144
135
136#define mmTPC5_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF48148
137
138#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF4814C
139
140#define mmTPC5_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF48150
141
142#define mmTPC5_QM_CP_LDMA_COMMIT_OFFSET 0xF48154
143
144#define mmTPC5_QM_CP_FENCE0_RDATA 0xF48158
145
146#define mmTPC5_QM_CP_FENCE1_RDATA 0xF4815C
147
148#define mmTPC5_QM_CP_FENCE2_RDATA 0xF48160
149
150#define mmTPC5_QM_CP_FENCE3_RDATA 0xF48164
151
152#define mmTPC5_QM_CP_FENCE0_CNT 0xF48168
153
154#define mmTPC5_QM_CP_FENCE1_CNT 0xF4816C
155
156#define mmTPC5_QM_CP_FENCE2_CNT 0xF48170
157
158#define mmTPC5_QM_CP_FENCE3_CNT 0xF48174
159
160#define mmTPC5_QM_CP_STS 0xF48178
161
162#define mmTPC5_QM_CP_CURRENT_INST_LO 0xF4817C
163
164#define mmTPC5_QM_CP_CURRENT_INST_HI 0xF48180
165
166#define mmTPC5_QM_CP_BARRIER_CFG 0xF48184
167
168#define mmTPC5_QM_CP_DBG_0 0xF48188
169
170#define mmTPC5_QM_PQ_BUF_ADDR 0xF48300
171
172#define mmTPC5_QM_PQ_BUF_RDATA 0xF48304
173
174#define mmTPC5_QM_CQ_BUF_ADDR 0xF48308
175
176#define mmTPC5_QM_CQ_BUF_RDATA 0xF4830C
177
178#endif /* ASIC_REG_TPC5_QM_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
new file mode 100644
index 000000000000..6f301c7bbc2f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
@@ -0,0 +1,323 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC5_RTR_REGS_H_
14#define ASIC_REG_TPC5_RTR_REGS_H_
15
16/*
17 *****************************************
18 * TPC5_RTR (Prototype: TPC_RTR)
19 *****************************************
20 */
21
22#define mmTPC5_RTR_HBW_RD_RQ_E_ARB 0xF40100
23
24#define mmTPC5_RTR_HBW_RD_RQ_W_ARB 0xF40104
25
26#define mmTPC5_RTR_HBW_RD_RQ_N_ARB 0xF40108
27
28#define mmTPC5_RTR_HBW_RD_RQ_S_ARB 0xF4010C
29
30#define mmTPC5_RTR_HBW_RD_RQ_L_ARB 0xF40110
31
32#define mmTPC5_RTR_HBW_E_ARB_MAX 0xF40120
33
34#define mmTPC5_RTR_HBW_W_ARB_MAX 0xF40124
35
36#define mmTPC5_RTR_HBW_N_ARB_MAX 0xF40128
37
38#define mmTPC5_RTR_HBW_S_ARB_MAX 0xF4012C
39
40#define mmTPC5_RTR_HBW_L_ARB_MAX 0xF40130
41
42#define mmTPC5_RTR_HBW_RD_RS_E_ARB 0xF40140
43
44#define mmTPC5_RTR_HBW_RD_RS_W_ARB 0xF40144
45
46#define mmTPC5_RTR_HBW_RD_RS_N_ARB 0xF40148
47
48#define mmTPC5_RTR_HBW_RD_RS_S_ARB 0xF4014C
49
50#define mmTPC5_RTR_HBW_RD_RS_L_ARB 0xF40150
51
52#define mmTPC5_RTR_HBW_WR_RQ_E_ARB 0xF40170
53
54#define mmTPC5_RTR_HBW_WR_RQ_W_ARB 0xF40174
55
56#define mmTPC5_RTR_HBW_WR_RQ_N_ARB 0xF40178
57
58#define mmTPC5_RTR_HBW_WR_RQ_S_ARB 0xF4017C
59
60#define mmTPC5_RTR_HBW_WR_RQ_L_ARB 0xF40180
61
62#define mmTPC5_RTR_HBW_WR_RS_E_ARB 0xF40190
63
64#define mmTPC5_RTR_HBW_WR_RS_W_ARB 0xF40194
65
66#define mmTPC5_RTR_HBW_WR_RS_N_ARB 0xF40198
67
68#define mmTPC5_RTR_HBW_WR_RS_S_ARB 0xF4019C
69
70#define mmTPC5_RTR_HBW_WR_RS_L_ARB 0xF401A0
71
72#define mmTPC5_RTR_LBW_RD_RQ_E_ARB 0xF40200
73
74#define mmTPC5_RTR_LBW_RD_RQ_W_ARB 0xF40204
75
76#define mmTPC5_RTR_LBW_RD_RQ_N_ARB 0xF40208
77
78#define mmTPC5_RTR_LBW_RD_RQ_S_ARB 0xF4020C
79
80#define mmTPC5_RTR_LBW_RD_RQ_L_ARB 0xF40210
81
82#define mmTPC5_RTR_LBW_E_ARB_MAX 0xF40220
83
84#define mmTPC5_RTR_LBW_W_ARB_MAX 0xF40224
85
86#define mmTPC5_RTR_LBW_N_ARB_MAX 0xF40228
87
88#define mmTPC5_RTR_LBW_S_ARB_MAX 0xF4022C
89
90#define mmTPC5_RTR_LBW_L_ARB_MAX 0xF40230
91
92#define mmTPC5_RTR_LBW_RD_RS_E_ARB 0xF40250
93
94#define mmTPC5_RTR_LBW_RD_RS_W_ARB 0xF40254
95
96#define mmTPC5_RTR_LBW_RD_RS_N_ARB 0xF40258
97
98#define mmTPC5_RTR_LBW_RD_RS_S_ARB 0xF4025C
99
100#define mmTPC5_RTR_LBW_RD_RS_L_ARB 0xF40260
101
102#define mmTPC5_RTR_LBW_WR_RQ_E_ARB 0xF40270
103
104#define mmTPC5_RTR_LBW_WR_RQ_W_ARB 0xF40274
105
106#define mmTPC5_RTR_LBW_WR_RQ_N_ARB 0xF40278
107
108#define mmTPC5_RTR_LBW_WR_RQ_S_ARB 0xF4027C
109
110#define mmTPC5_RTR_LBW_WR_RQ_L_ARB 0xF40280
111
112#define mmTPC5_RTR_LBW_WR_RS_E_ARB 0xF40290
113
114#define mmTPC5_RTR_LBW_WR_RS_W_ARB 0xF40294
115
116#define mmTPC5_RTR_LBW_WR_RS_N_ARB 0xF40298
117
118#define mmTPC5_RTR_LBW_WR_RS_S_ARB 0xF4029C
119
120#define mmTPC5_RTR_LBW_WR_RS_L_ARB 0xF402A0
121
122#define mmTPC5_RTR_DBG_E_ARB 0xF40300
123
124#define mmTPC5_RTR_DBG_W_ARB 0xF40304
125
126#define mmTPC5_RTR_DBG_N_ARB 0xF40308
127
128#define mmTPC5_RTR_DBG_S_ARB 0xF4030C
129
130#define mmTPC5_RTR_DBG_L_ARB 0xF40310
131
132#define mmTPC5_RTR_DBG_E_ARB_MAX 0xF40320
133
134#define mmTPC5_RTR_DBG_W_ARB_MAX 0xF40324
135
136#define mmTPC5_RTR_DBG_N_ARB_MAX 0xF40328
137
138#define mmTPC5_RTR_DBG_S_ARB_MAX 0xF4032C
139
140#define mmTPC5_RTR_DBG_L_ARB_MAX 0xF40330
141
142#define mmTPC5_RTR_SPLIT_COEF_0 0xF40400
143
144#define mmTPC5_RTR_SPLIT_COEF_1 0xF40404
145
146#define mmTPC5_RTR_SPLIT_COEF_2 0xF40408
147
148#define mmTPC5_RTR_SPLIT_COEF_3 0xF4040C
149
150#define mmTPC5_RTR_SPLIT_COEF_4 0xF40410
151
152#define mmTPC5_RTR_SPLIT_COEF_5 0xF40414
153
154#define mmTPC5_RTR_SPLIT_COEF_6 0xF40418
155
156#define mmTPC5_RTR_SPLIT_COEF_7 0xF4041C
157
158#define mmTPC5_RTR_SPLIT_COEF_8 0xF40420
159
160#define mmTPC5_RTR_SPLIT_COEF_9 0xF40424
161
162#define mmTPC5_RTR_SPLIT_CFG 0xF40440
163
164#define mmTPC5_RTR_SPLIT_RD_SAT 0xF40444
165
166#define mmTPC5_RTR_SPLIT_RD_RST_TOKEN 0xF40448
167
168#define mmTPC5_RTR_SPLIT_RD_TIMEOUT_0 0xF4044C
169
170#define mmTPC5_RTR_SPLIT_RD_TIMEOUT_1 0xF40450
171
172#define mmTPC5_RTR_SPLIT_WR_SAT 0xF40454
173
174#define mmTPC5_RTR_WPLIT_WR_TST_TOLEN 0xF40458
175
176#define mmTPC5_RTR_SPLIT_WR_TIMEOUT_0 0xF4045C
177
178#define mmTPC5_RTR_SPLIT_WR_TIMEOUT_1 0xF40460
179
180#define mmTPC5_RTR_HBW_RANGE_HIT 0xF40470
181
182#define mmTPC5_RTR_HBW_RANGE_MASK_L_0 0xF40480
183
184#define mmTPC5_RTR_HBW_RANGE_MASK_L_1 0xF40484
185
186#define mmTPC5_RTR_HBW_RANGE_MASK_L_2 0xF40488
187
188#define mmTPC5_RTR_HBW_RANGE_MASK_L_3 0xF4048C
189
190#define mmTPC5_RTR_HBW_RANGE_MASK_L_4 0xF40490
191
192#define mmTPC5_RTR_HBW_RANGE_MASK_L_5 0xF40494
193
194#define mmTPC5_RTR_HBW_RANGE_MASK_L_6 0xF40498
195
196#define mmTPC5_RTR_HBW_RANGE_MASK_L_7 0xF4049C
197
198#define mmTPC5_RTR_HBW_RANGE_MASK_H_0 0xF404A0
199
200#define mmTPC5_RTR_HBW_RANGE_MASK_H_1 0xF404A4
201
202#define mmTPC5_RTR_HBW_RANGE_MASK_H_2 0xF404A8
203
204#define mmTPC5_RTR_HBW_RANGE_MASK_H_3 0xF404AC
205
206#define mmTPC5_RTR_HBW_RANGE_MASK_H_4 0xF404B0
207
208#define mmTPC5_RTR_HBW_RANGE_MASK_H_5 0xF404B4
209
210#define mmTPC5_RTR_HBW_RANGE_MASK_H_6 0xF404B8
211
212#define mmTPC5_RTR_HBW_RANGE_MASK_H_7 0xF404BC
213
214#define mmTPC5_RTR_HBW_RANGE_BASE_L_0 0xF404C0
215
216#define mmTPC5_RTR_HBW_RANGE_BASE_L_1 0xF404C4
217
218#define mmTPC5_RTR_HBW_RANGE_BASE_L_2 0xF404C8
219
220#define mmTPC5_RTR_HBW_RANGE_BASE_L_3 0xF404CC
221
222#define mmTPC5_RTR_HBW_RANGE_BASE_L_4 0xF404D0
223
224#define mmTPC5_RTR_HBW_RANGE_BASE_L_5 0xF404D4
225
226#define mmTPC5_RTR_HBW_RANGE_BASE_L_6 0xF404D8
227
228#define mmTPC5_RTR_HBW_RANGE_BASE_L_7 0xF404DC
229
230#define mmTPC5_RTR_HBW_RANGE_BASE_H_0 0xF404E0
231
232#define mmTPC5_RTR_HBW_RANGE_BASE_H_1 0xF404E4
233
234#define mmTPC5_RTR_HBW_RANGE_BASE_H_2 0xF404E8
235
236#define mmTPC5_RTR_HBW_RANGE_BASE_H_3 0xF404EC
237
238#define mmTPC5_RTR_HBW_RANGE_BASE_H_4 0xF404F0
239
240#define mmTPC5_RTR_HBW_RANGE_BASE_H_5 0xF404F4
241
242#define mmTPC5_RTR_HBW_RANGE_BASE_H_6 0xF404F8
243
244#define mmTPC5_RTR_HBW_RANGE_BASE_H_7 0xF404FC
245
246#define mmTPC5_RTR_LBW_RANGE_HIT 0xF40500
247
248#define mmTPC5_RTR_LBW_RANGE_MASK_0 0xF40510
249
250#define mmTPC5_RTR_LBW_RANGE_MASK_1 0xF40514
251
252#define mmTPC5_RTR_LBW_RANGE_MASK_2 0xF40518
253
254#define mmTPC5_RTR_LBW_RANGE_MASK_3 0xF4051C
255
256#define mmTPC5_RTR_LBW_RANGE_MASK_4 0xF40520
257
258#define mmTPC5_RTR_LBW_RANGE_MASK_5 0xF40524
259
260#define mmTPC5_RTR_LBW_RANGE_MASK_6 0xF40528
261
262#define mmTPC5_RTR_LBW_RANGE_MASK_7 0xF4052C
263
264#define mmTPC5_RTR_LBW_RANGE_MASK_8 0xF40530
265
266#define mmTPC5_RTR_LBW_RANGE_MASK_9 0xF40534
267
268#define mmTPC5_RTR_LBW_RANGE_MASK_10 0xF40538
269
270#define mmTPC5_RTR_LBW_RANGE_MASK_11 0xF4053C
271
272#define mmTPC5_RTR_LBW_RANGE_MASK_12 0xF40540
273
274#define mmTPC5_RTR_LBW_RANGE_MASK_13 0xF40544
275
276#define mmTPC5_RTR_LBW_RANGE_MASK_14 0xF40548
277
278#define mmTPC5_RTR_LBW_RANGE_MASK_15 0xF4054C
279
280#define mmTPC5_RTR_LBW_RANGE_BASE_0 0xF40550
281
282#define mmTPC5_RTR_LBW_RANGE_BASE_1 0xF40554
283
284#define mmTPC5_RTR_LBW_RANGE_BASE_2 0xF40558
285
286#define mmTPC5_RTR_LBW_RANGE_BASE_3 0xF4055C
287
288#define mmTPC5_RTR_LBW_RANGE_BASE_4 0xF40560
289
290#define mmTPC5_RTR_LBW_RANGE_BASE_5 0xF40564
291
292#define mmTPC5_RTR_LBW_RANGE_BASE_6 0xF40568
293
294#define mmTPC5_RTR_LBW_RANGE_BASE_7 0xF4056C
295
296#define mmTPC5_RTR_LBW_RANGE_BASE_8 0xF40570
297
298#define mmTPC5_RTR_LBW_RANGE_BASE_9 0xF40574
299
300#define mmTPC5_RTR_LBW_RANGE_BASE_10 0xF40578
301
302#define mmTPC5_RTR_LBW_RANGE_BASE_11 0xF4057C
303
304#define mmTPC5_RTR_LBW_RANGE_BASE_12 0xF40580
305
306#define mmTPC5_RTR_LBW_RANGE_BASE_13 0xF40584
307
308#define mmTPC5_RTR_LBW_RANGE_BASE_14 0xF40588
309
310#define mmTPC5_RTR_LBW_RANGE_BASE_15 0xF4058C
311
312#define mmTPC5_RTR_RGLTR 0xF40590
313
314#define mmTPC5_RTR_RGLTR_WR_RESULT 0xF40594
315
316#define mmTPC5_RTR_RGLTR_RD_RESULT 0xF40598
317
318#define mmTPC5_RTR_SCRAMB_EN 0xF40600
319
320#define mmTPC5_RTR_NON_LIN_SCRAMB 0xF40604
321
322#endif /* ASIC_REG_TPC5_RTR_REGS_H_ */
323
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
new file mode 100644
index 000000000000..1e1168601c41
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
@@ -0,0 +1,887 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC6_CFG_REGS_H_
14#define ASIC_REG_TPC6_CFG_REGS_H_
15
16/*
17 *****************************************
18 * TPC6_CFG (Prototype: TPC)
19 *****************************************
20 */
21
22#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF86400
23
24#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF86404
25
26#define mmTPC6_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF86408
27
28#define mmTPC6_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF8640C
29
30#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF86410
31
32#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF86414
33
34#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF86418
35
36#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF8641C
37
38#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF86420
39
40#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF86424
41
42#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF86428
43
44#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF8642C
45
46#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF86430
47
48#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF86434
49
50#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF86438
51
52#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF8643C
53
54#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF86440
55
56#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF86444
57
58#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF86448
59
60#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF8644C
61
62#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF86450
63
64#define mmTPC6_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF86454
65
66#define mmTPC6_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF86458
67
68#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF8645C
69
70#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF86460
71
72#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF86464
73
74#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF86468
75
76#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF8646C
77
78#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF86470
79
80#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF86474
81
82#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF86478
83
84#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF8647C
85
86#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF86480
87
88#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF86484
89
90#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF86488
91
92#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF8648C
93
94#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF86490
95
96#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF86494
97
98#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF86498
99
100#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF8649C
101
102#define mmTPC6_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF864A0
103
104#define mmTPC6_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF864A4
105
106#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF864A8
107
108#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF864AC
109
110#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF864B0
111
112#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF864B4
113
114#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF864B8
115
116#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF864BC
117
118#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF864C0
119
120#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF864C4
121
122#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF864C8
123
124#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF864CC
125
126#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF864D0
127
128#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF864D4
129
130#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF864D8
131
132#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF864DC
133
134#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF864E0
135
136#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF864E4
137
138#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF864E8
139
140#define mmTPC6_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF864EC
141
142#define mmTPC6_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF864F0
143
144#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF864F4
145
146#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF864F8
147
148#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF864FC
149
150#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF86500
151
152#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF86504
153
154#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF86508
155
156#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF8650C
157
158#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF86510
159
160#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF86514
161
162#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF86518
163
164#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF8651C
165
166#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF86520
167
168#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF86524
169
170#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF86528
171
172#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF8652C
173
174#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF86530
175
176#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF86534
177
178#define mmTPC6_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF86538
179
180#define mmTPC6_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF8653C
181
182#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF86540
183
184#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF86544
185
186#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF86548
187
188#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF8654C
189
190#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF86550
191
192#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF86554
193
194#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF86558
195
196#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF8655C
197
198#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF86560
199
200#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF86564
201
202#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF86568
203
204#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF8656C
205
206#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF86570
207
208#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF86574
209
210#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF86578
211
212#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF8657C
213
214#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF86580
215
216#define mmTPC6_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF86584
217
218#define mmTPC6_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF86588
219
220#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF8658C
221
222#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF86590
223
224#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF86594
225
226#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF86598
227
228#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF8659C
229
230#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF865A0
231
232#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF865A4
233
234#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF865A8
235
236#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF865AC
237
238#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF865B0
239
240#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF865B4
241
242#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF865B8
243
244#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF865BC
245
246#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF865C0
247
248#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF865C4
249
250#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF865C8
251
252#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF865CC
253
254#define mmTPC6_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF865D0
255
256#define mmTPC6_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF865D4
257
258#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF865D8
259
260#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF865DC
261
262#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF865E0
263
264#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF865E4
265
266#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF865E8
267
268#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF865EC
269
270#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF865F0
271
272#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF865F4
273
274#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF865F8
275
276#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF865FC
277
278#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF86600
279
280#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF86604
281
282#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF86608
283
284#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF8660C
285
286#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF86610
287
288#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF86614
289
290#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF86618
291
292#define mmTPC6_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF8661C
293
294#define mmTPC6_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF86620
295
296#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF86624
297
298#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF86628
299
300#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF8662C
301
302#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF86630
303
304#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF86634
305
306#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF86638
307
308#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF8663C
309
310#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF86640
311
312#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF86644
313
314#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF86648
315
316#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF8664C
317
318#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF86650
319
320#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF86654
321
322#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF86658
323
324#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF8665C
325
326#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF86660
327
328#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF86664
329
330#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_0 0xF86668
331
332#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_0 0xF8666C
333
334#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_1 0xF86670
335
336#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_1 0xF86674
337
338#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_2 0xF86678
339
340#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_2 0xF8667C
341
342#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_3 0xF86680
343
344#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_3 0xF86684
345
346#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_4 0xF86688
347
348#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_4 0xF8668C
349
350#define mmTPC6_CFG_KERNEL_SRF_0 0xF86690
351
352#define mmTPC6_CFG_KERNEL_SRF_1 0xF86694
353
354#define mmTPC6_CFG_KERNEL_SRF_2 0xF86698
355
356#define mmTPC6_CFG_KERNEL_SRF_3 0xF8669C
357
358#define mmTPC6_CFG_KERNEL_SRF_4 0xF866A0
359
360#define mmTPC6_CFG_KERNEL_SRF_5 0xF866A4
361
362#define mmTPC6_CFG_KERNEL_SRF_6 0xF866A8
363
364#define mmTPC6_CFG_KERNEL_SRF_7 0xF866AC
365
366#define mmTPC6_CFG_KERNEL_SRF_8 0xF866B0
367
368#define mmTPC6_CFG_KERNEL_SRF_9 0xF866B4
369
370#define mmTPC6_CFG_KERNEL_SRF_10 0xF866B8
371
372#define mmTPC6_CFG_KERNEL_SRF_11 0xF866BC
373
374#define mmTPC6_CFG_KERNEL_SRF_12 0xF866C0
375
376#define mmTPC6_CFG_KERNEL_SRF_13 0xF866C4
377
378#define mmTPC6_CFG_KERNEL_SRF_14 0xF866C8
379
380#define mmTPC6_CFG_KERNEL_SRF_15 0xF866CC
381
382#define mmTPC6_CFG_KERNEL_SRF_16 0xF866D0
383
384#define mmTPC6_CFG_KERNEL_SRF_17 0xF866D4
385
386#define mmTPC6_CFG_KERNEL_SRF_18 0xF866D8
387
388#define mmTPC6_CFG_KERNEL_SRF_19 0xF866DC
389
390#define mmTPC6_CFG_KERNEL_SRF_20 0xF866E0
391
392#define mmTPC6_CFG_KERNEL_SRF_21 0xF866E4
393
394#define mmTPC6_CFG_KERNEL_SRF_22 0xF866E8
395
396#define mmTPC6_CFG_KERNEL_SRF_23 0xF866EC
397
398#define mmTPC6_CFG_KERNEL_SRF_24 0xF866F0
399
400#define mmTPC6_CFG_KERNEL_SRF_25 0xF866F4
401
402#define mmTPC6_CFG_KERNEL_SRF_26 0xF866F8
403
404#define mmTPC6_CFG_KERNEL_SRF_27 0xF866FC
405
406#define mmTPC6_CFG_KERNEL_SRF_28 0xF86700
407
408#define mmTPC6_CFG_KERNEL_SRF_29 0xF86704
409
410#define mmTPC6_CFG_KERNEL_SRF_30 0xF86708
411
412#define mmTPC6_CFG_KERNEL_SRF_31 0xF8670C
413
414#define mmTPC6_CFG_KERNEL_KERNEL_CONFIG 0xF86710
415
416#define mmTPC6_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF86714
417
418#define mmTPC6_CFG_RESERVED_DESC_END 0xF86738
419
420#define mmTPC6_CFG_ROUND_CSR 0xF867FC
421
422#define mmTPC6_CFG_TBUF_BASE_ADDR_LOW 0xF86800
423
424#define mmTPC6_CFG_TBUF_BASE_ADDR_HIGH 0xF86804
425
426#define mmTPC6_CFG_SEMAPHORE 0xF86808
427
428#define mmTPC6_CFG_VFLAGS 0xF8680C
429
430#define mmTPC6_CFG_SFLAGS 0xF86810
431
432#define mmTPC6_CFG_LFSR_POLYNOM 0xF86818
433
434#define mmTPC6_CFG_STATUS 0xF8681C
435
436#define mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH 0xF86820
437
438#define mmTPC6_CFG_CFG_SUBTRACT_VALUE 0xF86824
439
440#define mmTPC6_CFG_SM_BASE_ADDRESS_LOW 0xF86828
441
442#define mmTPC6_CFG_SM_BASE_ADDRESS_HIGH 0xF8682C
443
444#define mmTPC6_CFG_TPC_CMD 0xF86830
445
446#define mmTPC6_CFG_TPC_EXECUTE 0xF86838
447
448#define mmTPC6_CFG_TPC_STALL 0xF8683C
449
450#define mmTPC6_CFG_ICACHE_BASE_ADDERESS_LOW 0xF86840
451
452#define mmTPC6_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF86844
453
454#define mmTPC6_CFG_MSS_CONFIG 0xF86854
455
456#define mmTPC6_CFG_TPC_INTR_CAUSE 0xF86858
457
458#define mmTPC6_CFG_TPC_INTR_MASK 0xF8685C
459
460#define mmTPC6_CFG_TSB_CONFIG 0xF86860
461
462#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF86A00
463
464#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF86A04
465
466#define mmTPC6_CFG_QM_TENSOR_0_PADDING_VALUE 0xF86A08
467
468#define mmTPC6_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF86A0C
469
470#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF86A10
471
472#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF86A14
473
474#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF86A18
475
476#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF86A1C
477
478#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF86A20
479
480#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF86A24
481
482#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF86A28
483
484#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF86A2C
485
486#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF86A30
487
488#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF86A34
489
490#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF86A38
491
492#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF86A3C
493
494#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF86A40
495
496#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF86A44
497
498#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF86A48
499
500#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF86A4C
501
502#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF86A50
503
504#define mmTPC6_CFG_QM_TENSOR_1_PADDING_VALUE 0xF86A54
505
506#define mmTPC6_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF86A58
507
508#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF86A5C
509
510#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF86A60
511
512#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF86A64
513
514#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF86A68
515
516#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF86A6C
517
518#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF86A70
519
520#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF86A74
521
522#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF86A78
523
524#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF86A7C
525
526#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF86A80
527
528#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF86A84
529
530#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF86A88
531
532#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF86A8C
533
534#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF86A90
535
536#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF86A94
537
538#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF86A98
539
540#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF86A9C
541
542#define mmTPC6_CFG_QM_TENSOR_2_PADDING_VALUE 0xF86AA0
543
544#define mmTPC6_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF86AA4
545
546#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF86AA8
547
548#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF86AAC
549
550#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF86AB0
551
552#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF86AB4
553
554#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF86AB8
555
556#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF86ABC
557
558#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF86AC0
559
560#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF86AC4
561
562#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF86AC8
563
564#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF86ACC
565
566#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF86AD0
567
568#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF86AD4
569
570#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF86AD8
571
572#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF86ADC
573
574#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF86AE0
575
576#define mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF86AE4
577
578#define mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF86AE8
579
580#define mmTPC6_CFG_QM_TENSOR_3_PADDING_VALUE 0xF86AEC
581
582#define mmTPC6_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF86AF0
583
584#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF86AF4
585
586#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF86AF8
587
588#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF86AFC
589
590#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF86B00
591
592#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF86B04
593
594#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF86B08
595
596#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF86B0C
597
598#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF86B10
599
600#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF86B14
601
602#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF86B18
603
604#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF86B1C
605
606#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF86B20
607
608#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF86B24
609
610#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF86B28
611
612#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF86B2C
613
614#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF86B30
615
616#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF86B34
617
618#define mmTPC6_CFG_QM_TENSOR_4_PADDING_VALUE 0xF86B38
619
620#define mmTPC6_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF86B3C
621
622#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF86B40
623
624#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF86B44
625
626#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF86B48
627
628#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF86B4C
629
630#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF86B50
631
632#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF86B54
633
634#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF86B58
635
636#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF86B5C
637
638#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF86B60
639
640#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF86B64
641
642#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF86B68
643
644#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF86B6C
645
646#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF86B70
647
648#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF86B74
649
650#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF86B78
651
652#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF86B7C
653
654#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF86B80
655
656#define mmTPC6_CFG_QM_TENSOR_5_PADDING_VALUE 0xF86B84
657
658#define mmTPC6_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF86B88
659
660#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF86B8C
661
662#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF86B90
663
664#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF86B94
665
666#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF86B98
667
668#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF86B9C
669
670#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF86BA0
671
672#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF86BA4
673
674#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF86BA8
675
676#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF86BAC
677
678#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF86BB0
679
680#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF86BB4
681
682#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF86BB8
683
684#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF86BBC
685
686#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF86BC0
687
688#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF86BC4
689
690#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF86BC8
691
692#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF86BCC
693
694#define mmTPC6_CFG_QM_TENSOR_6_PADDING_VALUE 0xF86BD0
695
696#define mmTPC6_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF86BD4
697
698#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF86BD8
699
700#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF86BDC
701
702#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF86BE0
703
704#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF86BE4
705
706#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF86BE8
707
708#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF86BEC
709
710#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF86BF0
711
712#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF86BF4
713
714#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF86BF8
715
716#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF86BFC
717
718#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF86C00
719
720#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF86C04
721
722#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF86C08
723
724#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF86C0C
725
726#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF86C10
727
728#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF86C14
729
730#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF86C18
731
732#define mmTPC6_CFG_QM_TENSOR_7_PADDING_VALUE 0xF86C1C
733
734#define mmTPC6_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF86C20
735
736#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF86C24
737
738#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF86C28
739
740#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF86C2C
741
742#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF86C30
743
744#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF86C34
745
746#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF86C38
747
748#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF86C3C
749
750#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF86C40
751
752#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF86C44
753
754#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF86C48
755
756#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF86C4C
757
758#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF86C50
759
760#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF86C54
761
762#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF86C58
763
764#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF86C5C
765
766#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF86C60
767
768#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF86C64
769
770#define mmTPC6_CFG_QM_TID_BASE_DIM_0 0xF86C68
771
772#define mmTPC6_CFG_QM_TID_SIZE_DIM_0 0xF86C6C
773
774#define mmTPC6_CFG_QM_TID_BASE_DIM_1 0xF86C70
775
776#define mmTPC6_CFG_QM_TID_SIZE_DIM_1 0xF86C74
777
778#define mmTPC6_CFG_QM_TID_BASE_DIM_2 0xF86C78
779
780#define mmTPC6_CFG_QM_TID_SIZE_DIM_2 0xF86C7C
781
782#define mmTPC6_CFG_QM_TID_BASE_DIM_3 0xF86C80
783
784#define mmTPC6_CFG_QM_TID_SIZE_DIM_3 0xF86C84
785
786#define mmTPC6_CFG_QM_TID_BASE_DIM_4 0xF86C88
787
788#define mmTPC6_CFG_QM_TID_SIZE_DIM_4 0xF86C8C
789
790#define mmTPC6_CFG_QM_SRF_0 0xF86C90
791
792#define mmTPC6_CFG_QM_SRF_1 0xF86C94
793
794#define mmTPC6_CFG_QM_SRF_2 0xF86C98
795
796#define mmTPC6_CFG_QM_SRF_3 0xF86C9C
797
798#define mmTPC6_CFG_QM_SRF_4 0xF86CA0
799
800#define mmTPC6_CFG_QM_SRF_5 0xF86CA4
801
802#define mmTPC6_CFG_QM_SRF_6 0xF86CA8
803
804#define mmTPC6_CFG_QM_SRF_7 0xF86CAC
805
806#define mmTPC6_CFG_QM_SRF_8 0xF86CB0
807
808#define mmTPC6_CFG_QM_SRF_9 0xF86CB4
809
810#define mmTPC6_CFG_QM_SRF_10 0xF86CB8
811
812#define mmTPC6_CFG_QM_SRF_11 0xF86CBC
813
814#define mmTPC6_CFG_QM_SRF_12 0xF86CC0
815
816#define mmTPC6_CFG_QM_SRF_13 0xF86CC4
817
818#define mmTPC6_CFG_QM_SRF_14 0xF86CC8
819
820#define mmTPC6_CFG_QM_SRF_15 0xF86CCC
821
822#define mmTPC6_CFG_QM_SRF_16 0xF86CD0
823
824#define mmTPC6_CFG_QM_SRF_17 0xF86CD4
825
826#define mmTPC6_CFG_QM_SRF_18 0xF86CD8
827
828#define mmTPC6_CFG_QM_SRF_19 0xF86CDC
829
830#define mmTPC6_CFG_QM_SRF_20 0xF86CE0
831
832#define mmTPC6_CFG_QM_SRF_21 0xF86CE4
833
834#define mmTPC6_CFG_QM_SRF_22 0xF86CE8
835
836#define mmTPC6_CFG_QM_SRF_23 0xF86CEC
837
838#define mmTPC6_CFG_QM_SRF_24 0xF86CF0
839
840#define mmTPC6_CFG_QM_SRF_25 0xF86CF4
841
842#define mmTPC6_CFG_QM_SRF_26 0xF86CF8
843
844#define mmTPC6_CFG_QM_SRF_27 0xF86CFC
845
846#define mmTPC6_CFG_QM_SRF_28 0xF86D00
847
848#define mmTPC6_CFG_QM_SRF_29 0xF86D04
849
850#define mmTPC6_CFG_QM_SRF_30 0xF86D08
851
852#define mmTPC6_CFG_QM_SRF_31 0xF86D0C
853
854#define mmTPC6_CFG_QM_KERNEL_CONFIG 0xF86D10
855
856#define mmTPC6_CFG_QM_SYNC_OBJECT_MESSAGE 0xF86D14
857
858#define mmTPC6_CFG_ARUSER 0xF86D18
859
860#define mmTPC6_CFG_AWUSER 0xF86D1C
861
862#define mmTPC6_CFG_FUNC_MBIST_CNTRL 0xF86E00
863
864#define mmTPC6_CFG_FUNC_MBIST_PAT 0xF86E04
865
866#define mmTPC6_CFG_FUNC_MBIST_MEM_0 0xF86E08
867
868#define mmTPC6_CFG_FUNC_MBIST_MEM_1 0xF86E0C
869
870#define mmTPC6_CFG_FUNC_MBIST_MEM_2 0xF86E10
871
872#define mmTPC6_CFG_FUNC_MBIST_MEM_3 0xF86E14
873
874#define mmTPC6_CFG_FUNC_MBIST_MEM_4 0xF86E18
875
876#define mmTPC6_CFG_FUNC_MBIST_MEM_5 0xF86E1C
877
878#define mmTPC6_CFG_FUNC_MBIST_MEM_6 0xF86E20
879
880#define mmTPC6_CFG_FUNC_MBIST_MEM_7 0xF86E24
881
882#define mmTPC6_CFG_FUNC_MBIST_MEM_8 0xF86E28
883
884#define mmTPC6_CFG_FUNC_MBIST_MEM_9 0xF86E2C
885
886#endif /* ASIC_REG_TPC6_CFG_REGS_H_ */
887
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
new file mode 100644
index 000000000000..fbca6b47284e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
@@ -0,0 +1,139 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC6_CMDQ_REGS_H_
14#define ASIC_REG_TPC6_CMDQ_REGS_H_
15
16/*
17 *****************************************
18 * TPC6_CMDQ (Prototype: CMDQ)
19 *****************************************
20 */
21
22#define mmTPC6_CMDQ_GLBL_CFG0 0xF89000
23
24#define mmTPC6_CMDQ_GLBL_CFG1 0xF89004
25
26#define mmTPC6_CMDQ_GLBL_PROT 0xF89008
27
28#define mmTPC6_CMDQ_GLBL_ERR_CFG 0xF8900C
29
30#define mmTPC6_CMDQ_GLBL_ERR_ADDR_LO 0xF89010
31
32#define mmTPC6_CMDQ_GLBL_ERR_ADDR_HI 0xF89014
33
34#define mmTPC6_CMDQ_GLBL_ERR_WDATA 0xF89018
35
36#define mmTPC6_CMDQ_GLBL_SECURE_PROPS 0xF8901C
37
38#define mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS 0xF89020
39
40#define mmTPC6_CMDQ_GLBL_STS0 0xF89024
41
42#define mmTPC6_CMDQ_GLBL_STS1 0xF89028
43
44#define mmTPC6_CMDQ_CQ_CFG0 0xF890B0
45
46#define mmTPC6_CMDQ_CQ_CFG1 0xF890B4
47
48#define mmTPC6_CMDQ_CQ_ARUSER 0xF890B8
49
50#define mmTPC6_CMDQ_CQ_PTR_LO 0xF890C0
51
52#define mmTPC6_CMDQ_CQ_PTR_HI 0xF890C4
53
54#define mmTPC6_CMDQ_CQ_TSIZE 0xF890C8
55
56#define mmTPC6_CMDQ_CQ_CTL 0xF890CC
57
58#define mmTPC6_CMDQ_CQ_PTR_LO_STS 0xF890D4
59
60#define mmTPC6_CMDQ_CQ_PTR_HI_STS 0xF890D8
61
62#define mmTPC6_CMDQ_CQ_TSIZE_STS 0xF890DC
63
64#define mmTPC6_CMDQ_CQ_CTL_STS 0xF890E0
65
66#define mmTPC6_CMDQ_CQ_STS0 0xF890E4
67
68#define mmTPC6_CMDQ_CQ_STS1 0xF890E8
69
70#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_EN 0xF890F0
71
72#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF890F4
73
74#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_SAT 0xF890F8
75
76#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF890FC
77
78#define mmTPC6_CMDQ_CQ_IFIFO_CNT 0xF89108
79
80#define mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF89120
81
82#define mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF89124
83
84#define mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF89128
85
86#define mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF8912C
87
88#define mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF89130
89
90#define mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF89134
91
92#define mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF89138
93
94#define mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF8913C
95
96#define mmTPC6_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF89140
97
98#define mmTPC6_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF89144
99
100#define mmTPC6_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF89148
101
102#define mmTPC6_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF8914C
103
104#define mmTPC6_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF89150
105
106#define mmTPC6_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF89154
107
108#define mmTPC6_CMDQ_CP_FENCE0_RDATA 0xF89158
109
110#define mmTPC6_CMDQ_CP_FENCE1_RDATA 0xF8915C
111
112#define mmTPC6_CMDQ_CP_FENCE2_RDATA 0xF89160
113
114#define mmTPC6_CMDQ_CP_FENCE3_RDATA 0xF89164
115
116#define mmTPC6_CMDQ_CP_FENCE0_CNT 0xF89168
117
118#define mmTPC6_CMDQ_CP_FENCE1_CNT 0xF8916C
119
120#define mmTPC6_CMDQ_CP_FENCE2_CNT 0xF89170
121
122#define mmTPC6_CMDQ_CP_FENCE3_CNT 0xF89174
123
124#define mmTPC6_CMDQ_CP_STS 0xF89178
125
126#define mmTPC6_CMDQ_CP_CURRENT_INST_LO 0xF8917C
127
128#define mmTPC6_CMDQ_CP_CURRENT_INST_HI 0xF89180
129
130#define mmTPC6_CMDQ_CP_BARRIER_CFG 0xF89184
131
132#define mmTPC6_CMDQ_CP_DBG_0 0xF89188
133
134#define mmTPC6_CMDQ_CQ_BUF_ADDR 0xF89308
135
136#define mmTPC6_CMDQ_CQ_BUF_RDATA 0xF8930C
137
138#endif /* ASIC_REG_TPC6_CMDQ_REGS_H_ */
139
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
new file mode 100644
index 000000000000..bf32465dabcb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
@@ -0,0 +1,179 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC6_QM_REGS_H_
14#define ASIC_REG_TPC6_QM_REGS_H_
15
16/*
17 *****************************************
18 * TPC6_QM (Prototype: QMAN)
19 *****************************************
20 */
21
22#define mmTPC6_QM_GLBL_CFG0 0xF88000
23
24#define mmTPC6_QM_GLBL_CFG1 0xF88004
25
26#define mmTPC6_QM_GLBL_PROT 0xF88008
27
28#define mmTPC6_QM_GLBL_ERR_CFG 0xF8800C
29
30#define mmTPC6_QM_GLBL_ERR_ADDR_LO 0xF88010
31
32#define mmTPC6_QM_GLBL_ERR_ADDR_HI 0xF88014
33
34#define mmTPC6_QM_GLBL_ERR_WDATA 0xF88018
35
36#define mmTPC6_QM_GLBL_SECURE_PROPS 0xF8801C
37
38#define mmTPC6_QM_GLBL_NON_SECURE_PROPS 0xF88020
39
40#define mmTPC6_QM_GLBL_STS0 0xF88024
41
42#define mmTPC6_QM_GLBL_STS1 0xF88028
43
44#define mmTPC6_QM_PQ_BASE_LO 0xF88060
45
46#define mmTPC6_QM_PQ_BASE_HI 0xF88064
47
48#define mmTPC6_QM_PQ_SIZE 0xF88068
49
50#define mmTPC6_QM_PQ_PI 0xF8806C
51
52#define mmTPC6_QM_PQ_CI 0xF88070
53
54#define mmTPC6_QM_PQ_CFG0 0xF88074
55
56#define mmTPC6_QM_PQ_CFG1 0xF88078
57
58#define mmTPC6_QM_PQ_ARUSER 0xF8807C
59
60#define mmTPC6_QM_PQ_PUSH0 0xF88080
61
62#define mmTPC6_QM_PQ_PUSH1 0xF88084
63
64#define mmTPC6_QM_PQ_PUSH2 0xF88088
65
66#define mmTPC6_QM_PQ_PUSH3 0xF8808C
67
68#define mmTPC6_QM_PQ_STS0 0xF88090
69
70#define mmTPC6_QM_PQ_STS1 0xF88094
71
72#define mmTPC6_QM_PQ_RD_RATE_LIM_EN 0xF880A0
73
74#define mmTPC6_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF880A4
75
76#define mmTPC6_QM_PQ_RD_RATE_LIM_SAT 0xF880A8
77
78#define mmTPC6_QM_PQ_RD_RATE_LIM_TOUT 0xF880AC
79
80#define mmTPC6_QM_CQ_CFG0 0xF880B0
81
82#define mmTPC6_QM_CQ_CFG1 0xF880B4
83
84#define mmTPC6_QM_CQ_ARUSER 0xF880B8
85
86#define mmTPC6_QM_CQ_PTR_LO 0xF880C0
87
88#define mmTPC6_QM_CQ_PTR_HI 0xF880C4
89
90#define mmTPC6_QM_CQ_TSIZE 0xF880C8
91
92#define mmTPC6_QM_CQ_CTL 0xF880CC
93
94#define mmTPC6_QM_CQ_PTR_LO_STS 0xF880D4
95
96#define mmTPC6_QM_CQ_PTR_HI_STS 0xF880D8
97
98#define mmTPC6_QM_CQ_TSIZE_STS 0xF880DC
99
100#define mmTPC6_QM_CQ_CTL_STS 0xF880E0
101
102#define mmTPC6_QM_CQ_STS0 0xF880E4
103
104#define mmTPC6_QM_CQ_STS1 0xF880E8
105
106#define mmTPC6_QM_CQ_RD_RATE_LIM_EN 0xF880F0
107
108#define mmTPC6_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF880F4
109
110#define mmTPC6_QM_CQ_RD_RATE_LIM_SAT 0xF880F8
111
112#define mmTPC6_QM_CQ_RD_RATE_LIM_TOUT 0xF880FC
113
114#define mmTPC6_QM_CQ_IFIFO_CNT 0xF88108
115
116#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO 0xF88120
117
118#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI 0xF88124
119
120#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO 0xF88128
121
122#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI 0xF8812C
123
124#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO 0xF88130
125
126#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI 0xF88134
127
128#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO 0xF88138
129
130#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI 0xF8813C
131
132#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET 0xF88140
133
134#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF88144
135
136#define mmTPC6_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF88148
137
138#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF8814C
139
140#define mmTPC6_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF88150
141
142#define mmTPC6_QM_CP_LDMA_COMMIT_OFFSET 0xF88154
143
144#define mmTPC6_QM_CP_FENCE0_RDATA 0xF88158
145
146#define mmTPC6_QM_CP_FENCE1_RDATA 0xF8815C
147
148#define mmTPC6_QM_CP_FENCE2_RDATA 0xF88160
149
150#define mmTPC6_QM_CP_FENCE3_RDATA 0xF88164
151
152#define mmTPC6_QM_CP_FENCE0_CNT 0xF88168
153
154#define mmTPC6_QM_CP_FENCE1_CNT 0xF8816C
155
156#define mmTPC6_QM_CP_FENCE2_CNT 0xF88170
157
158#define mmTPC6_QM_CP_FENCE3_CNT 0xF88174
159
160#define mmTPC6_QM_CP_STS 0xF88178
161
162#define mmTPC6_QM_CP_CURRENT_INST_LO 0xF8817C
163
164#define mmTPC6_QM_CP_CURRENT_INST_HI 0xF88180
165
166#define mmTPC6_QM_CP_BARRIER_CFG 0xF88184
167
168#define mmTPC6_QM_CP_DBG_0 0xF88188
169
170#define mmTPC6_QM_PQ_BUF_ADDR 0xF88300
171
172#define mmTPC6_QM_PQ_BUF_RDATA 0xF88304
173
174#define mmTPC6_QM_CQ_BUF_ADDR 0xF88308
175
176#define mmTPC6_QM_CQ_BUF_RDATA 0xF8830C
177
178#endif /* ASIC_REG_TPC6_QM_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
new file mode 100644
index 000000000000..609bb90e1046
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
@@ -0,0 +1,323 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC6_RTR_REGS_H_
14#define ASIC_REG_TPC6_RTR_REGS_H_
15
16/*
17 *****************************************
18 * TPC6_RTR (Prototype: TPC_RTR)
19 *****************************************
20 */
21
22#define mmTPC6_RTR_HBW_RD_RQ_E_ARB 0xF80100
23
24#define mmTPC6_RTR_HBW_RD_RQ_W_ARB 0xF80104
25
26#define mmTPC6_RTR_HBW_RD_RQ_N_ARB 0xF80108
27
28#define mmTPC6_RTR_HBW_RD_RQ_S_ARB 0xF8010C
29
30#define mmTPC6_RTR_HBW_RD_RQ_L_ARB 0xF80110
31
32#define mmTPC6_RTR_HBW_E_ARB_MAX 0xF80120
33
34#define mmTPC6_RTR_HBW_W_ARB_MAX 0xF80124
35
36#define mmTPC6_RTR_HBW_N_ARB_MAX 0xF80128
37
38#define mmTPC6_RTR_HBW_S_ARB_MAX 0xF8012C
39
40#define mmTPC6_RTR_HBW_L_ARB_MAX 0xF80130
41
42#define mmTPC6_RTR_HBW_RD_RS_E_ARB 0xF80140
43
44#define mmTPC6_RTR_HBW_RD_RS_W_ARB 0xF80144
45
46#define mmTPC6_RTR_HBW_RD_RS_N_ARB 0xF80148
47
48#define mmTPC6_RTR_HBW_RD_RS_S_ARB 0xF8014C
49
50#define mmTPC6_RTR_HBW_RD_RS_L_ARB 0xF80150
51
52#define mmTPC6_RTR_HBW_WR_RQ_E_ARB 0xF80170
53
54#define mmTPC6_RTR_HBW_WR_RQ_W_ARB 0xF80174
55
56#define mmTPC6_RTR_HBW_WR_RQ_N_ARB 0xF80178
57
58#define mmTPC6_RTR_HBW_WR_RQ_S_ARB 0xF8017C
59
60#define mmTPC6_RTR_HBW_WR_RQ_L_ARB 0xF80180
61
62#define mmTPC6_RTR_HBW_WR_RS_E_ARB 0xF80190
63
64#define mmTPC6_RTR_HBW_WR_RS_W_ARB 0xF80194
65
66#define mmTPC6_RTR_HBW_WR_RS_N_ARB 0xF80198
67
68#define mmTPC6_RTR_HBW_WR_RS_S_ARB 0xF8019C
69
70#define mmTPC6_RTR_HBW_WR_RS_L_ARB 0xF801A0
71
72#define mmTPC6_RTR_LBW_RD_RQ_E_ARB 0xF80200
73
74#define mmTPC6_RTR_LBW_RD_RQ_W_ARB 0xF80204
75
76#define mmTPC6_RTR_LBW_RD_RQ_N_ARB 0xF80208
77
78#define mmTPC6_RTR_LBW_RD_RQ_S_ARB 0xF8020C
79
80#define mmTPC6_RTR_LBW_RD_RQ_L_ARB 0xF80210
81
82#define mmTPC6_RTR_LBW_E_ARB_MAX 0xF80220
83
84#define mmTPC6_RTR_LBW_W_ARB_MAX 0xF80224
85
86#define mmTPC6_RTR_LBW_N_ARB_MAX 0xF80228
87
88#define mmTPC6_RTR_LBW_S_ARB_MAX 0xF8022C
89
90#define mmTPC6_RTR_LBW_L_ARB_MAX 0xF80230
91
92#define mmTPC6_RTR_LBW_RD_RS_E_ARB 0xF80250
93
94#define mmTPC6_RTR_LBW_RD_RS_W_ARB 0xF80254
95
96#define mmTPC6_RTR_LBW_RD_RS_N_ARB 0xF80258
97
98#define mmTPC6_RTR_LBW_RD_RS_S_ARB 0xF8025C
99
100#define mmTPC6_RTR_LBW_RD_RS_L_ARB 0xF80260
101
102#define mmTPC6_RTR_LBW_WR_RQ_E_ARB 0xF80270
103
104#define mmTPC6_RTR_LBW_WR_RQ_W_ARB 0xF80274
105
106#define mmTPC6_RTR_LBW_WR_RQ_N_ARB 0xF80278
107
108#define mmTPC6_RTR_LBW_WR_RQ_S_ARB 0xF8027C
109
110#define mmTPC6_RTR_LBW_WR_RQ_L_ARB 0xF80280
111
112#define mmTPC6_RTR_LBW_WR_RS_E_ARB 0xF80290
113
114#define mmTPC6_RTR_LBW_WR_RS_W_ARB 0xF80294
115
116#define mmTPC6_RTR_LBW_WR_RS_N_ARB 0xF80298
117
118#define mmTPC6_RTR_LBW_WR_RS_S_ARB 0xF8029C
119
120#define mmTPC6_RTR_LBW_WR_RS_L_ARB 0xF802A0
121
122#define mmTPC6_RTR_DBG_E_ARB 0xF80300
123
124#define mmTPC6_RTR_DBG_W_ARB 0xF80304
125
126#define mmTPC6_RTR_DBG_N_ARB 0xF80308
127
128#define mmTPC6_RTR_DBG_S_ARB 0xF8030C
129
130#define mmTPC6_RTR_DBG_L_ARB 0xF80310
131
132#define mmTPC6_RTR_DBG_E_ARB_MAX 0xF80320
133
134#define mmTPC6_RTR_DBG_W_ARB_MAX 0xF80324
135
136#define mmTPC6_RTR_DBG_N_ARB_MAX 0xF80328
137
138#define mmTPC6_RTR_DBG_S_ARB_MAX 0xF8032C
139
140#define mmTPC6_RTR_DBG_L_ARB_MAX 0xF80330
141
142#define mmTPC6_RTR_SPLIT_COEF_0 0xF80400
143
144#define mmTPC6_RTR_SPLIT_COEF_1 0xF80404
145
146#define mmTPC6_RTR_SPLIT_COEF_2 0xF80408
147
148#define mmTPC6_RTR_SPLIT_COEF_3 0xF8040C
149
150#define mmTPC6_RTR_SPLIT_COEF_4 0xF80410
151
152#define mmTPC6_RTR_SPLIT_COEF_5 0xF80414
153
154#define mmTPC6_RTR_SPLIT_COEF_6 0xF80418
155
156#define mmTPC6_RTR_SPLIT_COEF_7 0xF8041C
157
158#define mmTPC6_RTR_SPLIT_COEF_8 0xF80420
159
160#define mmTPC6_RTR_SPLIT_COEF_9 0xF80424
161
162#define mmTPC6_RTR_SPLIT_CFG 0xF80440
163
164#define mmTPC6_RTR_SPLIT_RD_SAT 0xF80444
165
166#define mmTPC6_RTR_SPLIT_RD_RST_TOKEN 0xF80448
167
168#define mmTPC6_RTR_SPLIT_RD_TIMEOUT_0 0xF8044C
169
170#define mmTPC6_RTR_SPLIT_RD_TIMEOUT_1 0xF80450
171
172#define mmTPC6_RTR_SPLIT_WR_SAT 0xF80454
173
174#define mmTPC6_RTR_WPLIT_WR_TST_TOLEN 0xF80458
175
176#define mmTPC6_RTR_SPLIT_WR_TIMEOUT_0 0xF8045C
177
178#define mmTPC6_RTR_SPLIT_WR_TIMEOUT_1 0xF80460
179
180#define mmTPC6_RTR_HBW_RANGE_HIT 0xF80470
181
182#define mmTPC6_RTR_HBW_RANGE_MASK_L_0 0xF80480
183
184#define mmTPC6_RTR_HBW_RANGE_MASK_L_1 0xF80484
185
186#define mmTPC6_RTR_HBW_RANGE_MASK_L_2 0xF80488
187
188#define mmTPC6_RTR_HBW_RANGE_MASK_L_3 0xF8048C
189
190#define mmTPC6_RTR_HBW_RANGE_MASK_L_4 0xF80490
191
192#define mmTPC6_RTR_HBW_RANGE_MASK_L_5 0xF80494
193
194#define mmTPC6_RTR_HBW_RANGE_MASK_L_6 0xF80498
195
196#define mmTPC6_RTR_HBW_RANGE_MASK_L_7 0xF8049C
197
198#define mmTPC6_RTR_HBW_RANGE_MASK_H_0 0xF804A0
199
200#define mmTPC6_RTR_HBW_RANGE_MASK_H_1 0xF804A4
201
202#define mmTPC6_RTR_HBW_RANGE_MASK_H_2 0xF804A8
203
204#define mmTPC6_RTR_HBW_RANGE_MASK_H_3 0xF804AC
205
206#define mmTPC6_RTR_HBW_RANGE_MASK_H_4 0xF804B0
207
208#define mmTPC6_RTR_HBW_RANGE_MASK_H_5 0xF804B4
209
210#define mmTPC6_RTR_HBW_RANGE_MASK_H_6 0xF804B8
211
212#define mmTPC6_RTR_HBW_RANGE_MASK_H_7 0xF804BC
213
214#define mmTPC6_RTR_HBW_RANGE_BASE_L_0 0xF804C0
215
216#define mmTPC6_RTR_HBW_RANGE_BASE_L_1 0xF804C4
217
218#define mmTPC6_RTR_HBW_RANGE_BASE_L_2 0xF804C8
219
220#define mmTPC6_RTR_HBW_RANGE_BASE_L_3 0xF804CC
221
222#define mmTPC6_RTR_HBW_RANGE_BASE_L_4 0xF804D0
223
224#define mmTPC6_RTR_HBW_RANGE_BASE_L_5 0xF804D4
225
226#define mmTPC6_RTR_HBW_RANGE_BASE_L_6 0xF804D8
227
228#define mmTPC6_RTR_HBW_RANGE_BASE_L_7 0xF804DC
229
230#define mmTPC6_RTR_HBW_RANGE_BASE_H_0 0xF804E0
231
232#define mmTPC6_RTR_HBW_RANGE_BASE_H_1 0xF804E4
233
234#define mmTPC6_RTR_HBW_RANGE_BASE_H_2 0xF804E8
235
236#define mmTPC6_RTR_HBW_RANGE_BASE_H_3 0xF804EC
237
238#define mmTPC6_RTR_HBW_RANGE_BASE_H_4 0xF804F0
239
240#define mmTPC6_RTR_HBW_RANGE_BASE_H_5 0xF804F4
241
242#define mmTPC6_RTR_HBW_RANGE_BASE_H_6 0xF804F8
243
244#define mmTPC6_RTR_HBW_RANGE_BASE_H_7 0xF804FC
245
246#define mmTPC6_RTR_LBW_RANGE_HIT 0xF80500
247
248#define mmTPC6_RTR_LBW_RANGE_MASK_0 0xF80510
249
250#define mmTPC6_RTR_LBW_RANGE_MASK_1 0xF80514
251
252#define mmTPC6_RTR_LBW_RANGE_MASK_2 0xF80518
253
254#define mmTPC6_RTR_LBW_RANGE_MASK_3 0xF8051C
255
256#define mmTPC6_RTR_LBW_RANGE_MASK_4 0xF80520
257
258#define mmTPC6_RTR_LBW_RANGE_MASK_5 0xF80524
259
260#define mmTPC6_RTR_LBW_RANGE_MASK_6 0xF80528
261
262#define mmTPC6_RTR_LBW_RANGE_MASK_7 0xF8052C
263
264#define mmTPC6_RTR_LBW_RANGE_MASK_8 0xF80530
265
266#define mmTPC6_RTR_LBW_RANGE_MASK_9 0xF80534
267
268#define mmTPC6_RTR_LBW_RANGE_MASK_10 0xF80538
269
270#define mmTPC6_RTR_LBW_RANGE_MASK_11 0xF8053C
271
272#define mmTPC6_RTR_LBW_RANGE_MASK_12 0xF80540
273
274#define mmTPC6_RTR_LBW_RANGE_MASK_13 0xF80544
275
276#define mmTPC6_RTR_LBW_RANGE_MASK_14 0xF80548
277
278#define mmTPC6_RTR_LBW_RANGE_MASK_15 0xF8054C
279
280#define mmTPC6_RTR_LBW_RANGE_BASE_0 0xF80550
281
282#define mmTPC6_RTR_LBW_RANGE_BASE_1 0xF80554
283
284#define mmTPC6_RTR_LBW_RANGE_BASE_2 0xF80558
285
286#define mmTPC6_RTR_LBW_RANGE_BASE_3 0xF8055C
287
288#define mmTPC6_RTR_LBW_RANGE_BASE_4 0xF80560
289
290#define mmTPC6_RTR_LBW_RANGE_BASE_5 0xF80564
291
292#define mmTPC6_RTR_LBW_RANGE_BASE_6 0xF80568
293
294#define mmTPC6_RTR_LBW_RANGE_BASE_7 0xF8056C
295
296#define mmTPC6_RTR_LBW_RANGE_BASE_8 0xF80570
297
298#define mmTPC6_RTR_LBW_RANGE_BASE_9 0xF80574
299
300#define mmTPC6_RTR_LBW_RANGE_BASE_10 0xF80578
301
302#define mmTPC6_RTR_LBW_RANGE_BASE_11 0xF8057C
303
304#define mmTPC6_RTR_LBW_RANGE_BASE_12 0xF80580
305
306#define mmTPC6_RTR_LBW_RANGE_BASE_13 0xF80584
307
308#define mmTPC6_RTR_LBW_RANGE_BASE_14 0xF80588
309
310#define mmTPC6_RTR_LBW_RANGE_BASE_15 0xF8058C
311
312#define mmTPC6_RTR_RGLTR 0xF80590
313
314#define mmTPC6_RTR_RGLTR_WR_RESULT 0xF80594
315
316#define mmTPC6_RTR_RGLTR_RD_RESULT 0xF80598
317
318#define mmTPC6_RTR_SCRAMB_EN 0xF80600
319
320#define mmTPC6_RTR_NON_LIN_SCRAMB 0xF80604
321
322#endif /* ASIC_REG_TPC6_RTR_REGS_H_ */
323
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
new file mode 100644
index 000000000000..bf2fd0f73906
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
@@ -0,0 +1,887 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC7_CFG_REGS_H_
14#define ASIC_REG_TPC7_CFG_REGS_H_
15
16/*
17 *****************************************
18 * TPC7_CFG (Prototype: TPC)
19 *****************************************
20 */
21
22#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xFC6400
23
24#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xFC6404
25
26#define mmTPC7_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xFC6408
27
28#define mmTPC7_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xFC640C
29
30#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xFC6410
31
32#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xFC6414
33
34#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xFC6418
35
36#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xFC641C
37
38#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xFC6420
39
40#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xFC6424
41
42#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xFC6428
43
44#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xFC642C
45
46#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xFC6430
47
48#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xFC6434
49
50#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xFC6438
51
52#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xFC643C
53
54#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xFC6440
55
56#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xFC6444
57
58#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xFC6448
59
60#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xFC644C
61
62#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xFC6450
63
64#define mmTPC7_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xFC6454
65
66#define mmTPC7_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xFC6458
67
68#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xFC645C
69
70#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xFC6460
71
72#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xFC6464
73
74#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xFC6468
75
76#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xFC646C
77
78#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xFC6470
79
80#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xFC6474
81
82#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xFC6478
83
84#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xFC647C
85
86#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xFC6480
87
88#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xFC6484
89
90#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xFC6488
91
92#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xFC648C
93
94#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xFC6490
95
96#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xFC6494
97
98#define mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xFC6498
99
100#define mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xFC649C
101
102#define mmTPC7_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xFC64A0
103
104#define mmTPC7_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xFC64A4
105
106#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xFC64A8
107
108#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xFC64AC
109
110#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xFC64B0
111
112#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xFC64B4
113
114#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xFC64B8
115
116#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xFC64BC
117
118#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xFC64C0
119
120#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xFC64C4
121
122#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xFC64C8
123
124#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xFC64CC
125
126#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xFC64D0
127
128#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xFC64D4
129
130#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xFC64D8
131
132#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xFC64DC
133
134#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xFC64E0
135
136#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xFC64E4
137
138#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xFC64E8
139
140#define mmTPC7_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xFC64EC
141
142#define mmTPC7_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xFC64F0
143
144#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xFC64F4
145
146#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xFC64F8
147
148#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xFC64FC
149
150#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xFC6500
151
152#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xFC6504
153
154#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xFC6508
155
156#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xFC650C
157
158#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xFC6510
159
160#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xFC6514
161
162#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xFC6518
163
164#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xFC651C
165
166#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xFC6520
167
168#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xFC6524
169
170#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xFC6528
171
172#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xFC652C
173
174#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xFC6530
175
176#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xFC6534
177
178#define mmTPC7_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xFC6538
179
180#define mmTPC7_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xFC653C
181
182#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xFC6540
183
184#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xFC6544
185
186#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xFC6548
187
188#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xFC654C
189
190#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xFC6550
191
192#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xFC6554
193
194#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xFC6558
195
196#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xFC655C
197
198#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xFC6560
199
200#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xFC6564
201
202#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xFC6568
203
204#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xFC656C
205
206#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xFC6570
207
208#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xFC6574
209
210#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xFC6578
211
212#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xFC657C
213
214#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xFC6580
215
216#define mmTPC7_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xFC6584
217
218#define mmTPC7_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xFC6588
219
220#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xFC658C
221
222#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xFC6590
223
224#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xFC6594
225
226#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xFC6598
227
228#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xFC659C
229
230#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xFC65A0
231
232#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xFC65A4
233
234#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xFC65A8
235
236#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xFC65AC
237
238#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xFC65B0
239
240#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xFC65B4
241
242#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xFC65B8
243
244#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xFC65BC
245
246#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xFC65C0
247
248#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xFC65C4
249
250#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xFC65C8
251
252#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xFC65CC
253
254#define mmTPC7_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xFC65D0
255
256#define mmTPC7_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xFC65D4
257
258#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xFC65D8
259
260#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xFC65DC
261
262#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xFC65E0
263
264#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xFC65E4
265
266#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xFC65E8
267
268#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xFC65EC
269
270#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xFC65F0
271
272#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xFC65F4
273
274#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xFC65F8
275
276#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xFC65FC
277
278#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xFC6600
279
280#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xFC6604
281
282#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xFC6608
283
284#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xFC660C
285
286#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xFC6610
287
288#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xFC6614
289
290#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xFC6618
291
292#define mmTPC7_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xFC661C
293
294#define mmTPC7_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xFC6620
295
296#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xFC6624
297
298#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xFC6628
299
300#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xFC662C
301
302#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xFC6630
303
304#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xFC6634
305
306#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xFC6638
307
308#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xFC663C
309
310#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xFC6640
311
312#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xFC6644
313
314#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xFC6648
315
316#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xFC664C
317
318#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xFC6650
319
320#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xFC6654
321
322#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xFC6658
323
324#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xFC665C
325
326#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xFC6660
327
328#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xFC6664
329
330#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_0 0xFC6668
331
332#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_0 0xFC666C
333
334#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_1 0xFC6670
335
336#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_1 0xFC6674
337
338#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_2 0xFC6678
339
340#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_2 0xFC667C
341
342#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_3 0xFC6680
343
344#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_3 0xFC6684
345
346#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_4 0xFC6688
347
348#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_4 0xFC668C
349
350#define mmTPC7_CFG_KERNEL_SRF_0 0xFC6690
351
352#define mmTPC7_CFG_KERNEL_SRF_1 0xFC6694
353
354#define mmTPC7_CFG_KERNEL_SRF_2 0xFC6698
355
356#define mmTPC7_CFG_KERNEL_SRF_3 0xFC669C
357
358#define mmTPC7_CFG_KERNEL_SRF_4 0xFC66A0
359
360#define mmTPC7_CFG_KERNEL_SRF_5 0xFC66A4
361
362#define mmTPC7_CFG_KERNEL_SRF_6 0xFC66A8
363
364#define mmTPC7_CFG_KERNEL_SRF_7 0xFC66AC
365
366#define mmTPC7_CFG_KERNEL_SRF_8 0xFC66B0
367
368#define mmTPC7_CFG_KERNEL_SRF_9 0xFC66B4
369
370#define mmTPC7_CFG_KERNEL_SRF_10 0xFC66B8
371
372#define mmTPC7_CFG_KERNEL_SRF_11 0xFC66BC
373
374#define mmTPC7_CFG_KERNEL_SRF_12 0xFC66C0
375
376#define mmTPC7_CFG_KERNEL_SRF_13 0xFC66C4
377
378#define mmTPC7_CFG_KERNEL_SRF_14 0xFC66C8
379
380#define mmTPC7_CFG_KERNEL_SRF_15 0xFC66CC
381
382#define mmTPC7_CFG_KERNEL_SRF_16 0xFC66D0
383
384#define mmTPC7_CFG_KERNEL_SRF_17 0xFC66D4
385
386#define mmTPC7_CFG_KERNEL_SRF_18 0xFC66D8
387
388#define mmTPC7_CFG_KERNEL_SRF_19 0xFC66DC
389
390#define mmTPC7_CFG_KERNEL_SRF_20 0xFC66E0
391
392#define mmTPC7_CFG_KERNEL_SRF_21 0xFC66E4
393
394#define mmTPC7_CFG_KERNEL_SRF_22 0xFC66E8
395
396#define mmTPC7_CFG_KERNEL_SRF_23 0xFC66EC
397
398#define mmTPC7_CFG_KERNEL_SRF_24 0xFC66F0
399
400#define mmTPC7_CFG_KERNEL_SRF_25 0xFC66F4
401
402#define mmTPC7_CFG_KERNEL_SRF_26 0xFC66F8
403
404#define mmTPC7_CFG_KERNEL_SRF_27 0xFC66FC
405
406#define mmTPC7_CFG_KERNEL_SRF_28 0xFC6700
407
408#define mmTPC7_CFG_KERNEL_SRF_29 0xFC6704
409
410#define mmTPC7_CFG_KERNEL_SRF_30 0xFC6708
411
412#define mmTPC7_CFG_KERNEL_SRF_31 0xFC670C
413
414#define mmTPC7_CFG_KERNEL_KERNEL_CONFIG 0xFC6710
415
416#define mmTPC7_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xFC6714
417
418#define mmTPC7_CFG_RESERVED_DESC_END 0xFC6738
419
420#define mmTPC7_CFG_ROUND_CSR 0xFC67FC
421
422#define mmTPC7_CFG_TBUF_BASE_ADDR_LOW 0xFC6800
423
424#define mmTPC7_CFG_TBUF_BASE_ADDR_HIGH 0xFC6804
425
426#define mmTPC7_CFG_SEMAPHORE 0xFC6808
427
428#define mmTPC7_CFG_VFLAGS 0xFC680C
429
430#define mmTPC7_CFG_SFLAGS 0xFC6810
431
432#define mmTPC7_CFG_LFSR_POLYNOM 0xFC6818
433
434#define mmTPC7_CFG_STATUS 0xFC681C
435
436#define mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH 0xFC6820
437
438#define mmTPC7_CFG_CFG_SUBTRACT_VALUE 0xFC6824
439
440#define mmTPC7_CFG_SM_BASE_ADDRESS_LOW 0xFC6828
441
442#define mmTPC7_CFG_SM_BASE_ADDRESS_HIGH 0xFC682C
443
444#define mmTPC7_CFG_TPC_CMD 0xFC6830
445
446#define mmTPC7_CFG_TPC_EXECUTE 0xFC6838
447
448#define mmTPC7_CFG_TPC_STALL 0xFC683C
449
450#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_LOW 0xFC6840
451
452#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_HIGH 0xFC6844
453
454#define mmTPC7_CFG_MSS_CONFIG 0xFC6854
455
456#define mmTPC7_CFG_TPC_INTR_CAUSE 0xFC6858
457
458#define mmTPC7_CFG_TPC_INTR_MASK 0xFC685C
459
460#define mmTPC7_CFG_TSB_CONFIG 0xFC6860
461
462#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xFC6A00
463
464#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xFC6A04
465
466#define mmTPC7_CFG_QM_TENSOR_0_PADDING_VALUE 0xFC6A08
467
468#define mmTPC7_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xFC6A0C
469
470#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_SIZE 0xFC6A10
471
472#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xFC6A14
473
474#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xFC6A18
475
476#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_SIZE 0xFC6A1C
477
478#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xFC6A20
479
480#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xFC6A24
481
482#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_SIZE 0xFC6A28
483
484#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xFC6A2C
485
486#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xFC6A30
487
488#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_SIZE 0xFC6A34
489
490#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xFC6A38
491
492#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xFC6A3C
493
494#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_SIZE 0xFC6A40
495
496#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xFC6A44
497
498#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xFC6A48
499
500#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xFC6A4C
501
502#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xFC6A50
503
504#define mmTPC7_CFG_QM_TENSOR_1_PADDING_VALUE 0xFC6A54
505
506#define mmTPC7_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xFC6A58
507
508#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_SIZE 0xFC6A5C
509
510#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xFC6A60
511
512#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xFC6A64
513
514#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_SIZE 0xFC6A68
515
516#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xFC6A6C
517
518#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xFC6A70
519
520#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_SIZE 0xFC6A74
521
522#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xFC6A78
523
524#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xFC6A7C
525
526#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_SIZE 0xFC6A80
527
528#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xFC6A84
529
530#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xFC6A88
531
532#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_SIZE 0xFC6A8C
533
534#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xFC6A90
535
536#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xFC6A94
537
538#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xFC6A98
539
540#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xFC6A9C
541
542#define mmTPC7_CFG_QM_TENSOR_2_PADDING_VALUE 0xFC6AA0
543
544#define mmTPC7_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xFC6AA4
545
546#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_SIZE 0xFC6AA8
547
548#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xFC6AAC
549
550#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xFC6AB0
551
552#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_SIZE 0xFC6AB4
553
554#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xFC6AB8
555
556#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xFC6ABC
557
558#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_SIZE 0xFC6AC0
559
560#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xFC6AC4
561
562#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xFC6AC8
563
564#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_SIZE 0xFC6ACC
565
566#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xFC6AD0
567
568#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xFC6AD4
569
570#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_SIZE 0xFC6AD8
571
572#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xFC6ADC
573
574#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xFC6AE0
575
576#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xFC6AE4
577
578#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xFC6AE8
579
580#define mmTPC7_CFG_QM_TENSOR_3_PADDING_VALUE 0xFC6AEC
581
582#define mmTPC7_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xFC6AF0
583
584#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_SIZE 0xFC6AF4
585
586#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xFC6AF8
587
588#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xFC6AFC
589
590#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_SIZE 0xFC6B00
591
592#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xFC6B04
593
594#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xFC6B08
595
596#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_SIZE 0xFC6B0C
597
598#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xFC6B10
599
600#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xFC6B14
601
602#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_SIZE 0xFC6B18
603
604#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xFC6B1C
605
606#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xFC6B20
607
608#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_SIZE 0xFC6B24
609
610#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xFC6B28
611
612#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xFC6B2C
613
614#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xFC6B30
615
616#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xFC6B34
617
618#define mmTPC7_CFG_QM_TENSOR_4_PADDING_VALUE 0xFC6B38
619
620#define mmTPC7_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xFC6B3C
621
622#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_SIZE 0xFC6B40
623
624#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xFC6B44
625
626#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xFC6B48
627
628#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_SIZE 0xFC6B4C
629
630#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xFC6B50
631
632#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xFC6B54
633
634#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_SIZE 0xFC6B58
635
636#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xFC6B5C
637
638#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xFC6B60
639
640#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_SIZE 0xFC6B64
641
642#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xFC6B68
643
644#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xFC6B6C
645
646#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_SIZE 0xFC6B70
647
648#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xFC6B74
649
650#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xFC6B78
651
652#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xFC6B7C
653
654#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xFC6B80
655
656#define mmTPC7_CFG_QM_TENSOR_5_PADDING_VALUE 0xFC6B84
657
658#define mmTPC7_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xFC6B88
659
660#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_SIZE 0xFC6B8C
661
662#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xFC6B90
663
664#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xFC6B94
665
666#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_SIZE 0xFC6B98
667
668#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xFC6B9C
669
670#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xFC6BA0
671
672#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_SIZE 0xFC6BA4
673
674#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xFC6BA8
675
676#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xFC6BAC
677
678#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_SIZE 0xFC6BB0
679
680#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xFC6BB4
681
682#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xFC6BB8
683
684#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_SIZE 0xFC6BBC
685
686#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xFC6BC0
687
688#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xFC6BC4
689
690#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xFC6BC8
691
692#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xFC6BCC
693
694#define mmTPC7_CFG_QM_TENSOR_6_PADDING_VALUE 0xFC6BD0
695
696#define mmTPC7_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xFC6BD4
697
698#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_SIZE 0xFC6BD8
699
700#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xFC6BDC
701
702#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xFC6BE0
703
704#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_SIZE 0xFC6BE4
705
706#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xFC6BE8
707
708#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xFC6BEC
709
710#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_SIZE 0xFC6BF0
711
712#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xFC6BF4
713
714#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xFC6BF8
715
716#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_SIZE 0xFC6BFC
717
718#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xFC6C00
719
720#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xFC6C04
721
722#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_SIZE 0xFC6C08
723
724#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xFC6C0C
725
726#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xFC6C10
727
728#define mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xFC6C14
729
730#define mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xFC6C18
731
732#define mmTPC7_CFG_QM_TENSOR_7_PADDING_VALUE 0xFC6C1C
733
734#define mmTPC7_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xFC6C20
735
736#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_SIZE 0xFC6C24
737
738#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xFC6C28
739
740#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xFC6C2C
741
742#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_SIZE 0xFC6C30
743
744#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xFC6C34
745
746#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xFC6C38
747
748#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_SIZE 0xFC6C3C
749
750#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xFC6C40
751
752#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xFC6C44
753
754#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_SIZE 0xFC6C48
755
756#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xFC6C4C
757
758#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xFC6C50
759
760#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_SIZE 0xFC6C54
761
762#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xFC6C58
763
764#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xFC6C5C
765
766#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xFC6C60
767
768#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xFC6C64
769
770#define mmTPC7_CFG_QM_TID_BASE_DIM_0 0xFC6C68
771
772#define mmTPC7_CFG_QM_TID_SIZE_DIM_0 0xFC6C6C
773
774#define mmTPC7_CFG_QM_TID_BASE_DIM_1 0xFC6C70
775
776#define mmTPC7_CFG_QM_TID_SIZE_DIM_1 0xFC6C74
777
778#define mmTPC7_CFG_QM_TID_BASE_DIM_2 0xFC6C78
779
780#define mmTPC7_CFG_QM_TID_SIZE_DIM_2 0xFC6C7C
781
782#define mmTPC7_CFG_QM_TID_BASE_DIM_3 0xFC6C80
783
784#define mmTPC7_CFG_QM_TID_SIZE_DIM_3 0xFC6C84
785
786#define mmTPC7_CFG_QM_TID_BASE_DIM_4 0xFC6C88
787
788#define mmTPC7_CFG_QM_TID_SIZE_DIM_4 0xFC6C8C
789
790#define mmTPC7_CFG_QM_SRF_0 0xFC6C90
791
792#define mmTPC7_CFG_QM_SRF_1 0xFC6C94
793
794#define mmTPC7_CFG_QM_SRF_2 0xFC6C98
795
796#define mmTPC7_CFG_QM_SRF_3 0xFC6C9C
797
798#define mmTPC7_CFG_QM_SRF_4 0xFC6CA0
799
800#define mmTPC7_CFG_QM_SRF_5 0xFC6CA4
801
802#define mmTPC7_CFG_QM_SRF_6 0xFC6CA8
803
804#define mmTPC7_CFG_QM_SRF_7 0xFC6CAC
805
806#define mmTPC7_CFG_QM_SRF_8 0xFC6CB0
807
808#define mmTPC7_CFG_QM_SRF_9 0xFC6CB4
809
810#define mmTPC7_CFG_QM_SRF_10 0xFC6CB8
811
812#define mmTPC7_CFG_QM_SRF_11 0xFC6CBC
813
814#define mmTPC7_CFG_QM_SRF_12 0xFC6CC0
815
816#define mmTPC7_CFG_QM_SRF_13 0xFC6CC4
817
818#define mmTPC7_CFG_QM_SRF_14 0xFC6CC8
819
820#define mmTPC7_CFG_QM_SRF_15 0xFC6CCC
821
822#define mmTPC7_CFG_QM_SRF_16 0xFC6CD0
823
824#define mmTPC7_CFG_QM_SRF_17 0xFC6CD4
825
826#define mmTPC7_CFG_QM_SRF_18 0xFC6CD8
827
828#define mmTPC7_CFG_QM_SRF_19 0xFC6CDC
829
830#define mmTPC7_CFG_QM_SRF_20 0xFC6CE0
831
832#define mmTPC7_CFG_QM_SRF_21 0xFC6CE4
833
834#define mmTPC7_CFG_QM_SRF_22 0xFC6CE8
835
836#define mmTPC7_CFG_QM_SRF_23 0xFC6CEC
837
838#define mmTPC7_CFG_QM_SRF_24 0xFC6CF0
839
840#define mmTPC7_CFG_QM_SRF_25 0xFC6CF4
841
842#define mmTPC7_CFG_QM_SRF_26 0xFC6CF8
843
844#define mmTPC7_CFG_QM_SRF_27 0xFC6CFC
845
846#define mmTPC7_CFG_QM_SRF_28 0xFC6D00
847
848#define mmTPC7_CFG_QM_SRF_29 0xFC6D04
849
850#define mmTPC7_CFG_QM_SRF_30 0xFC6D08
851
852#define mmTPC7_CFG_QM_SRF_31 0xFC6D0C
853
854#define mmTPC7_CFG_QM_KERNEL_CONFIG 0xFC6D10
855
856#define mmTPC7_CFG_QM_SYNC_OBJECT_MESSAGE 0xFC6D14
857
858#define mmTPC7_CFG_ARUSER 0xFC6D18
859
860#define mmTPC7_CFG_AWUSER 0xFC6D1C
861
862#define mmTPC7_CFG_FUNC_MBIST_CNTRL 0xFC6E00
863
864#define mmTPC7_CFG_FUNC_MBIST_PAT 0xFC6E04
865
866#define mmTPC7_CFG_FUNC_MBIST_MEM_0 0xFC6E08
867
868#define mmTPC7_CFG_FUNC_MBIST_MEM_1 0xFC6E0C
869
870#define mmTPC7_CFG_FUNC_MBIST_MEM_2 0xFC6E10
871
872#define mmTPC7_CFG_FUNC_MBIST_MEM_3 0xFC6E14
873
874#define mmTPC7_CFG_FUNC_MBIST_MEM_4 0xFC6E18
875
876#define mmTPC7_CFG_FUNC_MBIST_MEM_5 0xFC6E1C
877
878#define mmTPC7_CFG_FUNC_MBIST_MEM_6 0xFC6E20
879
880#define mmTPC7_CFG_FUNC_MBIST_MEM_7 0xFC6E24
881
882#define mmTPC7_CFG_FUNC_MBIST_MEM_8 0xFC6E28
883
884#define mmTPC7_CFG_FUNC_MBIST_MEM_9 0xFC6E2C
885
886#endif /* ASIC_REG_TPC7_CFG_REGS_H_ */
887
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
new file mode 100644
index 000000000000..65d83043bf63
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
@@ -0,0 +1,139 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC7_CMDQ_REGS_H_
14#define ASIC_REG_TPC7_CMDQ_REGS_H_
15
16/*
17 *****************************************
18 * TPC7_CMDQ (Prototype: CMDQ)
19 *****************************************
20 */
21
22#define mmTPC7_CMDQ_GLBL_CFG0 0xFC9000
23
24#define mmTPC7_CMDQ_GLBL_CFG1 0xFC9004
25
26#define mmTPC7_CMDQ_GLBL_PROT 0xFC9008
27
28#define mmTPC7_CMDQ_GLBL_ERR_CFG 0xFC900C
29
30#define mmTPC7_CMDQ_GLBL_ERR_ADDR_LO 0xFC9010
31
32#define mmTPC7_CMDQ_GLBL_ERR_ADDR_HI 0xFC9014
33
34#define mmTPC7_CMDQ_GLBL_ERR_WDATA 0xFC9018
35
36#define mmTPC7_CMDQ_GLBL_SECURE_PROPS 0xFC901C
37
38#define mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS 0xFC9020
39
40#define mmTPC7_CMDQ_GLBL_STS0 0xFC9024
41
42#define mmTPC7_CMDQ_GLBL_STS1 0xFC9028
43
44#define mmTPC7_CMDQ_CQ_CFG0 0xFC90B0
45
46#define mmTPC7_CMDQ_CQ_CFG1 0xFC90B4
47
48#define mmTPC7_CMDQ_CQ_ARUSER 0xFC90B8
49
50#define mmTPC7_CMDQ_CQ_PTR_LO 0xFC90C0
51
52#define mmTPC7_CMDQ_CQ_PTR_HI 0xFC90C4
53
54#define mmTPC7_CMDQ_CQ_TSIZE 0xFC90C8
55
56#define mmTPC7_CMDQ_CQ_CTL 0xFC90CC
57
58#define mmTPC7_CMDQ_CQ_PTR_LO_STS 0xFC90D4
59
60#define mmTPC7_CMDQ_CQ_PTR_HI_STS 0xFC90D8
61
62#define mmTPC7_CMDQ_CQ_TSIZE_STS 0xFC90DC
63
64#define mmTPC7_CMDQ_CQ_CTL_STS 0xFC90E0
65
66#define mmTPC7_CMDQ_CQ_STS0 0xFC90E4
67
68#define mmTPC7_CMDQ_CQ_STS1 0xFC90E8
69
70#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_EN 0xFC90F0
71
72#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xFC90F4
73
74#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_SAT 0xFC90F8
75
76#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_TOUT 0xFC90FC
77
78#define mmTPC7_CMDQ_CQ_IFIFO_CNT 0xFC9108
79
80#define mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_LO 0xFC9120
81
82#define mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_HI 0xFC9124
83
84#define mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_LO 0xFC9128
85
86#define mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_HI 0xFC912C
87
88#define mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_LO 0xFC9130
89
90#define mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_HI 0xFC9134
91
92#define mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_LO 0xFC9138
93
94#define mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_HI 0xFC913C
95
96#define mmTPC7_CMDQ_CP_LDMA_TSIZE_OFFSET 0xFC9140
97
98#define mmTPC7_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xFC9144
99
100#define mmTPC7_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xFC9148
101
102#define mmTPC7_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xFC914C
103
104#define mmTPC7_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xFC9150
105
106#define mmTPC7_CMDQ_CP_LDMA_COMMIT_OFFSET 0xFC9154
107
108#define mmTPC7_CMDQ_CP_FENCE0_RDATA 0xFC9158
109
110#define mmTPC7_CMDQ_CP_FENCE1_RDATA 0xFC915C
111
112#define mmTPC7_CMDQ_CP_FENCE2_RDATA 0xFC9160
113
114#define mmTPC7_CMDQ_CP_FENCE3_RDATA 0xFC9164
115
116#define mmTPC7_CMDQ_CP_FENCE0_CNT 0xFC9168
117
118#define mmTPC7_CMDQ_CP_FENCE1_CNT 0xFC916C
119
120#define mmTPC7_CMDQ_CP_FENCE2_CNT 0xFC9170
121
122#define mmTPC7_CMDQ_CP_FENCE3_CNT 0xFC9174
123
124#define mmTPC7_CMDQ_CP_STS 0xFC9178
125
126#define mmTPC7_CMDQ_CP_CURRENT_INST_LO 0xFC917C
127
128#define mmTPC7_CMDQ_CP_CURRENT_INST_HI 0xFC9180
129
130#define mmTPC7_CMDQ_CP_BARRIER_CFG 0xFC9184
131
132#define mmTPC7_CMDQ_CP_DBG_0 0xFC9188
133
134#define mmTPC7_CMDQ_CQ_BUF_ADDR 0xFC9308
135
136#define mmTPC7_CMDQ_CQ_BUF_RDATA 0xFC930C
137
138#endif /* ASIC_REG_TPC7_CMDQ_REGS_H_ */
139
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
new file mode 100644
index 000000000000..3d5848d87304
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
@@ -0,0 +1,227 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC7_NRTR_REGS_H_
14#define ASIC_REG_TPC7_NRTR_REGS_H_
15
16/*
17 *****************************************
18 * TPC7_NRTR (Prototype: IF_NRTR)
19 *****************************************
20 */
21
22#define mmTPC7_NRTR_HBW_MAX_CRED 0xFC0100
23
24#define mmTPC7_NRTR_LBW_MAX_CRED 0xFC0120
25
26#define mmTPC7_NRTR_DBG_E_ARB 0xFC0300
27
28#define mmTPC7_NRTR_DBG_W_ARB 0xFC0304
29
30#define mmTPC7_NRTR_DBG_N_ARB 0xFC0308
31
32#define mmTPC7_NRTR_DBG_S_ARB 0xFC030C
33
34#define mmTPC7_NRTR_DBG_L_ARB 0xFC0310
35
36#define mmTPC7_NRTR_DBG_E_ARB_MAX 0xFC0320
37
38#define mmTPC7_NRTR_DBG_W_ARB_MAX 0xFC0324
39
40#define mmTPC7_NRTR_DBG_N_ARB_MAX 0xFC0328
41
42#define mmTPC7_NRTR_DBG_S_ARB_MAX 0xFC032C
43
44#define mmTPC7_NRTR_DBG_L_ARB_MAX 0xFC0330
45
46#define mmTPC7_NRTR_SPLIT_COEF_0 0xFC0400
47
48#define mmTPC7_NRTR_SPLIT_COEF_1 0xFC0404
49
50#define mmTPC7_NRTR_SPLIT_COEF_2 0xFC0408
51
52#define mmTPC7_NRTR_SPLIT_COEF_3 0xFC040C
53
54#define mmTPC7_NRTR_SPLIT_COEF_4 0xFC0410
55
56#define mmTPC7_NRTR_SPLIT_COEF_5 0xFC0414
57
58#define mmTPC7_NRTR_SPLIT_COEF_6 0xFC0418
59
60#define mmTPC7_NRTR_SPLIT_COEF_7 0xFC041C
61
62#define mmTPC7_NRTR_SPLIT_COEF_8 0xFC0420
63
64#define mmTPC7_NRTR_SPLIT_COEF_9 0xFC0424
65
66#define mmTPC7_NRTR_SPLIT_CFG 0xFC0440
67
68#define mmTPC7_NRTR_SPLIT_RD_SAT 0xFC0444
69
70#define mmTPC7_NRTR_SPLIT_RD_RST_TOKEN 0xFC0448
71
72#define mmTPC7_NRTR_SPLIT_RD_TIMEOUT_0 0xFC044C
73
74#define mmTPC7_NRTR_SPLIT_RD_TIMEOUT_1 0xFC0450
75
76#define mmTPC7_NRTR_SPLIT_WR_SAT 0xFC0454
77
78#define mmTPC7_NRTR_WPLIT_WR_TST_TOLEN 0xFC0458
79
80#define mmTPC7_NRTR_SPLIT_WR_TIMEOUT_0 0xFC045C
81
82#define mmTPC7_NRTR_SPLIT_WR_TIMEOUT_1 0xFC0460
83
84#define mmTPC7_NRTR_HBW_RANGE_HIT 0xFC0470
85
86#define mmTPC7_NRTR_HBW_RANGE_MASK_L_0 0xFC0480
87
88#define mmTPC7_NRTR_HBW_RANGE_MASK_L_1 0xFC0484
89
90#define mmTPC7_NRTR_HBW_RANGE_MASK_L_2 0xFC0488
91
92#define mmTPC7_NRTR_HBW_RANGE_MASK_L_3 0xFC048C
93
94#define mmTPC7_NRTR_HBW_RANGE_MASK_L_4 0xFC0490
95
96#define mmTPC7_NRTR_HBW_RANGE_MASK_L_5 0xFC0494
97
98#define mmTPC7_NRTR_HBW_RANGE_MASK_L_6 0xFC0498
99
100#define mmTPC7_NRTR_HBW_RANGE_MASK_L_7 0xFC049C
101
102#define mmTPC7_NRTR_HBW_RANGE_MASK_H_0 0xFC04A0
103
104#define mmTPC7_NRTR_HBW_RANGE_MASK_H_1 0xFC04A4
105
106#define mmTPC7_NRTR_HBW_RANGE_MASK_H_2 0xFC04A8
107
108#define mmTPC7_NRTR_HBW_RANGE_MASK_H_3 0xFC04AC
109
110#define mmTPC7_NRTR_HBW_RANGE_MASK_H_4 0xFC04B0
111
112#define mmTPC7_NRTR_HBW_RANGE_MASK_H_5 0xFC04B4
113
114#define mmTPC7_NRTR_HBW_RANGE_MASK_H_6 0xFC04B8
115
116#define mmTPC7_NRTR_HBW_RANGE_MASK_H_7 0xFC04BC
117
118#define mmTPC7_NRTR_HBW_RANGE_BASE_L_0 0xFC04C0
119
120#define mmTPC7_NRTR_HBW_RANGE_BASE_L_1 0xFC04C4
121
122#define mmTPC7_NRTR_HBW_RANGE_BASE_L_2 0xFC04C8
123
124#define mmTPC7_NRTR_HBW_RANGE_BASE_L_3 0xFC04CC
125
126#define mmTPC7_NRTR_HBW_RANGE_BASE_L_4 0xFC04D0
127
128#define mmTPC7_NRTR_HBW_RANGE_BASE_L_5 0xFC04D4
129
130#define mmTPC7_NRTR_HBW_RANGE_BASE_L_6 0xFC04D8
131
132#define mmTPC7_NRTR_HBW_RANGE_BASE_L_7 0xFC04DC
133
134#define mmTPC7_NRTR_HBW_RANGE_BASE_H_0 0xFC04E0
135
136#define mmTPC7_NRTR_HBW_RANGE_BASE_H_1 0xFC04E4
137
138#define mmTPC7_NRTR_HBW_RANGE_BASE_H_2 0xFC04E8
139
140#define mmTPC7_NRTR_HBW_RANGE_BASE_H_3 0xFC04EC
141
142#define mmTPC7_NRTR_HBW_RANGE_BASE_H_4 0xFC04F0
143
144#define mmTPC7_NRTR_HBW_RANGE_BASE_H_5 0xFC04F4
145
146#define mmTPC7_NRTR_HBW_RANGE_BASE_H_6 0xFC04F8
147
148#define mmTPC7_NRTR_HBW_RANGE_BASE_H_7 0xFC04FC
149
150#define mmTPC7_NRTR_LBW_RANGE_HIT 0xFC0500
151
152#define mmTPC7_NRTR_LBW_RANGE_MASK_0 0xFC0510
153
154#define mmTPC7_NRTR_LBW_RANGE_MASK_1 0xFC0514
155
156#define mmTPC7_NRTR_LBW_RANGE_MASK_2 0xFC0518
157
158#define mmTPC7_NRTR_LBW_RANGE_MASK_3 0xFC051C
159
160#define mmTPC7_NRTR_LBW_RANGE_MASK_4 0xFC0520
161
162#define mmTPC7_NRTR_LBW_RANGE_MASK_5 0xFC0524
163
164#define mmTPC7_NRTR_LBW_RANGE_MASK_6 0xFC0528
165
166#define mmTPC7_NRTR_LBW_RANGE_MASK_7 0xFC052C
167
168#define mmTPC7_NRTR_LBW_RANGE_MASK_8 0xFC0530
169
170#define mmTPC7_NRTR_LBW_RANGE_MASK_9 0xFC0534
171
172#define mmTPC7_NRTR_LBW_RANGE_MASK_10 0xFC0538
173
174#define mmTPC7_NRTR_LBW_RANGE_MASK_11 0xFC053C
175
176#define mmTPC7_NRTR_LBW_RANGE_MASK_12 0xFC0540
177
178#define mmTPC7_NRTR_LBW_RANGE_MASK_13 0xFC0544
179
180#define mmTPC7_NRTR_LBW_RANGE_MASK_14 0xFC0548
181
182#define mmTPC7_NRTR_LBW_RANGE_MASK_15 0xFC054C
183
184#define mmTPC7_NRTR_LBW_RANGE_BASE_0 0xFC0550
185
186#define mmTPC7_NRTR_LBW_RANGE_BASE_1 0xFC0554
187
188#define mmTPC7_NRTR_LBW_RANGE_BASE_2 0xFC0558
189
190#define mmTPC7_NRTR_LBW_RANGE_BASE_3 0xFC055C
191
192#define mmTPC7_NRTR_LBW_RANGE_BASE_4 0xFC0560
193
194#define mmTPC7_NRTR_LBW_RANGE_BASE_5 0xFC0564
195
196#define mmTPC7_NRTR_LBW_RANGE_BASE_6 0xFC0568
197
198#define mmTPC7_NRTR_LBW_RANGE_BASE_7 0xFC056C
199
200#define mmTPC7_NRTR_LBW_RANGE_BASE_8 0xFC0570
201
202#define mmTPC7_NRTR_LBW_RANGE_BASE_9 0xFC0574
203
204#define mmTPC7_NRTR_LBW_RANGE_BASE_10 0xFC0578
205
206#define mmTPC7_NRTR_LBW_RANGE_BASE_11 0xFC057C
207
208#define mmTPC7_NRTR_LBW_RANGE_BASE_12 0xFC0580
209
210#define mmTPC7_NRTR_LBW_RANGE_BASE_13 0xFC0584
211
212#define mmTPC7_NRTR_LBW_RANGE_BASE_14 0xFC0588
213
214#define mmTPC7_NRTR_LBW_RANGE_BASE_15 0xFC058C
215
216#define mmTPC7_NRTR_RGLTR 0xFC0590
217
218#define mmTPC7_NRTR_RGLTR_WR_RESULT 0xFC0594
219
220#define mmTPC7_NRTR_RGLTR_RD_RESULT 0xFC0598
221
222#define mmTPC7_NRTR_SCRAMB_EN 0xFC0600
223
224#define mmTPC7_NRTR_NON_LIN_SCRAMB 0xFC0604
225
226#endif /* ASIC_REG_TPC7_NRTR_REGS_H_ */
227
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
new file mode 100644
index 000000000000..25f5095f68fb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
@@ -0,0 +1,179 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC7_QM_REGS_H_
14#define ASIC_REG_TPC7_QM_REGS_H_
15
16/*
17 *****************************************
18 * TPC7_QM (Prototype: QMAN)
19 *****************************************
20 */
21
22#define mmTPC7_QM_GLBL_CFG0 0xFC8000
23
24#define mmTPC7_QM_GLBL_CFG1 0xFC8004
25
26#define mmTPC7_QM_GLBL_PROT 0xFC8008
27
28#define mmTPC7_QM_GLBL_ERR_CFG 0xFC800C
29
30#define mmTPC7_QM_GLBL_ERR_ADDR_LO 0xFC8010
31
32#define mmTPC7_QM_GLBL_ERR_ADDR_HI 0xFC8014
33
34#define mmTPC7_QM_GLBL_ERR_WDATA 0xFC8018
35
36#define mmTPC7_QM_GLBL_SECURE_PROPS 0xFC801C
37
38#define mmTPC7_QM_GLBL_NON_SECURE_PROPS 0xFC8020
39
40#define mmTPC7_QM_GLBL_STS0 0xFC8024
41
42#define mmTPC7_QM_GLBL_STS1 0xFC8028
43
44#define mmTPC7_QM_PQ_BASE_LO 0xFC8060
45
46#define mmTPC7_QM_PQ_BASE_HI 0xFC8064
47
48#define mmTPC7_QM_PQ_SIZE 0xFC8068
49
50#define mmTPC7_QM_PQ_PI 0xFC806C
51
52#define mmTPC7_QM_PQ_CI 0xFC8070
53
54#define mmTPC7_QM_PQ_CFG0 0xFC8074
55
56#define mmTPC7_QM_PQ_CFG1 0xFC8078
57
58#define mmTPC7_QM_PQ_ARUSER 0xFC807C
59
60#define mmTPC7_QM_PQ_PUSH0 0xFC8080
61
62#define mmTPC7_QM_PQ_PUSH1 0xFC8084
63
64#define mmTPC7_QM_PQ_PUSH2 0xFC8088
65
66#define mmTPC7_QM_PQ_PUSH3 0xFC808C
67
68#define mmTPC7_QM_PQ_STS0 0xFC8090
69
70#define mmTPC7_QM_PQ_STS1 0xFC8094
71
72#define mmTPC7_QM_PQ_RD_RATE_LIM_EN 0xFC80A0
73
74#define mmTPC7_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xFC80A4
75
76#define mmTPC7_QM_PQ_RD_RATE_LIM_SAT 0xFC80A8
77
78#define mmTPC7_QM_PQ_RD_RATE_LIM_TOUT 0xFC80AC
79
80#define mmTPC7_QM_CQ_CFG0 0xFC80B0
81
82#define mmTPC7_QM_CQ_CFG1 0xFC80B4
83
84#define mmTPC7_QM_CQ_ARUSER 0xFC80B8
85
86#define mmTPC7_QM_CQ_PTR_LO 0xFC80C0
87
88#define mmTPC7_QM_CQ_PTR_HI 0xFC80C4
89
90#define mmTPC7_QM_CQ_TSIZE 0xFC80C8
91
92#define mmTPC7_QM_CQ_CTL 0xFC80CC
93
94#define mmTPC7_QM_CQ_PTR_LO_STS 0xFC80D4
95
96#define mmTPC7_QM_CQ_PTR_HI_STS 0xFC80D8
97
98#define mmTPC7_QM_CQ_TSIZE_STS 0xFC80DC
99
100#define mmTPC7_QM_CQ_CTL_STS 0xFC80E0
101
102#define mmTPC7_QM_CQ_STS0 0xFC80E4
103
104#define mmTPC7_QM_CQ_STS1 0xFC80E8
105
106#define mmTPC7_QM_CQ_RD_RATE_LIM_EN 0xFC80F0
107
108#define mmTPC7_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xFC80F4
109
110#define mmTPC7_QM_CQ_RD_RATE_LIM_SAT 0xFC80F8
111
112#define mmTPC7_QM_CQ_RD_RATE_LIM_TOUT 0xFC80FC
113
114#define mmTPC7_QM_CQ_IFIFO_CNT 0xFC8108
115
116#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO 0xFC8120
117
118#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI 0xFC8124
119
120#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO 0xFC8128
121
122#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI 0xFC812C
123
124#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO 0xFC8130
125
126#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI 0xFC8134
127
128#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO 0xFC8138
129
130#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI 0xFC813C
131
132#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET 0xFC8140
133
134#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xFC8144
135
136#define mmTPC7_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xFC8148
137
138#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xFC814C
139
140#define mmTPC7_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xFC8150
141
142#define mmTPC7_QM_CP_LDMA_COMMIT_OFFSET 0xFC8154
143
144#define mmTPC7_QM_CP_FENCE0_RDATA 0xFC8158
145
146#define mmTPC7_QM_CP_FENCE1_RDATA 0xFC815C
147
148#define mmTPC7_QM_CP_FENCE2_RDATA 0xFC8160
149
150#define mmTPC7_QM_CP_FENCE3_RDATA 0xFC8164
151
152#define mmTPC7_QM_CP_FENCE0_CNT 0xFC8168
153
154#define mmTPC7_QM_CP_FENCE1_CNT 0xFC816C
155
156#define mmTPC7_QM_CP_FENCE2_CNT 0xFC8170
157
158#define mmTPC7_QM_CP_FENCE3_CNT 0xFC8174
159
160#define mmTPC7_QM_CP_STS 0xFC8178
161
162#define mmTPC7_QM_CP_CURRENT_INST_LO 0xFC817C
163
164#define mmTPC7_QM_CP_CURRENT_INST_HI 0xFC8180
165
166#define mmTPC7_QM_CP_BARRIER_CFG 0xFC8184
167
168#define mmTPC7_QM_CP_DBG_0 0xFC8188
169
170#define mmTPC7_QM_PQ_BUF_ADDR 0xFC8300
171
172#define mmTPC7_QM_PQ_BUF_RDATA 0xFC8304
173
174#define mmTPC7_QM_CQ_BUF_ADDR 0xFC8308
175
176#define mmTPC7_QM_CQ_BUF_RDATA 0xFC830C
177
178#endif /* ASIC_REG_TPC7_QM_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
new file mode 100644
index 000000000000..920231d0afa5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
@@ -0,0 +1,105 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_TPC_PLL_REGS_H_
14#define ASIC_REG_TPC_PLL_REGS_H_
15
16/*
17 *****************************************
18 * TPC_PLL (Prototype: PLL)
19 *****************************************
20 */
21
22#define mmTPC_PLL_NR 0xE01100
23
24#define mmTPC_PLL_NF 0xE01104
25
26#define mmTPC_PLL_OD 0xE01108
27
28#define mmTPC_PLL_NB 0xE0110C
29
30#define mmTPC_PLL_CFG 0xE01110
31
32#define mmTPC_PLL_LOSE_MASK 0xE01120
33
34#define mmTPC_PLL_LOCK_INTR 0xE01128
35
36#define mmTPC_PLL_LOCK_BYPASS 0xE0112C
37
38#define mmTPC_PLL_DATA_CHNG 0xE01130
39
40#define mmTPC_PLL_RST 0xE01134
41
42#define mmTPC_PLL_SLIP_WD_CNTR 0xE01150
43
44#define mmTPC_PLL_DIV_FACTOR_0 0xE01200
45
46#define mmTPC_PLL_DIV_FACTOR_1 0xE01204
47
48#define mmTPC_PLL_DIV_FACTOR_2 0xE01208
49
50#define mmTPC_PLL_DIV_FACTOR_3 0xE0120C
51
52#define mmTPC_PLL_DIV_FACTOR_CMD_0 0xE01220
53
54#define mmTPC_PLL_DIV_FACTOR_CMD_1 0xE01224
55
56#define mmTPC_PLL_DIV_FACTOR_CMD_2 0xE01228
57
58#define mmTPC_PLL_DIV_FACTOR_CMD_3 0xE0122C
59
60#define mmTPC_PLL_DIV_SEL_0 0xE01280
61
62#define mmTPC_PLL_DIV_SEL_1 0xE01284
63
64#define mmTPC_PLL_DIV_SEL_2 0xE01288
65
66#define mmTPC_PLL_DIV_SEL_3 0xE0128C
67
68#define mmTPC_PLL_DIV_EN_0 0xE012A0
69
70#define mmTPC_PLL_DIV_EN_1 0xE012A4
71
72#define mmTPC_PLL_DIV_EN_2 0xE012A8
73
74#define mmTPC_PLL_DIV_EN_3 0xE012AC
75
76#define mmTPC_PLL_DIV_FACTOR_BUSY_0 0xE012C0
77
78#define mmTPC_PLL_DIV_FACTOR_BUSY_1 0xE012C4
79
80#define mmTPC_PLL_DIV_FACTOR_BUSY_2 0xE012C8
81
82#define mmTPC_PLL_DIV_FACTOR_BUSY_3 0xE012CC
83
84#define mmTPC_PLL_CLK_GATER 0xE01300
85
86#define mmTPC_PLL_CLK_RLX_0 0xE01310
87
88#define mmTPC_PLL_CLK_RLX_1 0xE01314
89
90#define mmTPC_PLL_CLK_RLX_2 0xE01318
91
92#define mmTPC_PLL_CLK_RLX_3 0xE0131C
93
94#define mmTPC_PLL_REF_CNTR_PERIOD 0xE01400
95
96#define mmTPC_PLL_REF_LOW_THRESHOLD 0xE01410
97
98#define mmTPC_PLL_REF_HIGH_THRESHOLD 0xE01420
99
100#define mmTPC_PLL_PLL_NOT_STABLE 0xE01430
101
102#define mmTPC_PLL_FREQ_CALC_EN 0xE01440
103
104#endif /* ASIC_REG_TPC_PLL_REGS_H_ */
105
diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/misc/habanalabs/include/goya/goya.h
new file mode 100644
index 000000000000..614149efa412
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya.h
@@ -0,0 +1,45 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2019 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef GOYA_H
9#define GOYA_H
10
11#include "asic_reg/goya_regs.h"
12
13#include <linux/types.h>
14
15#define SRAM_CFG_BAR_ID 0
16#define MSIX_BAR_ID 2
17#define DDR_BAR_ID 4
18
19#define CFG_BAR_SIZE 0x10000000ull /* 256MB */
20#define MSIX_BAR_SIZE 0x1000ull /* 4KB */
21
22#define CFG_BASE 0x7FFC000000ull
23#define CFG_SIZE 0x4000000 /* 32MB CFG + 32MB DBG*/
24
25#define SRAM_BASE_ADDR 0x7FF0000000ull
26#define SRAM_SIZE 0x32A0000 /* 50.625MB */
27
28#define DRAM_PHYS_BASE 0x0ull
29
30#define HOST_PHYS_BASE 0x8000000000ull /* 0.5TB */
31#define HOST_PHYS_SIZE 0x1000000000000ull /* 0.25PB (48 bits) */
32
33#define GOYA_MSIX_ENTRIES 8
34
35#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */
36
37#define MAX_ASID 1024
38
39#define PROT_BITS_OFFS 0xF80
40
41#define DMA_MAX_NUM 5
42
43#define TPC_MAX_NUM 8
44
45#endif /* GOYA_H */
diff --git a/drivers/misc/habanalabs/include/goya/goya_async_events.h b/drivers/misc/habanalabs/include/goya/goya_async_events.h
new file mode 100644
index 000000000000..497937a17ee9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_async_events.h
@@ -0,0 +1,186 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef __GOYA_ASYNC_EVENTS_H_
9#define __GOYA_ASYNC_EVENTS_H_
10
11enum goya_async_event_id {
12 GOYA_ASYNC_EVENT_ID_PCIE_IF = 33,
13 GOYA_ASYNC_EVENT_ID_TPC0_ECC = 36,
14 GOYA_ASYNC_EVENT_ID_TPC1_ECC = 39,
15 GOYA_ASYNC_EVENT_ID_TPC2_ECC = 42,
16 GOYA_ASYNC_EVENT_ID_TPC3_ECC = 45,
17 GOYA_ASYNC_EVENT_ID_TPC4_ECC = 48,
18 GOYA_ASYNC_EVENT_ID_TPC5_ECC = 51,
19 GOYA_ASYNC_EVENT_ID_TPC6_ECC = 54,
20 GOYA_ASYNC_EVENT_ID_TPC7_ECC = 57,
21 GOYA_ASYNC_EVENT_ID_MME_ECC = 60,
22 GOYA_ASYNC_EVENT_ID_MME_ECC_EXT = 61,
23 GOYA_ASYNC_EVENT_ID_MMU_ECC = 63,
24 GOYA_ASYNC_EVENT_ID_DMA_MACRO = 64,
25 GOYA_ASYNC_EVENT_ID_DMA_ECC = 66,
26 GOYA_ASYNC_EVENT_ID_CPU_IF_ECC = 75,
27 GOYA_ASYNC_EVENT_ID_PSOC_MEM = 78,
28 GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT = 79,
29 GOYA_ASYNC_EVENT_ID_SRAM0 = 81,
30 GOYA_ASYNC_EVENT_ID_SRAM1 = 82,
31 GOYA_ASYNC_EVENT_ID_SRAM2 = 83,
32 GOYA_ASYNC_EVENT_ID_SRAM3 = 84,
33 GOYA_ASYNC_EVENT_ID_SRAM4 = 85,
34 GOYA_ASYNC_EVENT_ID_SRAM5 = 86,
35 GOYA_ASYNC_EVENT_ID_SRAM6 = 87,
36 GOYA_ASYNC_EVENT_ID_SRAM7 = 88,
37 GOYA_ASYNC_EVENT_ID_SRAM8 = 89,
38 GOYA_ASYNC_EVENT_ID_SRAM9 = 90,
39 GOYA_ASYNC_EVENT_ID_SRAM10 = 91,
40 GOYA_ASYNC_EVENT_ID_SRAM11 = 92,
41 GOYA_ASYNC_EVENT_ID_SRAM12 = 93,
42 GOYA_ASYNC_EVENT_ID_SRAM13 = 94,
43 GOYA_ASYNC_EVENT_ID_SRAM14 = 95,
44 GOYA_ASYNC_EVENT_ID_SRAM15 = 96,
45 GOYA_ASYNC_EVENT_ID_SRAM16 = 97,
46 GOYA_ASYNC_EVENT_ID_SRAM17 = 98,
47 GOYA_ASYNC_EVENT_ID_SRAM18 = 99,
48 GOYA_ASYNC_EVENT_ID_SRAM19 = 100,
49 GOYA_ASYNC_EVENT_ID_SRAM20 = 101,
50 GOYA_ASYNC_EVENT_ID_SRAM21 = 102,
51 GOYA_ASYNC_EVENT_ID_SRAM22 = 103,
52 GOYA_ASYNC_EVENT_ID_SRAM23 = 104,
53 GOYA_ASYNC_EVENT_ID_SRAM24 = 105,
54 GOYA_ASYNC_EVENT_ID_SRAM25 = 106,
55 GOYA_ASYNC_EVENT_ID_SRAM26 = 107,
56 GOYA_ASYNC_EVENT_ID_SRAM27 = 108,
57 GOYA_ASYNC_EVENT_ID_SRAM28 = 109,
58 GOYA_ASYNC_EVENT_ID_SRAM29 = 110,
59 GOYA_ASYNC_EVENT_ID_GIC500 = 112,
60 GOYA_ASYNC_EVENT_ID_PCIE_DEC = 115,
61 GOYA_ASYNC_EVENT_ID_TPC0_DEC = 117,
62 GOYA_ASYNC_EVENT_ID_TPC1_DEC = 120,
63 GOYA_ASYNC_EVENT_ID_TPC2_DEC = 123,
64 GOYA_ASYNC_EVENT_ID_TPC3_DEC = 126,
65 GOYA_ASYNC_EVENT_ID_TPC4_DEC = 129,
66 GOYA_ASYNC_EVENT_ID_TPC5_DEC = 132,
67 GOYA_ASYNC_EVENT_ID_TPC6_DEC = 135,
68 GOYA_ASYNC_EVENT_ID_TPC7_DEC = 138,
69 GOYA_ASYNC_EVENT_ID_AXI_ECC = 139,
70 GOYA_ASYNC_EVENT_ID_L2_RAM_ECC = 140,
71 GOYA_ASYNC_EVENT_ID_MME_WACS = 141,
72 GOYA_ASYNC_EVENT_ID_MME_WACSD = 142,
73 GOYA_ASYNC_EVENT_ID_PLL0 = 143,
74 GOYA_ASYNC_EVENT_ID_PLL1 = 144,
75 GOYA_ASYNC_EVENT_ID_PLL3 = 146,
76 GOYA_ASYNC_EVENT_ID_PLL4 = 147,
77 GOYA_ASYNC_EVENT_ID_PLL5 = 148,
78 GOYA_ASYNC_EVENT_ID_PLL6 = 149,
79 GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER = 155,
80 GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC = 159,
81 GOYA_ASYNC_EVENT_ID_PSOC = 160,
82 GOYA_ASYNC_EVENT_ID_PCIE_FLR = 171,
83 GOYA_ASYNC_EVENT_ID_PCIE_HOT_RESET = 172,
84 GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG0 = 174,
85 GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG1 = 175,
86 GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG2 = 176,
87 GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG3 = 177,
88 GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG0 = 178,
89 GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG1 = 179,
90 GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG2 = 180,
91 GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG3 = 181,
92 GOYA_ASYNC_EVENT_ID_PCIE_APB = 182,
93 GOYA_ASYNC_EVENT_ID_PCIE_QDB = 183,
94 GOYA_ASYNC_EVENT_ID_PCIE_BM_D_P_WR = 184,
95 GOYA_ASYNC_EVENT_ID_PCIE_BM_D_RD = 185,
96 GOYA_ASYNC_EVENT_ID_PCIE_BM_U_P_WR = 186,
97 GOYA_ASYNC_EVENT_ID_PCIE_BM_U_RD = 187,
98 GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU = 190,
99 GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR = 191,
100 GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU = 200,
101 GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR = 201,
102 GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU = 210,
103 GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR = 211,
104 GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU = 220,
105 GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR = 221,
106 GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU = 230,
107 GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR = 231,
108 GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU = 240,
109 GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR = 241,
110 GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU = 250,
111 GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR = 251,
112 GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU = 260,
113 GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR = 261,
114 GOYA_ASYNC_EVENT_ID_MMU_SBA_SPMU0 = 270,
115 GOYA_ASYNC_EVENT_ID_MMU_SBA_SPMU1 = 271,
116 GOYA_ASYNC_EVENT_ID_MME_WACS_UP = 272,
117 GOYA_ASYNC_EVENT_ID_MME_WACS_DOWN = 273,
118 GOYA_ASYNC_EVENT_ID_MMU_PAGE_FAULT = 280,
119 GOYA_ASYNC_EVENT_ID_MMU_WR_PERM = 281,
120 GOYA_ASYNC_EVENT_ID_MMU_DBG_BM = 282,
121 GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 = 290,
122 GOYA_ASYNC_EVENT_ID_DMA_BM_CH1 = 291,
123 GOYA_ASYNC_EVENT_ID_DMA_BM_CH2 = 292,
124 GOYA_ASYNC_EVENT_ID_DMA_BM_CH3 = 293,
125 GOYA_ASYNC_EVENT_ID_DMA_BM_CH4 = 294,
126 GOYA_ASYNC_EVENT_ID_DDR0_PHY_DFI = 300,
127 GOYA_ASYNC_EVENT_ID_DDR0_ECC_SCRUB = 301,
128 GOYA_ASYNC_EVENT_ID_DDR0_DB_ECC = 302,
129 GOYA_ASYNC_EVENT_ID_DDR0_SB_ECC = 303,
130 GOYA_ASYNC_EVENT_ID_DDR0_SB_ECC_MC = 304,
131 GOYA_ASYNC_EVENT_ID_DDR0_AXI_RD = 305,
132 GOYA_ASYNC_EVENT_ID_DDR0_AXI_WR = 306,
133 GOYA_ASYNC_EVENT_ID_DDR1_PHY_DFI = 310,
134 GOYA_ASYNC_EVENT_ID_DDR1_ECC_SCRUB = 311,
135 GOYA_ASYNC_EVENT_ID_DDR1_DB_ECC = 312,
136 GOYA_ASYNC_EVENT_ID_DDR1_SB_ECC = 313,
137 GOYA_ASYNC_EVENT_ID_DDR1_SB_ECC_MC = 314,
138 GOYA_ASYNC_EVENT_ID_DDR1_AXI_RD = 315,
139 GOYA_ASYNC_EVENT_ID_DDR1_AXI_WR = 316,
140 GOYA_ASYNC_EVENT_ID_CPU_BMON = 320,
141 GOYA_ASYNC_EVENT_ID_TS_EAST = 322,
142 GOYA_ASYNC_EVENT_ID_TS_WEST = 323,
143 GOYA_ASYNC_EVENT_ID_TS_NORTH = 324,
144 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_0 = 330,
145 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_1 = 331,
146 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_2 = 332,
147 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET = 356,
148 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT = 361,
149 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ = 430,
150 GOYA_ASYNC_EVENT_ID_TPC1_CMDQ = 431,
151 GOYA_ASYNC_EVENT_ID_TPC2_CMDQ = 432,
152 GOYA_ASYNC_EVENT_ID_TPC3_CMDQ = 433,
153 GOYA_ASYNC_EVENT_ID_TPC4_CMDQ = 434,
154 GOYA_ASYNC_EVENT_ID_TPC5_CMDQ = 435,
155 GOYA_ASYNC_EVENT_ID_TPC6_CMDQ = 436,
156 GOYA_ASYNC_EVENT_ID_TPC7_CMDQ = 437,
157 GOYA_ASYNC_EVENT_ID_TPC0_QM = 438,
158 GOYA_ASYNC_EVENT_ID_TPC1_QM = 439,
159 GOYA_ASYNC_EVENT_ID_TPC2_QM = 440,
160 GOYA_ASYNC_EVENT_ID_TPC3_QM = 441,
161 GOYA_ASYNC_EVENT_ID_TPC4_QM = 442,
162 GOYA_ASYNC_EVENT_ID_TPC5_QM = 443,
163 GOYA_ASYNC_EVENT_ID_TPC6_QM = 444,
164 GOYA_ASYNC_EVENT_ID_TPC7_QM = 445,
165 GOYA_ASYNC_EVENT_ID_MME_QM = 447,
166 GOYA_ASYNC_EVENT_ID_MME_CMDQ = 448,
167 GOYA_ASYNC_EVENT_ID_DMA0_QM = 449,
168 GOYA_ASYNC_EVENT_ID_DMA1_QM = 450,
169 GOYA_ASYNC_EVENT_ID_DMA2_QM = 451,
170 GOYA_ASYNC_EVENT_ID_DMA3_QM = 452,
171 GOYA_ASYNC_EVENT_ID_DMA4_QM = 453,
172 GOYA_ASYNC_EVENT_ID_DMA_ON_HBW = 454,
173 GOYA_ASYNC_EVENT_ID_DMA0_CH = 455,
174 GOYA_ASYNC_EVENT_ID_DMA1_CH = 456,
175 GOYA_ASYNC_EVENT_ID_DMA2_CH = 457,
176 GOYA_ASYNC_EVENT_ID_DMA3_CH = 458,
177 GOYA_ASYNC_EVENT_ID_DMA4_CH = 459,
178 GOYA_ASYNC_EVENT_ID_PI_UPDATE = 484,
179 GOYA_ASYNC_EVENT_ID_HALT_MACHINE = 485,
180 GOYA_ASYNC_EVENT_ID_INTS_REGISTER = 486,
181 GOYA_ASYNC_EVENT_ID_SOFT_RESET = 487,
182 GOYA_ASYNC_EVENT_ID_LAST_VALID_ID = 1023,
183 GOYA_ASYNC_EVENT_ID_SIZE
184};
185
186#endif /* __GOYA_ASYNC_EVENTS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/goya_fw_if.h b/drivers/misc/habanalabs/include/goya/goya_fw_if.h
new file mode 100644
index 000000000000..a9920cb4a07b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_fw_if.h
@@ -0,0 +1,28 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef GOYA_FW_IF_H
9#define GOYA_FW_IF_H
10
11#define CPU_BOOT_ADDR 0x7FF8040000ull
12
13#define UBOOT_FW_OFFSET 0x100000 /* 1MB in SRAM */
14#define LINUX_FW_OFFSET 0x800000 /* 8MB in DDR */
15
16enum goya_pll_index {
17 CPU_PLL = 0,
18 IC_PLL,
19 MC_PLL,
20 MME_PLL,
21 PCI_PLL,
22 EMMC_PLL,
23 TPC_PLL
24};
25
26#define GOYA_PLL_FREQ_LOW 50000000 /* 50 MHz */
27
28#endif /* GOYA_FW_IF_H */
diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h
new file mode 100644
index 000000000000..a14407b975e4
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_packets.h
@@ -0,0 +1,129 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2017-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef GOYA_PACKETS_H
9#define GOYA_PACKETS_H
10
11#include <linux/types.h>
12
13#define PACKET_HEADER_PACKET_ID_SHIFT 56
14#define PACKET_HEADER_PACKET_ID_MASK 0x1F00000000000000ull
15
16enum packet_id {
17 PACKET_WREG_32 = 0x1,
18 PACKET_WREG_BULK = 0x2,
19 PACKET_MSG_LONG = 0x3,
20 PACKET_MSG_SHORT = 0x4,
21 PACKET_CP_DMA = 0x5,
22 PACKET_MSG_PROT = 0x7,
23 PACKET_FENCE = 0x8,
24 PACKET_LIN_DMA = 0x9,
25 PACKET_NOP = 0xA,
26 PACKET_STOP = 0xB,
27 MAX_PACKET_ID = (PACKET_HEADER_PACKET_ID_MASK >>
28 PACKET_HEADER_PACKET_ID_SHIFT) + 1
29};
30
31enum goya_dma_direction {
32 DMA_HOST_TO_DRAM,
33 DMA_HOST_TO_SRAM,
34 DMA_DRAM_TO_SRAM,
35 DMA_SRAM_TO_DRAM,
36 DMA_SRAM_TO_HOST,
37 DMA_DRAM_TO_HOST,
38 DMA_DRAM_TO_DRAM,
39 DMA_SRAM_TO_SRAM,
40 DMA_ENUM_MAX
41};
42
43#define GOYA_PKT_CTL_OPCODE_SHIFT 24
44#define GOYA_PKT_CTL_OPCODE_MASK 0x1F000000
45
46#define GOYA_PKT_CTL_EB_SHIFT 29
47#define GOYA_PKT_CTL_EB_MASK 0x20000000
48
49#define GOYA_PKT_CTL_RB_SHIFT 30
50#define GOYA_PKT_CTL_RB_MASK 0x40000000
51
52#define GOYA_PKT_CTL_MB_SHIFT 31
53#define GOYA_PKT_CTL_MB_MASK 0x80000000
54
55struct packet_nop {
56 __le32 reserved;
57 __le32 ctl;
58};
59
60struct packet_stop {
61 __le32 reserved;
62 __le32 ctl;
63};
64
65#define GOYA_PKT_WREG32_CTL_REG_OFFSET_SHIFT 0
66#define GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK 0x0000FFFF
67
68struct packet_wreg32 {
69 __le32 value;
70 __le32 ctl;
71};
72
73struct packet_wreg_bulk {
74 __le32 size64;
75 __le32 ctl;
76 __le64 values[0]; /* data starts here */
77};
78
79struct packet_msg_long {
80 __le32 value;
81 __le32 ctl;
82 __le64 addr;
83};
84
85struct packet_msg_short {
86 __le32 value;
87 __le32 ctl;
88};
89
90struct packet_msg_prot {
91 __le32 value;
92 __le32 ctl;
93 __le64 addr;
94};
95
96struct packet_fence {
97 __le32 cfg;
98 __le32 ctl;
99};
100
101#define GOYA_PKT_LIN_DMA_CTL_WO_SHIFT 0
102#define GOYA_PKT_LIN_DMA_CTL_WO_MASK 0x00000001
103
104#define GOYA_PKT_LIN_DMA_CTL_RDCOMP_SHIFT 1
105#define GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK 0x00000002
106
107#define GOYA_PKT_LIN_DMA_CTL_WRCOMP_SHIFT 2
108#define GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK 0x00000004
109
110#define GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT 6
111#define GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK 0x00000040
112
113#define GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT 20
114#define GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK 0x00700000
115
116struct packet_lin_dma {
117 __le32 tsize;
118 __le32 ctl;
119 __le64 src_addr;
120 __le64 dst_addr;
121};
122
123struct packet_cp_dma {
124 __le32 tsize;
125 __le32 ctl;
126 __le64 src_addr;
127};
128
129#endif /* GOYA_PACKETS_H */
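The shift/mask pairs above describe how each packet's 32-bit ctl word is packed: the opcode sits in bits 24-28 and the EB/RB/MB barrier bits in bits 29-31, while LIN_DMA packets carry extra flags (write-only, read/write completion, memset, DMA direction) in the low bits. A minimal sketch of composing such a ctl word, assuming only the defines from goya_packets.h above; the helper name and the chosen flag values are illustrative, not taken from the driver:

/* Sketch only: build a ctl word for a host-to-DRAM LIN_DMA packet */
static inline __le32 example_lin_dma_ctl(void)
{
	u32 ctl;

	/* opcode field, bits 24-28 */
	ctl = (PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) &
			GOYA_PKT_CTL_OPCODE_MASK;
	/* request a write-completion message when the transfer finishes */
	ctl |= (1 << GOYA_PKT_LIN_DMA_CTL_WRCOMP_SHIFT) &
			GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;
	/* transfer direction, one of enum goya_dma_direction */
	ctl |= (DMA_HOST_TO_DRAM << GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT) &
			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK;

	return cpu_to_le32(ctl);
}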
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/hl_boot_if.h
new file mode 100644
index 000000000000..7475732b9996
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hl_boot_if.h
@@ -0,0 +1,30 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef HL_BOOT_IF_H
9#define HL_BOOT_IF_H
10
11enum cpu_boot_status {
12 CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */
13 CPU_BOOT_STATUS_IN_WFE,
14 CPU_BOOT_STATUS_DRAM_RDY,
15 CPU_BOOT_STATUS_SRAM_AVAIL,
16 CPU_BOOT_STATUS_IN_BTL, /* BTL is H/W FSM */
17 CPU_BOOT_STATUS_IN_PREBOOT,
18 CPU_BOOT_STATUS_IN_SPL,
19 CPU_BOOT_STATUS_IN_UBOOT,
20 CPU_BOOT_STATUS_DRAM_INIT_FAIL,
21 CPU_BOOT_STATUS_FIT_CORRUPTED
22};
23
24enum kmd_msg {
25 KMD_MSG_NA = 0,
26 KMD_MSG_GOTO_WFE,
27 KMD_MSG_FIT_RDY
28};
29
30#endif /* HL_BOOT_IF_H */
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
new file mode 100644
index 000000000000..b680052ee3f0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -0,0 +1,47 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef INCLUDE_MMU_GENERAL_H_
9#define INCLUDE_MMU_GENERAL_H_
10
11#define PAGE_SHIFT_4KB 12
12#define PAGE_SHIFT_2MB 21
13#define PAGE_SIZE_2MB (_AC(1, UL) << PAGE_SHIFT_2MB)
14#define PAGE_SIZE_4KB (_AC(1, UL) << PAGE_SHIFT_4KB)
15#define PAGE_MASK_2MB (~(PAGE_SIZE_2MB - 1))
16
17#define PAGE_PRESENT_MASK 0x0000000000001
18#define SWAP_OUT_MASK 0x0000000000004
19#define LAST_MASK 0x0000000000800
20#define PHYS_ADDR_MASK 0x3FFFFFFFFF000ull
21#define HOP0_MASK 0x3000000000000ull
22#define HOP1_MASK 0x0FF8000000000ull
23#define HOP2_MASK 0x0007FC0000000ull
24#define HOP3_MASK 0x000003FE00000
25#define HOP4_MASK 0x00000001FF000
26#define OFFSET_MASK 0x0000000000FFF
27
28#define HOP0_SHIFT 48
29#define HOP1_SHIFT 39
30#define HOP2_SHIFT 30
31#define HOP3_SHIFT 21
32#define HOP4_SHIFT 12
33
34#define PTE_PHYS_ADDR_SHIFT 12
35#define PTE_PHYS_ADDR_MASK ~0xFFF
36
37#define HL_PTE_SIZE sizeof(u64)
38#define HOP_TABLE_SIZE PAGE_SIZE_4KB
39#define PTE_ENTRIES_IN_HOP (HOP_TABLE_SIZE / HL_PTE_SIZE)
40#define HOP0_TABLES_TOTAL_SIZE (HOP_TABLE_SIZE * MAX_ASID)
41
42#define MMU_HOP0_PA43_12_SHIFT 12
43#define MMU_HOP0_PA49_44_SHIFT (12 + 32)
44
45#define MMU_CONFIG_TIMEOUT_USEC 2000 /* 2 ms */
46
47#endif /* INCLUDE_MMU_GENERAL_H_ */
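The HOPn_MASK/HOPn_SHIFT pairs above describe how a device virtual address is split by the MMU: a 2-bit hop-0 selector, four 9-bit indices (one per hop table, matching PTE_ENTRIES_IN_HOP = HOP_TABLE_SIZE / HL_PTE_SIZE = 512) and a 12-bit in-page offset for 4KB pages. A minimal decoding sketch, assuming the defines above; the helper name and the debug print are illustrative only:

/* Sketch only: split a device VA into hop-table indices and page offset */
static inline void example_decompose_va(u64 virt_addr)
{
	u64 hop0_idx = (virt_addr & HOP0_MASK) >> HOP0_SHIFT;
	u64 hop1_idx = (virt_addr & HOP1_MASK) >> HOP1_SHIFT;
	u64 hop2_idx = (virt_addr & HOP2_MASK) >> HOP2_SHIFT;
	u64 hop3_idx = (virt_addr & HOP3_MASK) >> HOP3_SHIFT;
	u64 hop4_idx = (virt_addr & HOP4_MASK) >> HOP4_SHIFT;
	u64 page_off = virt_addr & OFFSET_MASK;

	pr_debug("hop indices %llu/%llu/%llu/%llu/%llu, page offset 0x%llx\n",
		 hop0_idx, hop1_idx, hop2_idx, hop3_idx, hop4_idx, page_off);
}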
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
new file mode 100644
index 000000000000..8539dd041f2c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
@@ -0,0 +1,15 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef INCLUDE_MMU_V1_0_H_
9#define INCLUDE_MMU_V1_0_H_
10
11#define MMU_HOP0_PA43_12 0x490004
12#define MMU_HOP0_PA49_44 0x490008
13#define MMU_ASID_BUSY 0x490000
14
15#endif /* INCLUDE_MMU_V1_0_H_ */
diff --git a/drivers/misc/habanalabs/include/qman_if.h b/drivers/misc/habanalabs/include/qman_if.h
new file mode 100644
index 000000000000..bf59bbe27fdc
--- /dev/null
+++ b/drivers/misc/habanalabs/include/qman_if.h
@@ -0,0 +1,56 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef QMAN_IF_H
9#define QMAN_IF_H
10
11#include <linux/types.h>
12
13/*
14 * PRIMARY QUEUE
15 */
16
17struct hl_bd {
18 __le64 ptr;
19 __le32 len;
20 __le32 ctl;
21};
22
23#define HL_BD_SIZE sizeof(struct hl_bd)
24
25/*
26 * BD_CTL_REPEAT_VALID tells the CP whether the repeat field in the BD CTL is
27 * valid. 1 means the repeat field is valid, 0 means it is not valid,
28 * i.e. the repeat value is taken as 1 (repeat == 1)
29 */
30#define BD_CTL_REPEAT_VALID_SHIFT 24
31#define BD_CTL_REPEAT_VALID_MASK 0x01000000
32
33#define BD_CTL_SHADOW_INDEX_SHIFT 0
34#define BD_CTL_SHADOW_INDEX_MASK 0x00000FFF
35
36/*
37 * COMPLETION QUEUE
38 */
39
40struct hl_cq_entry {
41 __le32 data;
42};
43
44#define HL_CQ_ENTRY_SIZE sizeof(struct hl_cq_entry)
45
46#define CQ_ENTRY_READY_SHIFT 31
47#define CQ_ENTRY_READY_MASK 0x80000000
48
49#define CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT 30
50#define CQ_ENTRY_SHADOW_INDEX_VALID_MASK 0x40000000
51
52#define CQ_ENTRY_SHADOW_INDEX_SHIFT BD_CTL_SHADOW_INDEX_SHIFT
53#define CQ_ENTRY_SHADOW_INDEX_MASK BD_CTL_SHADOW_INDEX_MASK
54
55
56#endif /* QMAN_IF_H */
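The CQ_ENTRY_* masks above define the layout of the single 32-bit word of a completion queue entry: a ready bit in bit 31, a shadow-index-valid bit in bit 30 and the 12-bit shadow index in the low bits (shared with the BD ctl layout). A minimal unpacking sketch, assuming only the defines above; hl_irq_handler_cq() in irq.c further below is the driver's actual consumer of these fields, and the helper name here is hypothetical:

/* Sketch only: unpack one completion queue entry */
static inline bool example_parse_cq_entry(__le32 raw, u16 *shadow_index)
{
	u32 data = le32_to_cpu(raw);

	/* bit 31: set by the H/W when the entry content is valid */
	if (!((data & CQ_ENTRY_READY_MASK) >> CQ_ENTRY_READY_SHIFT))
		return false;

	/* bit 30: tells whether the low 12 bits carry a shadow index */
	if ((data & CQ_ENTRY_SHADOW_INDEX_VALID_MASK) >>
			CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT)
		*shadow_index = (data & CQ_ENTRY_SHADOW_INDEX_MASK) >>
				CQ_ENTRY_SHADOW_INDEX_SHIFT;

	return true;
}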
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c
new file mode 100644
index 000000000000..e69a09c10e3f
--- /dev/null
+++ b/drivers/misc/habanalabs/irq.c
@@ -0,0 +1,327 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "habanalabs.h"
9
10#include <linux/slab.h>
11
12/**
13 * This structure is used to schedule the work of handling an EQ entry or an armcp_reset event
14 *
15 * @eq_work - workqueue object to run when EQ entry is received
16 * @hdev - pointer to device structure
17 * @eq_entry - copy of the EQ entry
18 */
19struct hl_eqe_work {
20 struct work_struct eq_work;
21 struct hl_device *hdev;
22 struct hl_eq_entry eq_entry;
23};
24
25/*
26 * hl_cq_inc_ptr - increment ci or pi of cq
27 *
28 * @ptr: the current ci or pi value of the completion queue
29 *
30 * Increment ptr by 1. If it reaches the number of completion queue
31 * entries, set it to 0
32 */
33inline u32 hl_cq_inc_ptr(u32 ptr)
34{
35 ptr++;
36 if (unlikely(ptr == HL_CQ_LENGTH))
37 ptr = 0;
38 return ptr;
39}
40
41/*
42 * hl_eq_inc_ptr - increment ci of eq
43 *
44 * @ptr: the current ci value of the event queue
45 *
46 * Increment ptr by 1. If it reaches the number of event queue
47 * entries, set it to 0
48 */
49inline u32 hl_eq_inc_ptr(u32 ptr)
50{
51 ptr++;
52 if (unlikely(ptr == HL_EQ_LENGTH))
53 ptr = 0;
54 return ptr;
55}
56
57static void irq_handle_eqe(struct work_struct *work)
58{
59 struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
60 eq_work);
61 struct hl_device *hdev = eqe_work->hdev;
62
63 hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry);
64
65 kfree(eqe_work);
66}
67
68/*
69 * hl_irq_handler_cq - irq handler for completion queue
70 *
71 * @irq: irq number
72 * @arg: pointer to completion queue structure
73 *
74 */
75irqreturn_t hl_irq_handler_cq(int irq, void *arg)
76{
77 struct hl_cq *cq = arg;
78 struct hl_device *hdev = cq->hdev;
79 struct hl_hw_queue *queue;
80 struct hl_cs_job *job;
81 bool shadow_index_valid;
82 u16 shadow_index;
83 u32 *cq_entry;
84 u32 *cq_base;
85
86 if (hdev->disabled) {
87 dev_dbg(hdev->dev,
88 "Device disabled but received IRQ %d for CQ %d\n",
89 irq, cq->hw_queue_id);
90 return IRQ_HANDLED;
91 }
92
93 cq_base = (u32 *) (uintptr_t) cq->kernel_address;
94
95 while (1) {
96 bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK)
97 >> CQ_ENTRY_READY_SHIFT);
98
99 if (!entry_ready)
100 break;
101
102 cq_entry = (u32 *) &cq_base[cq->ci];
103
104 /*
105 * Make sure we read CQ entry contents after we've
106 * checked the ownership bit.
107 */
108 dma_rmb();
109
110 shadow_index_valid =
111 ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
112 >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);
113
114 shadow_index = (u16)
115 ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK)
116 >> CQ_ENTRY_SHADOW_INDEX_SHIFT);
117
118 queue = &hdev->kernel_queues[cq->hw_queue_id];
119
120 if ((shadow_index_valid) && (!hdev->disabled)) {
121 job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
122 queue_work(hdev->cq_wq, &job->finish_work);
123 }
124
125 /*
126 * Update ci of the context's queue. There is no
127 * need to protect it with spinlock because this update is
128 * done only inside IRQ and there is a different IRQ per
129 * queue
130 */
131 queue->ci = hl_queue_inc_ptr(queue->ci);
132
133 /* Clear CQ entry ready bit */
134 cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK;
135
136 cq->ci = hl_cq_inc_ptr(cq->ci);
137
138 /* Increment free slots */
139 atomic_inc(&cq->free_slots_cnt);
140 }
141
142 return IRQ_HANDLED;
143}
144
145/*
146 * hl_irq_handler_eq - irq handler for event queue
147 *
148 * @irq: irq number
149 * @arg: pointer to event queue structure
150 *
151 */
152irqreturn_t hl_irq_handler_eq(int irq, void *arg)
153{
154 struct hl_eq *eq = arg;
155 struct hl_device *hdev = eq->hdev;
156 struct hl_eq_entry *eq_entry;
157 struct hl_eq_entry *eq_base;
158 struct hl_eqe_work *handle_eqe_work;
159
160 eq_base = (struct hl_eq_entry *) (uintptr_t) eq->kernel_address;
161
162 while (1) {
163 bool entry_ready =
164 ((__le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
165 EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT);
166
167 if (!entry_ready)
168 break;
169
170 eq_entry = &eq_base[eq->ci];
171
172 /*
173 * Make sure we read EQ entry contents after we've
174 * checked the ownership bit.
175 */
176 dma_rmb();
177
178 if (hdev->disabled) {
179 dev_warn(hdev->dev,
180 "Device disabled but received IRQ %d for EQ\n",
181 irq);
182 goto skip_irq;
183 }
184
185 handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC);
186 if (handle_eqe_work) {
187 INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe);
188 handle_eqe_work->hdev = hdev;
189
190 memcpy(&handle_eqe_work->eq_entry, eq_entry,
191 sizeof(*eq_entry));
192
193 queue_work(hdev->eq_wq, &handle_eqe_work->eq_work);
194 }
195skip_irq:
196 /* Clear EQ entry ready bit */
197 eq_entry->hdr.ctl =
198 __cpu_to_le32(__le32_to_cpu(eq_entry->hdr.ctl) &
199 ~EQ_CTL_READY_MASK);
200
201 eq->ci = hl_eq_inc_ptr(eq->ci);
202
203 hdev->asic_funcs->update_eq_ci(hdev, eq->ci);
204 }
205
206 return IRQ_HANDLED;
207}
208
209/*
210 * hl_cq_init - main initialization function for a cq object
211 *
212 * @hdev: pointer to device structure
213 * @q: pointer to cq structure
214 * @hw_queue_id: The H/W queue ID this completion queue belongs to
215 *
216 * Allocate dma-able memory for the completion queue and initialize fields
217 * Returns 0 on success
218 */
219int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
220{
221 void *p;
222
223 BUILD_BUG_ON(HL_CQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
224
225 p = hdev->asic_funcs->dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
226 &q->bus_address, GFP_KERNEL | __GFP_ZERO);
227 if (!p)
228 return -ENOMEM;
229
230 q->hdev = hdev;
231 q->kernel_address = (u64) (uintptr_t) p;
232 q->hw_queue_id = hw_queue_id;
233 q->ci = 0;
234 q->pi = 0;
235
236 atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);
237
238 return 0;
239}
240
241/*
242 * hl_cq_fini - destroy completion queue
243 *
244 * @hdev: pointer to device structure
245 * @q: pointer to cq structure
246 *
247 * Free the completion queue memory
248 */
249void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
250{
251 hdev->asic_funcs->dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
252 (void *) (uintptr_t) q->kernel_address, q->bus_address);
253}
254
255void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
256{
257 q->ci = 0;
258 q->pi = 0;
259
260 atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);
261
262 /*
263 * It's not enough to just reset the PI/CI because the H/W may have
264 * written valid completion entries before it was halted and therefore
265 * we need to clean the actual queues so we won't process old entries
266 * when the device is operational again
267 */
268
269 memset((void *) (uintptr_t) q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
270}
271
272/*
273 * hl_eq_init - main initialization function for an event queue object
274 *
275 * @hdev: pointer to device structure
276 * @q: pointer to eq structure
277 *
278 * Allocate dma-able memory for the event queue and initialize fields
279 * Returns 0 on success
280 */
281int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
282{
283 void *p;
284
285 BUILD_BUG_ON(HL_EQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
286
287 p = hdev->asic_funcs->dma_alloc_coherent(hdev, HL_EQ_SIZE_IN_BYTES,
288 &q->bus_address, GFP_KERNEL | __GFP_ZERO);
289 if (!p)
290 return -ENOMEM;
291
292 q->hdev = hdev;
293 q->kernel_address = (u64) (uintptr_t) p;
294 q->ci = 0;
295
296 return 0;
297}
298
299/*
300 * hl_eq_fini - destroy event queue
301 *
302 * @hdev: pointer to device structure
303 * @q: pointer to eq structure
304 *
305 * Free the event queue memory
306 */
307void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
308{
309 flush_workqueue(hdev->eq_wq);
310
311 hdev->asic_funcs->dma_free_coherent(hdev, HL_EQ_SIZE_IN_BYTES,
312 (void *) (uintptr_t) q->kernel_address, q->bus_address);
313}
314
315void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
316{
317 q->ci = 0;
318
319 /*
320 * It's not enough to just reset the PI/CI because the H/W may have
321 * written valid completion entries before it was halted and therefore
322 * we need to clean the actual queues so we won't process old entries
323 * when the device is operational again
324 */
325
326 memset((void *) (uintptr_t) q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
327}
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
new file mode 100644
index 000000000000..3a12fd1a5274
--- /dev/null
+++ b/drivers/misc/habanalabs/memory.c
@@ -0,0 +1,1723 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include <uapi/misc/habanalabs.h>
9#include "habanalabs.h"
10#include "include/hw_ip/mmu/mmu_general.h"
11
12#include <linux/uaccess.h>
13#include <linux/slab.h>
14#include <linux/genalloc.h>
15
16#define PGS_IN_2MB_PAGE (PAGE_SIZE_2MB >> PAGE_SHIFT)
17#define HL_MMU_DEBUG 0
18
19/*
20 * The va ranges in the context object contain a list of the available chunks of
21 * device virtual memory.
22 * There is one range for host allocations and one for DRAM allocations.
23 *
24 * On initialization, each range contains one chunk spanning all of its
25 * available virtual range, which is half of the total device virtual range.
26 *
27 * On each mapping of physical pages, a suitable virtual range chunk (with a
28 * minimum size) is selected from the list. If the chunk size equals the
29 * requested size, the chunk is returned. Otherwise, the chunk is split into
30 * two chunks - one to return as result and a remainder to stay in the list.
31 *
32 * On each unmapping of a virtual address, the relevant virtual chunk is
33 * returned to the list. If the chunk's edges match the edges of the
34 * adjacent chunks (meaning a contiguous chunk can be created),
35 * the chunks are merged.
36 *
37 * On finish, the list is checked to contain only one chunk covering the whole
38 * relevant virtual range (which is half of the device's total virtual range).
39 * If not (meaning not all mappings were unmapped), a warning is printed.
40 */
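/*
 * Illustrative example with made-up numbers: suppose the host va range
 * initially holds the single chunk [0x20000000, 0x60000000). Mapping 0x1000
 * bytes takes that chunk and splits it: [0x20000000, 0x20001000) is handed
 * out while the remainder [0x20001000, 0x60000000) stays on the list.
 * Unmapping later puts [0x20000000, 0x20001000) back on the list; since its
 * end touches the remainder's start, the two are merged back into the
 * original chunk.
 */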
41
42/*
43 * alloc_device_memory - allocate device memory
44 *
45 * @ctx : current context
46 * @args : host parameters containing the requested size
47 * @ret_handle : result handle
48 *
49 * This function does the following:
50 * - Allocate the requested size rounded up to 2MB pages
51 * - Return unique handle
52 */
53static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
54 u32 *ret_handle)
55{
56 struct hl_device *hdev = ctx->hdev;
57 struct hl_vm *vm = &hdev->vm;
58 struct hl_vm_phys_pg_pack *phys_pg_pack;
59 u64 paddr = 0;
60 u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
61 int handle, rc, i;
62 bool contiguous;
63
64 num_curr_pgs = 0;
65 page_size = hdev->asic_prop.dram_page_size;
66 page_shift = __ffs(page_size);
67 num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
68 total_size = num_pgs << page_shift;
69
70 contiguous = args->flags & HL_MEM_CONTIGUOUS;
71
72 if (contiguous) {
73 paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
74 if (!paddr) {
75 dev_err(hdev->dev,
76 "failed to allocate %u huge contiguous pages\n",
77 num_pgs);
78 return -ENOMEM;
79 }
80 }
81
82 phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
83 if (!phys_pg_pack) {
84 rc = -ENOMEM;
85 goto pages_pack_err;
86 }
87
88 phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
89 phys_pg_pack->asid = ctx->asid;
90 phys_pg_pack->npages = num_pgs;
91 phys_pg_pack->page_size = page_size;
92 phys_pg_pack->total_size = total_size;
93 phys_pg_pack->flags = args->flags;
94 phys_pg_pack->contiguous = contiguous;
95
96 phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL);
97 if (!phys_pg_pack->pages) {
98 rc = -ENOMEM;
99 goto pages_arr_err;
100 }
101
102 if (phys_pg_pack->contiguous) {
103 for (i = 0 ; i < num_pgs ; i++)
104 phys_pg_pack->pages[i] = paddr + i * page_size;
105 } else {
106 for (i = 0 ; i < num_pgs ; i++) {
107 phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
108 vm->dram_pg_pool,
109 page_size);
110 if (!phys_pg_pack->pages[i]) {
111 dev_err(hdev->dev,
112 "ioctl failed to allocate page\n");
113 rc = -ENOMEM;
114 goto page_err;
115 }
116
117 num_curr_pgs++;
118 }
119 }
120
121 spin_lock(&vm->idr_lock);
122 handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
123 GFP_ATOMIC);
124 spin_unlock(&vm->idr_lock);
125
126 if (handle < 0) {
127 dev_err(hdev->dev, "Failed to get handle for page\n");
128 rc = -EFAULT;
129 goto idr_err;
130 }
131
132 for (i = 0 ; i < num_pgs ; i++)
133 kref_get(&vm->dram_pg_pool_refcount);
134
135 phys_pg_pack->handle = handle;
136
137 atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
138 atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
139
140 *ret_handle = handle;
141
142 return 0;
143
144idr_err:
145page_err:
146 if (!phys_pg_pack->contiguous)
147 for (i = 0 ; i < num_curr_pgs ; i++)
148 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
149 page_size);
150
151 kfree(phys_pg_pack->pages);
152pages_arr_err:
153 kfree(phys_pg_pack);
154pages_pack_err:
155 if (contiguous)
156 gen_pool_free(vm->dram_pg_pool, paddr, total_size);
157
158 return rc;
159}
160
161/*
162 * get_userptr_from_host_va - initialize userptr structure from given host
163 * virtual address
164 *
165 * @hdev : habanalabs device structure
166 * @args : parameters containing the virtual address and size
167 * @p_userptr : pointer to result userptr structure
168 *
169 * This function does the following:
170 * - Allocate userptr structure
171 * - Pin the given host memory using the userptr structure
172 * - Perform DMA mapping to have the DMA addresses of the pages
173 */
174static int get_userptr_from_host_va(struct hl_device *hdev,
175 struct hl_mem_in *args, struct hl_userptr **p_userptr)
176{
177 struct hl_userptr *userptr;
178 int rc;
179
180 userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
181 if (!userptr) {
182 rc = -ENOMEM;
183 goto userptr_err;
184 }
185
186 rc = hl_pin_host_memory(hdev, args->map_host.host_virt_addr,
187 args->map_host.mem_size, userptr);
188 if (rc) {
189 dev_err(hdev->dev, "Failed to pin host memory\n");
190 goto pin_err;
191 }
192
193 rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
194 userptr->sgt->nents, DMA_BIDIRECTIONAL);
195 if (rc) {
196 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
197 goto dma_map_err;
198 }
199
200 userptr->dma_mapped = true;
201 userptr->dir = DMA_BIDIRECTIONAL;
202 userptr->vm_type = VM_TYPE_USERPTR;
203
204 *p_userptr = userptr;
205
206 return 0;
207
208dma_map_err:
209 hl_unpin_host_memory(hdev, userptr);
210pin_err:
211 kfree(userptr);
212userptr_err:
213
214 return rc;
215}
216
217/*
218 * free_userptr - free userptr structure
219 *
220 * @hdev : habanalabs device structure
221 * @userptr : userptr to free
222 *
223 * This function does the following:
224 * - Unpins the physical pages
225 * - Frees the userptr structure
226 */
227static void free_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
228{
229 hl_unpin_host_memory(hdev, userptr);
230 kfree(userptr);
231}
232
233/*
234 * dram_pg_pool_do_release - free DRAM pages pool
235 *
236 * @ref : pointer to reference object
237 *
238 * This function does the following:
239 * - Frees the idr structure of physical pages handles
240 * - Frees the generic pool of DRAM physical pages
241 */
242static void dram_pg_pool_do_release(struct kref *ref)
243{
244 struct hl_vm *vm = container_of(ref, struct hl_vm,
245 dram_pg_pool_refcount);
246
247 /*
248 * free the idr here as only here we know for sure that there are no
249 * allocated physical pages and hence there are no handles in use
250 */
251 idr_destroy(&vm->phys_pg_pack_handles);
252 gen_pool_destroy(vm->dram_pg_pool);
253}
254
255/*
256 * free_phys_pg_pack - free physical page pack
257 *
258 * @hdev : habanalabs device structure
259 * @phys_pg_pack : physical page pack to free
260 *
261 * This function does the following:
262 * - For DRAM memory only, iterate over the pack and free each physical block
263 * structure by returning it to the general pool
264 * - Free the hl_vm_phys_pg_pack structure
265 */
266static void free_phys_pg_pack(struct hl_device *hdev,
267 struct hl_vm_phys_pg_pack *phys_pg_pack)
268{
269 struct hl_vm *vm = &hdev->vm;
270 int i;
271
272 if (!phys_pg_pack->created_from_userptr) {
273 if (phys_pg_pack->contiguous) {
274 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
275 phys_pg_pack->total_size);
276
277 for (i = 0; i < phys_pg_pack->npages ; i++)
278 kref_put(&vm->dram_pg_pool_refcount,
279 dram_pg_pool_do_release);
280 } else {
281 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
282 gen_pool_free(vm->dram_pg_pool,
283 phys_pg_pack->pages[i],
284 phys_pg_pack->page_size);
285 kref_put(&vm->dram_pg_pool_refcount,
286 dram_pg_pool_do_release);
287 }
288 }
289 }
290
291 kfree(phys_pg_pack->pages);
292 kfree(phys_pg_pack);
293}
294
295/*
296 * free_device_memory - free device memory
297 *
298 * @ctx : current context
299 * @handle : handle of the memory chunk to free
300 *
301 * This function does the following:
302 * - Free the device memory related to the given handle
303 */
304static int free_device_memory(struct hl_ctx *ctx, u32 handle)
305{
306 struct hl_device *hdev = ctx->hdev;
307 struct hl_vm *vm = &hdev->vm;
308 struct hl_vm_phys_pg_pack *phys_pg_pack;
309
310 spin_lock(&vm->idr_lock);
311 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
312 if (phys_pg_pack) {
313 if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
314 dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
315 handle);
316 spin_unlock(&vm->idr_lock);
317 return -EINVAL;
318 }
319
320 /*
321 * must remove from idr before the freeing of the physical
322 * pages as the refcount of the pool is also the trigger of the
323 * idr destroy
324 */
325 idr_remove(&vm->phys_pg_pack_handles, handle);
326 spin_unlock(&vm->idr_lock);
327
328 atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
329 atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
330
331 free_phys_pg_pack(hdev, phys_pg_pack);
332 } else {
333 spin_unlock(&vm->idr_lock);
334 dev_err(hdev->dev,
335 "free device memory failed, no match for handle %u\n",
336 handle);
337 return -EINVAL;
338 }
339
340 return 0;
341}
342
343/*
344 * clear_va_list_locked - free virtual addresses list
345 *
346 * @hdev : habanalabs device structure
347 * @va_list : list of virtual addresses to free
348 *
349 * This function does the following:
350 * - Iterate over the list and free each virtual addresses block
351 *
352 * This function should be called only when va_list lock is taken
353 */
354static void clear_va_list_locked(struct hl_device *hdev,
355 struct list_head *va_list)
356{
357 struct hl_vm_va_block *va_block, *tmp;
358
359 list_for_each_entry_safe(va_block, tmp, va_list, node) {
360 list_del(&va_block->node);
361 kfree(va_block);
362 }
363}
364
365/*
366 * print_va_list_locked - print virtual addresses list
367 *
368 * @hdev : habanalabs device structure
369 * @va_list : list of virtual addresses to print
370 *
371 * This function does the following:
372 * - Iterate over the list and print each virtual addresses block
373 *
374 * This function should be called only when va_list lock is taken
375 */
376static void print_va_list_locked(struct hl_device *hdev,
377 struct list_head *va_list)
378{
379#if HL_MMU_DEBUG
380 struct hl_vm_va_block *va_block;
381
382 dev_dbg(hdev->dev, "print va list:\n");
383
384 list_for_each_entry(va_block, va_list, node)
385 dev_dbg(hdev->dev,
386 "va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
387 va_block->start, va_block->end, va_block->size);
388#endif
389}
390
391/*
392 * merge_va_blocks_locked - merge a virtual block if possible
393 *
394 * @hdev : pointer to the habanalabs device structure
395 * @va_list : pointer to the virtual addresses block list
396 * @va_block : virtual block to merge with adjacent blocks
397 *
398 * This function does the following:
399 * - Merge the given blocks with the adjacent blocks if their virtual ranges
400 * create a contiguous virtual range
401 *
402  * This function should be called only when the va_list lock is taken
403 */
404static void merge_va_blocks_locked(struct hl_device *hdev,
405 struct list_head *va_list, struct hl_vm_va_block *va_block)
406{
407 struct hl_vm_va_block *prev, *next;
408
409 prev = list_prev_entry(va_block, node);
410 if (&prev->node != va_list && prev->end + 1 == va_block->start) {
411 prev->end = va_block->end;
412 prev->size = prev->end - prev->start;
413 list_del(&va_block->node);
414 kfree(va_block);
415 va_block = prev;
416 }
417
418 next = list_next_entry(va_block, node);
419 if (&next->node != va_list && va_block->end + 1 == next->start) {
420 next->start = va_block->start;
421 next->size = next->end - next->start;
422 list_del(&va_block->node);
423 kfree(va_block);
424 }
425}
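/*
 * Illustrative note (not part of the driver): if the list already holds the
 * free blocks [0x1000, 0x1fff] and [0x3000, 0x3fff] and the block
 * [0x2000, 0x2fff] is returned to it, both "prev->end + 1 == start" and
 * "end + 1 == next->start" hold, so the two merges above collapse the list
 * into the single block [0x1000, 0x3fff]. The addresses are hypothetical.
 */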
426
427/*
428 * add_va_block_locked - add a virtual block to the virtual addresses list
429 *
430 * @hdev : pointer to the habanalabs device structure
431 * @va_list : pointer to the virtual addresses block list
432 * @start : start virtual address
433 * @end : end virtual address
434 *
435 * This function does the following:
436 * - Add the given block to the virtual blocks list and merge with other
437 * blocks if a contiguous virtual block can be created
438 *
439  * This function should be called only when the va_list lock is taken
440 */
441static int add_va_block_locked(struct hl_device *hdev,
442 struct list_head *va_list, u64 start, u64 end)
443{
444 struct hl_vm_va_block *va_block, *res = NULL;
445 u64 size = end - start;
446
447 print_va_list_locked(hdev, va_list);
448
449 list_for_each_entry(va_block, va_list, node) {
450 /* TODO: remove upon matureness */
451 if (hl_mem_area_crosses_range(start, size, va_block->start,
452 va_block->end)) {
453 dev_err(hdev->dev,
454 "block crossing ranges at start 0x%llx, end 0x%llx\n",
455 va_block->start, va_block->end);
456 return -EINVAL;
457 }
458
459 if (va_block->end < start)
460 res = va_block;
461 }
462
463 va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
464 if (!va_block)
465 return -ENOMEM;
466
467 va_block->start = start;
468 va_block->end = end;
469 va_block->size = size;
470
471 if (!res)
472 list_add(&va_block->node, va_list);
473 else
474 list_add(&va_block->node, &res->node);
475
476 merge_va_blocks_locked(hdev, va_list, va_block);
477
478 print_va_list_locked(hdev, va_list);
479
480 return 0;
481}
482
483/*
484 * add_va_block - wrapper for add_va_block_locked
485 *
486 * @hdev : pointer to the habanalabs device structure
487  * @va_range : pointer to the virtual addresses range
488 * @start : start virtual address
489 * @end : end virtual address
490 *
491 * This function does the following:
492 * - Takes the list lock and calls add_va_block_locked
493 */
494static inline int add_va_block(struct hl_device *hdev,
495 struct hl_va_range *va_range, u64 start, u64 end)
496{
497 int rc;
498
499 mutex_lock(&va_range->lock);
500 rc = add_va_block_locked(hdev, &va_range->list, start, end);
501 mutex_unlock(&va_range->lock);
502
503 return rc;
504}
505
506/*
507 * get_va_block - get a virtual block with the requested size
508 *
509 * @hdev : pointer to the habanalabs device structure
510 * @va_range : pointer to the virtual addresses range
511 * @size : requested block size
512 * @hint_addr : hint for request address by the user
513  * @is_userptr : true if the block is for host memory, false for DRAM memory
514 *
515 * This function does the following:
516 * - Iterate on the virtual block list to find a suitable virtual block for the
517 * requested size
518 * - Reserve the requested block and update the list
519 * - Return the start address of the virtual block
520 */
521static u64 get_va_block(struct hl_device *hdev,
522 struct hl_va_range *va_range, u32 size, u64 hint_addr,
523 bool is_userptr)
524{
525 struct hl_vm_va_block *va_block, *new_va_block = NULL;
526 u64 valid_start, valid_size, prev_start, prev_end, page_mask,
527 res_valid_start = 0, res_valid_size = 0;
528 u32 page_size;
529 bool add_prev = false;
530
531 if (is_userptr) {
532 /*
533 * We cannot know if the user allocated memory with huge pages
534 * or not, hence we continue with the biggest possible
535 * granularity.
536 */
537 page_size = PAGE_SIZE_2MB;
538 page_mask = PAGE_MASK_2MB;
539 } else {
540 page_size = hdev->asic_prop.dram_page_size;
541 page_mask = ~((u64)page_size - 1);
542 }
543
544 mutex_lock(&va_range->lock);
545
546 print_va_list_locked(hdev, &va_range->list);
547
548 list_for_each_entry(va_block, &va_range->list, node) {
549 /* calc the first possible aligned addr */
550 valid_start = va_block->start;
551
552
553 if (valid_start & (page_size - 1)) {
554 valid_start &= page_mask;
555 valid_start += page_size;
556 if (valid_start > va_block->end)
557 continue;
558 }
559
560 valid_size = va_block->end - valid_start;
561
562 if (valid_size >= size &&
563 (!new_va_block || valid_size < res_valid_size)) {
564
565 new_va_block = va_block;
566 res_valid_start = valid_start;
567 res_valid_size = valid_size;
568 }
569
570 if (hint_addr && hint_addr >= valid_start &&
571 ((hint_addr + size) <= va_block->end)) {
572 new_va_block = va_block;
573 res_valid_start = hint_addr;
574 res_valid_size = valid_size;
575 break;
576 }
577 }
578
579 if (!new_va_block) {
580 dev_err(hdev->dev, "no available va block for size %u\n", size);
581 goto out;
582 }
583
584 if (res_valid_start > new_va_block->start) {
585 prev_start = new_va_block->start;
586 prev_end = res_valid_start - 1;
587
588 new_va_block->start = res_valid_start;
589 new_va_block->size = res_valid_size;
590
591 add_prev = true;
592 }
593
594 if (new_va_block->size > size) {
595 new_va_block->start += size;
596 new_va_block->size = new_va_block->end - new_va_block->start;
597 } else {
598 list_del(&new_va_block->node);
599 kfree(new_va_block);
600 }
601
602 if (add_prev)
603 add_va_block_locked(hdev, &va_range->list, prev_start,
604 prev_end);
605
606 print_va_list_locked(hdev, &va_range->list);
607out:
608 mutex_unlock(&va_range->lock);
609
610 return res_valid_start;
611}
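/*
 * Illustrative note (not part of the driver): with two free blocks whose
 * aligned usable sizes are 8MB and 4MB, a 3MB request picks the 4MB block
 * (the smallest block that still fits), returns its aligned start address and
 * shrinks that block to the remaining ~1MB. If hint_addr falls inside a block
 * that can hold the whole request, that address is returned instead. The
 * sizes here are hypothetical.
 */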
612
613/*
614  * get_sg_info - get number of pages and the DMA address from an SG entry
615  *
616  * @sg : the SG entry
617  * @dma_addr : pointer to DMA address to return
618  *
619  * Calculate the number of pages described by the given SG entry: take the
620  * offset of the DMA address within the first page, add the entry length and
621  * round the result up to whole pages.
622 */
623static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
624{
625 *dma_addr = sg_dma_address(sg);
626
627 return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
628 (PAGE_SIZE - 1)) >> PAGE_SHIFT;
629}
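/*
 * Illustrative example (not part of the driver), assuming 4KB pages
 * (PAGE_SHIFT == 12): an SG entry whose DMA address has an in-page offset of
 * 0x800 and whose length is 0x1000 spans two pages, since
 * ((0x800 + 0x1000) + 0xfff) >> 12 == 2, while a page-aligned 0x1000-byte
 * entry yields a single page.
 */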
630
631/*
632 * init_phys_pg_pack_from_userptr - initialize physical page pack from host
633 * memory
634 *
635 * @ctx : current context
636 * @userptr : userptr to initialize from
637  * @pphys_pg_pack : pointer to return the resulting physical page pack
638  *
639  * This function does the following:
640  * - Create a physical page pack from the physical pages related to the given
641  *   userptr (the pages themselves were already pinned by hl_pin_host_memory)
642  * - Decide whether the 2MB huge page optimization can be used for the pack
643 */
644static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
645 struct hl_userptr *userptr,
646 struct hl_vm_phys_pg_pack **pphys_pg_pack)
647{
648 struct hl_vm_phys_pg_pack *phys_pg_pack;
649 struct scatterlist *sg;
650 dma_addr_t dma_addr;
651 u64 page_mask;
652 u32 npages, total_npages, page_size = PAGE_SIZE;
653 bool first = true, is_huge_page_opt = true;
654 int rc, i, j;
655
656 phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
657 if (!phys_pg_pack)
658 return -ENOMEM;
659
660 phys_pg_pack->vm_type = userptr->vm_type;
661 phys_pg_pack->created_from_userptr = true;
662 phys_pg_pack->asid = ctx->asid;
663 atomic_set(&phys_pg_pack->mapping_cnt, 1);
664
665 /* Only if all dma_addrs are aligned to 2MB and their
666	 * sizes are at least 2MB, we can use huge page mapping.
667 * We limit the 2MB optimization to this condition,
668 * since later on we acquire the related VA range as one
669 * consecutive block.
670 */
671 total_npages = 0;
672 for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
673 npages = get_sg_info(sg, &dma_addr);
674
675 total_npages += npages;
676
677 if (first) {
678 first = false;
679 dma_addr &= PAGE_MASK_2MB;
680 }
681
682 if ((npages % PGS_IN_2MB_PAGE) ||
683 (dma_addr & (PAGE_SIZE_2MB - 1)))
684 is_huge_page_opt = false;
685 }
686
687 if (is_huge_page_opt) {
688 page_size = PAGE_SIZE_2MB;
689 total_npages /= PGS_IN_2MB_PAGE;
690 }
691
692 page_mask = ~(((u64) page_size) - 1);
693
694 phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL);
695 if (!phys_pg_pack->pages) {
696 rc = -ENOMEM;
697 goto page_pack_arr_mem_err;
698 }
699
700 phys_pg_pack->npages = total_npages;
701 phys_pg_pack->page_size = page_size;
702 phys_pg_pack->total_size = total_npages * page_size;
703
704 j = 0;
705 first = true;
706 for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
707 npages = get_sg_info(sg, &dma_addr);
708
709 /* align down to physical page size and save the offset */
710 if (first) {
711 first = false;
712 phys_pg_pack->offset = dma_addr & (page_size - 1);
713 dma_addr &= page_mask;
714 }
715
716 while (npages) {
717 phys_pg_pack->pages[j++] = dma_addr;
718 dma_addr += page_size;
719
720 if (is_huge_page_opt)
721 npages -= PGS_IN_2MB_PAGE;
722 else
723 npages--;
724 }
725 }
726
727 *pphys_pg_pack = phys_pg_pack;
728
729 return 0;
730
731page_pack_arr_mem_err:
732 kfree(phys_pg_pack);
733
734 return rc;
735}
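/*
 * Illustrative note (not part of the driver): for an 8MB pinned region that
 * happens to be backed by four 2MB-aligned, 2MB-sized DMA chunks, the huge
 * page optimization above produces a pack of 4 entries with a 2MB page size
 * instead of 2048 entries of 4KB. A single chunk that is not 2MB aligned or
 * not a whole multiple of 2MB drops the entire pack back to 4KB pages. The
 * numbers are hypothetical.
 */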
736
737/*
738 * map_phys_page_pack - maps the physical page pack
739 *
740 * @ctx : current context
741 * @vaddr : start address of the virtual area to map from
742 * @phys_pg_pack : the pack of physical pages to map to
743 *
744 * This function does the following:
745 * - Maps each chunk of virtual memory to matching physical chunk
746  * - On failure, unmaps the pages that were already mapped
747 * - Returns 0 on success, error code otherwise.
748 */
749static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
750 struct hl_vm_phys_pg_pack *phys_pg_pack)
751{
752 struct hl_device *hdev = ctx->hdev;
753 u64 next_vaddr = vaddr, paddr;
754 u32 page_size = phys_pg_pack->page_size;
755 int i, rc = 0, mapped_pg_cnt = 0;
756
757 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
758 paddr = phys_pg_pack->pages[i];
759
760 /* For accessing the host we need to turn on bit 39 */
761 if (phys_pg_pack->created_from_userptr)
762 paddr += hdev->asic_prop.host_phys_base_address;
763
764 rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
765 if (rc) {
766 dev_err(hdev->dev,
767 "map failed for handle %u, npages: %d, mapped: %d",
768 phys_pg_pack->handle, phys_pg_pack->npages,
769 mapped_pg_cnt);
770 goto err;
771 }
772
773 mapped_pg_cnt++;
774 next_vaddr += page_size;
775 }
776
777 return 0;
778
779err:
780 next_vaddr = vaddr;
781 for (i = 0 ; i < mapped_pg_cnt ; i++) {
782 if (hl_mmu_unmap(ctx, next_vaddr, page_size))
783 dev_warn_ratelimited(hdev->dev,
784 "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
785 phys_pg_pack->handle, next_vaddr,
786 phys_pg_pack->pages[i], page_size);
787
788 next_vaddr += page_size;
789 }
790
791 return rc;
792}
793
794static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
795 u64 *paddr)
796{
797 struct hl_device *hdev = ctx->hdev;
798 struct hl_vm *vm = &hdev->vm;
799 struct hl_vm_phys_pg_pack *phys_pg_pack;
800 u32 handle;
801
802 handle = lower_32_bits(args->map_device.handle);
803 spin_lock(&vm->idr_lock);
804 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
805 if (!phys_pg_pack) {
806 spin_unlock(&vm->idr_lock);
807 dev_err(hdev->dev, "no match for handle %u\n", handle);
808 return -EINVAL;
809 }
810
811 *paddr = phys_pg_pack->pages[0];
812
813 spin_unlock(&vm->idr_lock);
814
815 return 0;
816}
817
818/*
819 * map_device_va - map the given memory
820 *
821 * @ctx : current context
822 * @args : host parameters with handle/host virtual address
823 * @device_addr : pointer to result device virtual address
824 *
825 * This function does the following:
826 * - If given a physical device memory handle, map to a device virtual block
827 * and return the start address of this block
828 * - If given a host virtual address and size, find the related physical pages,
829  *   map a device virtual block to these pages and return the start address of
830 * this block
831 */
832static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
833 u64 *device_addr)
834{
835 struct hl_device *hdev = ctx->hdev;
836 struct hl_vm *vm = &hdev->vm;
837 struct hl_vm_phys_pg_pack *phys_pg_pack;
838 struct hl_userptr *userptr = NULL;
839 struct hl_vm_hash_node *hnode;
840 enum vm_type_t *vm_type;
841 u64 ret_vaddr, hint_addr;
842 u32 handle = 0;
843 int rc;
844 bool is_userptr = args->flags & HL_MEM_USERPTR;
845
846 /* Assume failure */
847 *device_addr = 0;
848
849 if (is_userptr) {
850 rc = get_userptr_from_host_va(hdev, args, &userptr);
851 if (rc) {
852 dev_err(hdev->dev, "failed to get userptr from va\n");
853 return rc;
854 }
855
856 rc = init_phys_pg_pack_from_userptr(ctx, userptr,
857 &phys_pg_pack);
858 if (rc) {
859 dev_err(hdev->dev,
860 "unable to init page pack for vaddr 0x%llx\n",
861 args->map_host.host_virt_addr);
862 goto init_page_pack_err;
863 }
864
865 vm_type = (enum vm_type_t *) userptr;
866 hint_addr = args->map_host.hint_addr;
867 } else {
868 handle = lower_32_bits(args->map_device.handle);
869
870 spin_lock(&vm->idr_lock);
871 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
872 if (!phys_pg_pack) {
873 spin_unlock(&vm->idr_lock);
874 dev_err(hdev->dev,
875 "no match for handle %u\n", handle);
876 return -EINVAL;
877 }
878
879 /* increment now to avoid freeing device memory while mapping */
880 atomic_inc(&phys_pg_pack->mapping_cnt);
881
882 spin_unlock(&vm->idr_lock);
883
884 vm_type = (enum vm_type_t *) phys_pg_pack;
885
886 hint_addr = args->map_device.hint_addr;
887 }
888
889 /*
890 * relevant for mapping device physical memory only, as host memory is
891 * implicitly shared
892 */
893 if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
894 phys_pg_pack->asid != ctx->asid) {
895 dev_err(hdev->dev,
896 "Failed to map memory, handle %u is not shared\n",
897 handle);
898 rc = -EPERM;
899 goto shared_err;
900 }
901
902 hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
903 if (!hnode) {
904 rc = -ENOMEM;
905 goto hnode_err;
906 }
907
908 ret_vaddr = get_va_block(hdev,
909 is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
910 phys_pg_pack->total_size, hint_addr, is_userptr);
911 if (!ret_vaddr) {
912 dev_err(hdev->dev, "no available va block for handle %u\n",
913 handle);
914 rc = -ENOMEM;
915 goto va_block_err;
916 }
917
918 mutex_lock(&ctx->mmu_lock);
919
920 rc = map_phys_page_pack(ctx, ret_vaddr, phys_pg_pack);
921 if (rc) {
922 mutex_unlock(&ctx->mmu_lock);
923 dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
924 handle);
925 goto map_err;
926 }
927
928 hdev->asic_funcs->mmu_invalidate_cache(hdev, false);
929
930 mutex_unlock(&ctx->mmu_lock);
931
932 ret_vaddr += phys_pg_pack->offset;
933
934 hnode->ptr = vm_type;
935 hnode->vaddr = ret_vaddr;
936
937 mutex_lock(&ctx->mem_hash_lock);
938 hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
939 mutex_unlock(&ctx->mem_hash_lock);
940
941 *device_addr = ret_vaddr;
942
943 if (is_userptr)
944 free_phys_pg_pack(hdev, phys_pg_pack);
945
946 return 0;
947
948map_err:
949 if (add_va_block(hdev,
950 is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
951 ret_vaddr,
952 ret_vaddr + phys_pg_pack->total_size - 1))
953 dev_warn(hdev->dev,
954 "release va block failed for handle 0x%x, vaddr: 0x%llx\n",
955 handle, ret_vaddr);
956
957va_block_err:
958 kfree(hnode);
959hnode_err:
960shared_err:
961 atomic_dec(&phys_pg_pack->mapping_cnt);
962 if (is_userptr)
963 free_phys_pg_pack(hdev, phys_pg_pack);
964init_page_pack_err:
965 if (is_userptr)
966 free_userptr(hdev, userptr);
967
968 return rc;
969}
970
971/*
972 * unmap_device_va - unmap the given device virtual address
973 *
974 * @ctx : current context
975 * @vaddr : device virtual address to unmap
976 *
977 * This function does the following:
978 * - Unmap the physical pages related to the given virtual address
979 * - return the device virtual block to the virtual block list
980 */
981static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
982{
983 struct hl_device *hdev = ctx->hdev;
984 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
985 struct hl_vm_hash_node *hnode = NULL;
986 struct hl_userptr *userptr = NULL;
987 enum vm_type_t *vm_type;
988 u64 next_vaddr;
989 u32 page_size;
990 bool is_userptr;
991 int i, rc;
992
993 /* protect from double entrance */
994 mutex_lock(&ctx->mem_hash_lock);
995 hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
996 if (vaddr == hnode->vaddr)
997 break;
998
999 if (!hnode) {
1000 mutex_unlock(&ctx->mem_hash_lock);
1001 dev_err(hdev->dev,
1002 "unmap failed, no mem hnode for vaddr 0x%llx\n",
1003 vaddr);
1004 return -EINVAL;
1005 }
1006
1007 hash_del(&hnode->node);
1008 mutex_unlock(&ctx->mem_hash_lock);
1009
1010 vm_type = hnode->ptr;
1011
1012 if (*vm_type == VM_TYPE_USERPTR) {
1013 is_userptr = true;
1014 userptr = hnode->ptr;
1015 rc = init_phys_pg_pack_from_userptr(ctx, userptr,
1016 &phys_pg_pack);
1017 if (rc) {
1018 dev_err(hdev->dev,
1019 "unable to init page pack for vaddr 0x%llx\n",
1020 vaddr);
1021 goto vm_type_err;
1022 }
1023 } else if (*vm_type == VM_TYPE_PHYS_PACK) {
1024 is_userptr = false;
1025 phys_pg_pack = hnode->ptr;
1026 } else {
1027 dev_warn(hdev->dev,
1028 "unmap failed, unknown vm desc for vaddr 0x%llx\n",
1029 vaddr);
1030 rc = -EFAULT;
1031 goto vm_type_err;
1032 }
1033
1034 if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
1035 dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
1036 rc = -EINVAL;
1037 goto mapping_cnt_err;
1038 }
1039
1040 page_size = phys_pg_pack->page_size;
1041 vaddr &= ~(((u64) page_size) - 1);
1042
1043 next_vaddr = vaddr;
1044
1045 mutex_lock(&ctx->mmu_lock);
1046
1047 for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size)
1048 if (hl_mmu_unmap(ctx, next_vaddr, page_size))
1049 dev_warn_ratelimited(hdev->dev,
1050 "unmap failed for vaddr: 0x%llx\n", next_vaddr);
1051
1052 hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
1053
1054 mutex_unlock(&ctx->mmu_lock);
1055
1056 if (add_va_block(hdev,
1057 is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
1058 vaddr,
1059 vaddr + phys_pg_pack->total_size - 1))
1060 dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n",
1061 vaddr);
1062
1063 atomic_dec(&phys_pg_pack->mapping_cnt);
1064 kfree(hnode);
1065
1066 if (is_userptr) {
1067 free_phys_pg_pack(hdev, phys_pg_pack);
1068 free_userptr(hdev, userptr);
1069 }
1070
1071 return 0;
1072
1073mapping_cnt_err:
1074 if (is_userptr)
1075 free_phys_pg_pack(hdev, phys_pg_pack);
1076vm_type_err:
1077 mutex_lock(&ctx->mem_hash_lock);
1078 hash_add(ctx->mem_hash, &hnode->node, vaddr);
1079 mutex_unlock(&ctx->mem_hash_lock);
1080
1081 return rc;
1082}
1083
1084int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
1085{
1086 union hl_mem_args *args = data;
1087 struct hl_device *hdev = hpriv->hdev;
1088 struct hl_ctx *ctx = hpriv->ctx;
1089 u64 device_addr = 0;
1090 u32 handle = 0;
1091 int rc;
1092
1093 if (hl_device_disabled_or_in_reset(hdev)) {
1094 dev_warn_ratelimited(hdev->dev,
1095 "Device is disabled or in reset. Can't execute memory IOCTL\n");
1096 return -EBUSY;
1097 }
1098
1099 if (hdev->mmu_enable) {
1100 switch (args->in.op) {
1101 case HL_MEM_OP_ALLOC:
1102 if (!hdev->dram_supports_virtual_memory) {
1103 dev_err(hdev->dev,
1104 "DRAM alloc is not supported\n");
1105 rc = -EINVAL;
1106 goto out;
1107 }
1108 if (args->in.alloc.mem_size == 0) {
1109 dev_err(hdev->dev,
1110 "alloc size must be larger than 0\n");
1111 rc = -EINVAL;
1112 goto out;
1113 }
1114 rc = alloc_device_memory(ctx, &args->in, &handle);
1115
1116 memset(args, 0, sizeof(*args));
1117 args->out.handle = (__u64) handle;
1118 break;
1119
1120 case HL_MEM_OP_FREE:
1121 if (!hdev->dram_supports_virtual_memory) {
1122 dev_err(hdev->dev,
1123 "DRAM free is not supported\n");
1124 rc = -EINVAL;
1125 goto out;
1126 }
1127 rc = free_device_memory(ctx, args->in.free.handle);
1128 break;
1129
1130 case HL_MEM_OP_MAP:
1131 rc = map_device_va(ctx, &args->in, &device_addr);
1132
1133 memset(args, 0, sizeof(*args));
1134 args->out.device_virt_addr = device_addr;
1135 break;
1136
1137 case HL_MEM_OP_UNMAP:
1138 rc = unmap_device_va(ctx,
1139 args->in.unmap.device_virt_addr);
1140 break;
1141
1142 default:
1143 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
1144 rc = -ENOTTY;
1145 break;
1146 }
1147 } else {
1148 switch (args->in.op) {
1149 case HL_MEM_OP_ALLOC:
1150 if (args->in.alloc.mem_size == 0) {
1151 dev_err(hdev->dev,
1152 "alloc size must be larger than 0\n");
1153 rc = -EINVAL;
1154 goto out;
1155 }
1156
1157 /* Force contiguous as there are no real MMU
1158 * translations to overcome physical memory gaps
1159 */
1160 args->in.flags |= HL_MEM_CONTIGUOUS;
1161 rc = alloc_device_memory(ctx, &args->in, &handle);
1162
1163 memset(args, 0, sizeof(*args));
1164 args->out.handle = (__u64) handle;
1165 break;
1166
1167 case HL_MEM_OP_FREE:
1168 rc = free_device_memory(ctx, args->in.free.handle);
1169 break;
1170
1171 case HL_MEM_OP_MAP:
1172 if (args->in.flags & HL_MEM_USERPTR) {
1173 device_addr = args->in.map_host.host_virt_addr;
1174 rc = 0;
1175 } else {
1176 rc = get_paddr_from_handle(ctx, &args->in,
1177 &device_addr);
1178 }
1179
1180 memset(args, 0, sizeof(*args));
1181 args->out.device_virt_addr = device_addr;
1182 break;
1183
1184 case HL_MEM_OP_UNMAP:
1185 rc = 0;
1186 break;
1187
1188 default:
1189 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
1190 rc = -ENOTTY;
1191 break;
1192 }
1193 }
1194
1195out:
1196 return rc;
1197}
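/*
 * Illustrative user-space sketch (not part of the driver) of the MMU-enabled
 * alloc + map flow handled above. Assumptions: "fd" is an open habanalabs
 * device file descriptor and HL_IOCTL_MEMORY is the request macro exposed by
 * the habanalabs uapi header for this ioctl; error handling is omitted.
 *
 *	union hl_mem_args args = {0};
 *	__u64 handle, device_va;
 *
 *	args.in.op = HL_MEM_OP_ALLOC;
 *	args.in.alloc.mem_size = 2 * 1024 * 1024;
 *	ioctl(fd, HL_IOCTL_MEMORY, &args);
 *	handle = args.out.handle;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.op = HL_MEM_OP_MAP;
 *	args.in.map_device.handle = handle;
 *	ioctl(fd, HL_IOCTL_MEMORY, &args);
 *	device_va = args.out.device_virt_addr;
 */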
1198
1199/*
1200 * hl_pin_host_memory - pins a chunk of host memory
1201 *
1202 * @hdev : pointer to the habanalabs device structure
1203 * @addr : the user-space virtual address of the memory area
1204 * @size : the size of the memory area
1205 * @userptr : pointer to hl_userptr structure
1206 *
1207 * This function does the following:
1208 * - Pins the physical pages
1209  * - Creates an SG list from those pages
1210 */
1211int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
1212 struct hl_userptr *userptr)
1213{
1214 u64 start, end;
1215 u32 npages, offset;
1216 int rc;
1217
1218 if (!size) {
1219 dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
1220 return -EINVAL;
1221 }
1222
1223 if (!access_ok((void __user *) (uintptr_t) addr, size)) {
1224 dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
1225 return -EFAULT;
1226 }
1227
1228 /*
1229 * If the combination of the address and size requested for this memory
1230 * region causes an integer overflow, return error.
1231 */
1232 if (((addr + size) < addr) ||
1233 PAGE_ALIGN(addr + size) < (addr + size)) {
1234 dev_err(hdev->dev,
1235 "user pointer 0x%llx + %llu causes integer overflow\n",
1236 addr, size);
1237 return -EINVAL;
1238 }
1239
1240 start = addr & PAGE_MASK;
1241 offset = addr & ~PAGE_MASK;
1242 end = PAGE_ALIGN(addr + size);
1243 npages = (end - start) >> PAGE_SHIFT;
1244
1245 userptr->size = size;
1246 userptr->addr = addr;
1247 userptr->dma_mapped = false;
1248 INIT_LIST_HEAD(&userptr->job_node);
1249
1250 userptr->vec = frame_vector_create(npages);
1251 if (!userptr->vec) {
1252 dev_err(hdev->dev, "Failed to create frame vector\n");
1253 return -ENOMEM;
1254 }
1255
1256 rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
1257 userptr->vec);
1258
1259 if (rc != npages) {
1260 dev_err(hdev->dev,
1261 "Failed to map host memory, user ptr probably wrong\n");
1262 if (rc < 0)
1263 goto destroy_framevec;
1264 rc = -EFAULT;
1265 goto put_framevec;
1266 }
1267
1268 if (frame_vector_to_pages(userptr->vec) < 0) {
1269 dev_err(hdev->dev,
1270 "Failed to translate frame vector to pages\n");
1271 rc = -EFAULT;
1272 goto put_framevec;
1273 }
1274
1275 userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
1276 if (!userptr->sgt) {
1277 rc = -ENOMEM;
1278 goto put_framevec;
1279 }
1280
1281 rc = sg_alloc_table_from_pages(userptr->sgt,
1282 frame_vector_pages(userptr->vec),
1283 npages, offset, size, GFP_ATOMIC);
1284 if (rc < 0) {
1285 dev_err(hdev->dev, "failed to create SG table from pages\n");
1286 goto free_sgt;
1287 }
1288
1289 hl_debugfs_add_userptr(hdev, userptr);
1290
1291 return 0;
1292
1293free_sgt:
1294 kfree(userptr->sgt);
1295put_framevec:
1296 put_vaddr_frames(userptr->vec);
1297destroy_framevec:
1298 frame_vector_destroy(userptr->vec);
1299 return rc;
1300}
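/*
 * Illustrative example (not part of the driver), assuming 4KB pages: pinning
 * addr = 0x10000800 with size = 0x2000 gives start = 0x10000000,
 * offset = 0x800, end = PAGE_ALIGN(0x10002800) = 0x10003000 and therefore
 * npages = 3, i.e. the request straddles three pages even though it is only
 * two pages long. The values are hypothetical.
 */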
1301
1302/*
1303 * hl_unpin_host_memory - unpins a chunk of host memory
1304 *
1305 * @hdev : pointer to the habanalabs device structure
1306 * @userptr : pointer to hl_userptr structure
1307 *
1308 * This function does the following:
1309 * - Unpins the physical pages related to the host memory
1310  * - Frees the SG list
1311 */
1312int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
1313{
1314 struct page **pages;
1315
1316 hl_debugfs_remove_userptr(hdev, userptr);
1317
1318 if (userptr->dma_mapped)
1319 hdev->asic_funcs->hl_dma_unmap_sg(hdev,
1320 userptr->sgt->sgl,
1321 userptr->sgt->nents,
1322 userptr->dir);
1323
1324 pages = frame_vector_pages(userptr->vec);
1325 if (!IS_ERR(pages)) {
1326 int i;
1327
1328 for (i = 0; i < frame_vector_count(userptr->vec); i++)
1329 set_page_dirty_lock(pages[i]);
1330 }
1331 put_vaddr_frames(userptr->vec);
1332 frame_vector_destroy(userptr->vec);
1333
1334 list_del(&userptr->job_node);
1335
1336 sg_free_table(userptr->sgt);
1337 kfree(userptr->sgt);
1338
1339 return 0;
1340}
1341
1342/*
1343 * hl_userptr_delete_list - clear userptr list
1344 *
1345 * @hdev : pointer to the habanalabs device structure
1346 * @userptr_list : pointer to the list to clear
1347 *
1348 * This function does the following:
1349 * - Iterates over the list and unpins the host memory and frees the userptr
1350 * structure.
1351 */
1352void hl_userptr_delete_list(struct hl_device *hdev,
1353 struct list_head *userptr_list)
1354{
1355 struct hl_userptr *userptr, *tmp;
1356
1357 list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
1358 hl_unpin_host_memory(hdev, userptr);
1359 kfree(userptr);
1360 }
1361
1362 INIT_LIST_HEAD(userptr_list);
1363}
1364
1365/*
1366 * hl_userptr_is_pinned - returns whether the given userptr is pinned
1367 *
1368 * @hdev : pointer to the habanalabs device structure
1369  * @userptr_list : pointer to the list to search in
1370  * @userptr : pointer to return the found userptr, if any
1371  *
1372  * This function does the following:
1373  * - Iterates over the list and checks if an entry with the given address and
1374  *   size is in it, which means it is pinned; returns true if so, false otherwise.
1375 */
1376bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
1377 u32 size, struct list_head *userptr_list,
1378 struct hl_userptr **userptr)
1379{
1380 list_for_each_entry((*userptr), userptr_list, job_node) {
1381 if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
1382 return true;
1383 }
1384
1385 return false;
1386}
1387
1388/*
1389 * hl_va_range_init - initialize virtual addresses range
1390 *
1391 * @hdev : pointer to the habanalabs device structure
1392 * @va_range : pointer to the range to initialize
1393 * @start : range start address
1394 * @end : range end address
1395 *
1396 * This function does the following:
1397 * - Initializes the virtual addresses list of the given range with the given
1398 * addresses.
1399 */
1400static int hl_va_range_init(struct hl_device *hdev,
1401 struct hl_va_range *va_range, u64 start, u64 end)
1402{
1403 int rc;
1404
1405 INIT_LIST_HEAD(&va_range->list);
1406
1407 /* PAGE_SIZE alignment */
1408
1409 if (start & (PAGE_SIZE - 1)) {
1410 start &= PAGE_MASK;
1411 start += PAGE_SIZE;
1412 }
1413
1414 if (end & (PAGE_SIZE - 1))
1415 end &= PAGE_MASK;
1416
1417 if (start >= end) {
1418 dev_err(hdev->dev, "too small vm range for va list\n");
1419 return -EFAULT;
1420 }
1421
1422 rc = add_va_block(hdev, va_range, start, end);
1423
1424 if (rc) {
1425 dev_err(hdev->dev, "Failed to init host va list\n");
1426 return rc;
1427 }
1428
1429 va_range->start_addr = start;
1430 va_range->end_addr = end;
1431
1432 return 0;
1433}
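/*
 * Illustrative example (not part of the driver), assuming 4KB pages: an
 * unaligned range of 0x10000800..0x20000800 is trimmed to
 * 0x10001000..0x20000000 before the single initial block is added, so the
 * usable range only ever shrinks. The addresses are hypothetical.
 */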
1434
1435/*
1436 * hl_vm_ctx_init_with_ranges - initialize virtual memory for context
1437 *
1438 * @ctx : pointer to the habanalabs context structure
1439 * @host_range_start : host virtual addresses range start
1440 * @host_range_end : host virtual addresses range end
1441 * @dram_range_start : dram virtual addresses range start
1442 * @dram_range_end : dram virtual addresses range end
1443 *
1444 * This function initializes the following:
1445 * - MMU for context
1446 * - Virtual address to area descriptor hashtable
1447 * - Virtual block list of available virtual memory
1448 */
1449static int hl_vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start,
1450 u64 host_range_end, u64 dram_range_start,
1451 u64 dram_range_end)
1452{
1453 struct hl_device *hdev = ctx->hdev;
1454 int rc;
1455
1456 rc = hl_mmu_ctx_init(ctx);
1457 if (rc) {
1458 dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
1459 return rc;
1460 }
1461
1462 mutex_init(&ctx->mem_hash_lock);
1463 hash_init(ctx->mem_hash);
1464
1465 mutex_init(&ctx->host_va_range.lock);
1466
1467 rc = hl_va_range_init(hdev, &ctx->host_va_range, host_range_start,
1468 host_range_end);
1469 if (rc) {
1470 dev_err(hdev->dev, "failed to init host vm range\n");
1471 goto host_vm_err;
1472 }
1473
1474 mutex_init(&ctx->dram_va_range.lock);
1475
1476 rc = hl_va_range_init(hdev, &ctx->dram_va_range, dram_range_start,
1477 dram_range_end);
1478 if (rc) {
1479 dev_err(hdev->dev, "failed to init dram vm range\n");
1480 goto dram_vm_err;
1481 }
1482
1483 hl_debugfs_add_ctx_mem_hash(hdev, ctx);
1484
1485 return 0;
1486
1487dram_vm_err:
1488 mutex_destroy(&ctx->dram_va_range.lock);
1489
1490 mutex_lock(&ctx->host_va_range.lock);
1491 clear_va_list_locked(hdev, &ctx->host_va_range.list);
1492 mutex_unlock(&ctx->host_va_range.lock);
1493host_vm_err:
1494 mutex_destroy(&ctx->host_va_range.lock);
1495 mutex_destroy(&ctx->mem_hash_lock);
1496 hl_mmu_ctx_fini(ctx);
1497
1498 return rc;
1499}
1500
1501int hl_vm_ctx_init(struct hl_ctx *ctx)
1502{
1503 struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
1504 u64 host_range_start, host_range_end, dram_range_start,
1505 dram_range_end;
1506
1507 atomic64_set(&ctx->dram_phys_mem, 0);
1508
1509 /*
1510 * - If MMU is enabled, init the ranges as usual.
1511 * - If MMU is disabled, in case of host mapping, the returned address
1512 * is the given one.
1513 * In case of DRAM mapping, the returned address is the physical
1514 * address of the memory related to the given handle.
1515 */
1516 if (ctx->hdev->mmu_enable) {
1517 dram_range_start = prop->va_space_dram_start_address;
1518 dram_range_end = prop->va_space_dram_end_address;
1519 host_range_start = prop->va_space_host_start_address;
1520 host_range_end = prop->va_space_host_end_address;
1521 } else {
1522 dram_range_start = prop->dram_user_base_address;
1523 dram_range_end = prop->dram_end_address;
1524 host_range_start = prop->dram_user_base_address;
1525 host_range_end = prop->dram_end_address;
1526 }
1527
1528 return hl_vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
1529 dram_range_start, dram_range_end);
1530}
1531
1532/*
1533 * hl_va_range_fini - clear a virtual addresses range
1534 *
1535  * @hdev : pointer to the habanalabs device structure
1536  * @va_range : pointer to virtual addresses range
1537  *
1538  * This function does the following:
1539 * - Checks that the given range contains the whole initial range
1540 * - Frees the virtual addresses block list and its lock
1541 */
1542static void hl_va_range_fini(struct hl_device *hdev,
1543 struct hl_va_range *va_range)
1544{
1545 struct hl_vm_va_block *va_block;
1546
1547 if (list_empty(&va_range->list)) {
1548 dev_warn(hdev->dev,
1549 "va list should not be empty on cleanup!\n");
1550 goto out;
1551 }
1552
1553 if (!list_is_singular(&va_range->list)) {
1554 dev_warn(hdev->dev,
1555 "va list should not contain multiple blocks on cleanup!\n");
1556 goto free_va_list;
1557 }
1558
1559 va_block = list_first_entry(&va_range->list, typeof(*va_block), node);
1560
1561 if (va_block->start != va_range->start_addr ||
1562 va_block->end != va_range->end_addr) {
1563 dev_warn(hdev->dev,
1564 "wrong va block on cleanup, from 0x%llx to 0x%llx\n",
1565 va_block->start, va_block->end);
1566 goto free_va_list;
1567 }
1568
1569free_va_list:
1570 mutex_lock(&va_range->lock);
1571 clear_va_list_locked(hdev, &va_range->list);
1572 mutex_unlock(&va_range->lock);
1573
1574out:
1575 mutex_destroy(&va_range->lock);
1576}
1577
1578/*
1579 * hl_vm_ctx_fini - virtual memory teardown of context
1580 *
1581 * @ctx : pointer to the habanalabs context structure
1582 *
1583  * This function performs teardown of the following:
1584 * - Virtual block list of available virtual memory
1585 * - Virtual address to area descriptor hashtable
1586 * - MMU for context
1587 *
1588 * In addition this function does the following:
1589 * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
1590 * hashtable should be empty as no valid mappings should exist at this
1591 * point.
1592 * - Frees any existing physical page list from the idr which relates to the
1593 * current context asid.
1594 * - This function checks the virtual block list for correctness. At this point
1595 * the list should contain one element which describes the whole virtual
1596 * memory range of the context. Otherwise, a warning is printed.
1597 */
1598void hl_vm_ctx_fini(struct hl_ctx *ctx)
1599{
1600 struct hl_device *hdev = ctx->hdev;
1601 struct hl_vm *vm = &hdev->vm;
1602 struct hl_vm_phys_pg_pack *phys_pg_list;
1603 struct hl_vm_hash_node *hnode;
1604 struct hlist_node *tmp_node;
1605 int i;
1606
1607 hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
1608
1609 if (!hash_empty(ctx->mem_hash))
1610 dev_notice(hdev->dev, "ctx is freed while it has va in use\n");
1611
1612 hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
1613 dev_dbg(hdev->dev,
1614 "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
1615 hnode->vaddr, ctx->asid);
1616 unmap_device_va(ctx, hnode->vaddr);
1617 }
1618
1619 spin_lock(&vm->idr_lock);
1620 idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
1621 if (phys_pg_list->asid == ctx->asid) {
1622 dev_dbg(hdev->dev,
1623 "page list 0x%p of asid %d is still alive\n",
1624 phys_pg_list, ctx->asid);
1625 free_phys_pg_pack(hdev, phys_pg_list);
1626 idr_remove(&vm->phys_pg_pack_handles, i);
1627 }
1628 spin_unlock(&vm->idr_lock);
1629
1630 hl_va_range_fini(hdev, &ctx->dram_va_range);
1631 hl_va_range_fini(hdev, &ctx->host_va_range);
1632
1633 mutex_destroy(&ctx->mem_hash_lock);
1634 hl_mmu_ctx_fini(ctx);
1635}
1636
1637/*
1638 * hl_vm_init - initialize virtual memory module
1639 *
1640 * @hdev : pointer to the habanalabs device structure
1641 *
1642 * This function initializes the following:
1643 * - MMU module
1644 * - DRAM physical pages pool of 2MB
1645 * - Idr for device memory allocation handles
1646 */
1647int hl_vm_init(struct hl_device *hdev)
1648{
1649 struct asic_fixed_properties *prop = &hdev->asic_prop;
1650 struct hl_vm *vm = &hdev->vm;
1651 int rc;
1652
1653 rc = hl_mmu_init(hdev);
1654 if (rc) {
1655 dev_err(hdev->dev, "Failed to init MMU\n");
1656 return rc;
1657 }
1658
1659 vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
1660 if (!vm->dram_pg_pool) {
1661 dev_err(hdev->dev, "Failed to create dram page pool\n");
1662 rc = -ENOMEM;
1663 goto pool_create_err;
1664 }
1665
1666 kref_init(&vm->dram_pg_pool_refcount);
1667
1668 rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
1669 prop->dram_end_address - prop->dram_user_base_address,
1670 -1);
1671
1672 if (rc) {
1673 dev_err(hdev->dev,
1674 "Failed to add memory to dram page pool %d\n", rc);
1675 goto pool_add_err;
1676 }
1677
1678 spin_lock_init(&vm->idr_lock);
1679 idr_init(&vm->phys_pg_pack_handles);
1680
1681 atomic64_set(&hdev->dram_used_mem, 0);
1682
1683 vm->init_done = true;
1684
1685 return 0;
1686
1687pool_add_err:
1688 gen_pool_destroy(vm->dram_pg_pool);
1689pool_create_err:
1690 hl_mmu_fini(hdev);
1691
1692 return rc;
1693}
1694
1695/*
1696 * hl_vm_fini - virtual memory module teardown
1697 *
1698 * @hdev : pointer to the habanalabs device structure
1699 *
1700  * This function performs teardown of the following:
1701 * - Idr for device memory allocation handles
1702 * - DRAM physical pages pool of 2MB
1703 * - MMU module
1704 */
1705void hl_vm_fini(struct hl_device *hdev)
1706{
1707 struct hl_vm *vm = &hdev->vm;
1708
1709 if (!vm->init_done)
1710 return;
1711
1712 /*
1713 * At this point all the contexts should be freed and hence no DRAM
1714 * memory should be in use. Hence the DRAM pool should be freed here.
1715 */
1716 if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
1717 dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
1718 __func__);
1719
1720 hl_mmu_fini(hdev);
1721
1722 vm->init_done = false;
1723}
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
new file mode 100644
index 000000000000..2f2e99cb2743
--- /dev/null
+++ b/drivers/misc/habanalabs/mmu.c
@@ -0,0 +1,906 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "habanalabs.h"
9#include "include/hw_ip/mmu/mmu_general.h"
10
11#include <linux/genalloc.h>
12#include <linux/slab.h>
13
14static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 addr)
15{
16 struct pgt_info *pgt_info = NULL;
17
18 hash_for_each_possible(ctx->mmu_hash, pgt_info, node,
19 (unsigned long) addr)
20 if (addr == pgt_info->addr)
21 break;
22
23 return pgt_info;
24}
25
26static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
27{
28 struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
29
30 gen_pool_free(pgt_info->ctx->hdev->mmu_pgt_pool, pgt_info->addr,
31 ctx->hdev->asic_prop.mmu_hop_table_size);
32 hash_del(&pgt_info->node);
33
34 kfree(pgt_info);
35}
36
37static u64 alloc_hop(struct hl_ctx *ctx)
38{
39 struct hl_device *hdev = ctx->hdev;
40 struct pgt_info *pgt_info;
41 u64 addr;
42
43 pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
44 if (!pgt_info)
45 return ULLONG_MAX;
46
47 addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool,
48 hdev->asic_prop.mmu_hop_table_size);
49 if (!addr) {
50 dev_err(hdev->dev, "failed to allocate page\n");
51 kfree(pgt_info);
52 return ULLONG_MAX;
53 }
54
55 pgt_info->addr = addr;
56 pgt_info->ctx = ctx;
57 pgt_info->num_of_ptes = 0;
58 hash_add(ctx->mmu_hash, &pgt_info->node, addr);
59
60 return addr;
61}
62
63static inline void clear_pte(struct hl_device *hdev, u64 pte_addr)
64{
65 /* clear the last and present bits */
66 hdev->asic_funcs->write_pte(hdev, pte_addr, 0);
67}
68
69static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
70{
71 get_pgt_info(ctx, hop_addr)->num_of_ptes++;
72}
73
74/*
75 * put_pte - decrement the num of ptes and free the hop if possible
76 *
77 * @ctx: pointer to the context structure
78 * @hop_addr: addr of the hop
79 *
80 * This function returns the number of ptes left on this hop. If the number is
81  * 0, it means the hop itself was freed.
82 */
83static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
84{
85 struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
86 int num_of_ptes_left;
87
88 pgt_info->num_of_ptes--;
89
90 /*
91 * Need to save the number of ptes left because free_hop might free
92 * the pgt_info
93 */
94 num_of_ptes_left = pgt_info->num_of_ptes;
95 if (!num_of_ptes_left)
96 free_hop(ctx, hop_addr);
97
98 return num_of_ptes_left;
99}
100
101static inline u64 get_hop0_addr(struct hl_ctx *ctx)
102{
103 return ctx->hdev->asic_prop.mmu_pgt_addr +
104 (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
105}
106
107static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
108 u64 virt_addr, u64 mask, u64 shift)
109{
110 return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
111 ((virt_addr & mask) >> shift);
112}
113
114static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
115{
116 return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP0_MASK, HOP0_SHIFT);
117}
118
119static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
120{
121 return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP1_MASK, HOP1_SHIFT);
122}
123
124static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
125{
126 return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP2_MASK, HOP2_SHIFT);
127}
128
129static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
130{
131 return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP3_MASK, HOP3_SHIFT);
132}
133
134static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
135{
136 return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT);
137}
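/*
 * Illustrative example (not part of the driver): each helper above computes
 * hop_addr + mmu_pte_size * ((virt_addr & MASK) >> SHIFT). Assuming a
 * hypothetical hop with SHIFT == 21, a 9-bit index and 8-byte PTEs, the
 * virtual address 0x400000 (bit 22 set) selects entry index 2, i.e. the PTE
 * located at hop_addr + 16. The shift, index width and PTE size here are
 * assumptions for illustration only.
 */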
138
139static inline u64 get_next_hop_addr(u64 curr_pte)
140{
141 if (curr_pte & PAGE_PRESENT_MASK)
142 return curr_pte & PHYS_ADDR_MASK;
143 else
144 return ULLONG_MAX;
145}
146
147static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
148 bool *is_new_hop)
149{
150 u64 hop_addr = get_next_hop_addr(curr_pte);
151
152 if (hop_addr == ULLONG_MAX) {
153 hop_addr = alloc_hop(ctx);
154 *is_new_hop = (hop_addr != ULLONG_MAX);
155 }
156
157 return hop_addr;
158}
159
160/*
161 * hl_mmu_init - init the mmu module
162 *
163 * @hdev: pointer to the habanalabs device structure
164 *
165 * This function does the following:
166 * - Allocate max_asid zeroed hop0 pgts so no mapping is available
167 * - Enable mmu in hw
168 * - Invalidate the mmu cache
169 * - Create a pool of pages for pgts
170 * - Returns 0 on success
171 *
172 * This function depends on DMA QMAN to be working!
173 */
174int hl_mmu_init(struct hl_device *hdev)
175{
176 struct asic_fixed_properties *prop = &hdev->asic_prop;
177 int rc;
178
179 if (!hdev->mmu_enable)
180 return 0;
181
182 /* MMU HW init was already done in device hw_init() */
183
184 mutex_init(&hdev->mmu_cache_lock);
185
186 hdev->mmu_pgt_pool =
187 gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
188
189 if (!hdev->mmu_pgt_pool) {
190 dev_err(hdev->dev, "Failed to create page gen pool\n");
191 rc = -ENOMEM;
192 goto err_pool_create;
193 }
194
195 rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
196 prop->mmu_hop0_tables_total_size,
197 prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
198 -1);
199 if (rc) {
200 dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
201 goto err_pool_add;
202 }
203
204 return 0;
205
206err_pool_add:
207 gen_pool_destroy(hdev->mmu_pgt_pool);
208err_pool_create:
209 mutex_destroy(&hdev->mmu_cache_lock);
210
211 return rc;
212}
213
214/*
215 * hl_mmu_fini - release the mmu module.
216 *
217 * @hdev: pointer to the habanalabs device structure
218 *
219 * This function does the following:
220 * - Disable mmu in hw
221 * - free the pgts pool
222 *
223 * All ctxs should be freed before calling this func
224 */
225void hl_mmu_fini(struct hl_device *hdev)
226{
227 if (!hdev->mmu_enable)
228 return;
229
230 gen_pool_destroy(hdev->mmu_pgt_pool);
231
232 mutex_destroy(&hdev->mmu_cache_lock);
233
234 /* MMU HW fini will be done in device hw_fini() */
235}
236
237/**
238 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
239 * @ctx: pointer to the context structure to initialize.
240 *
241 * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
242 * page tables hops related to this context and an optional DRAM default page
243 * mapping.
244 * Return: 0 on success, non-zero otherwise.
245 */
246int hl_mmu_ctx_init(struct hl_ctx *ctx)
247{
248 struct hl_device *hdev = ctx->hdev;
249 struct asic_fixed_properties *prop = &hdev->asic_prop;
250 u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr,
251 hop3_pte_addr, pte_val;
252 int rc, i, j, hop3_allocated = 0;
253
254 if (!hdev->mmu_enable)
255 return 0;
256
257 mutex_init(&ctx->mmu_lock);
258 hash_init(ctx->mmu_hash);
259
260 if (!hdev->dram_supports_virtual_memory ||
261 !hdev->dram_default_page_mapping)
262 return 0;
263
264 num_of_hop3 = prop->dram_size_for_default_page_mapping;
265 do_div(num_of_hop3, prop->dram_page_size);
266 do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
267
268 /* add hop1 and hop2 */
269 total_hops = num_of_hop3 + 2;
270
271 ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
272 if (!ctx->dram_default_hops) {
273 rc = -ENOMEM;
274 goto alloc_err;
275 }
276
277 hop1_addr = alloc_hop(ctx);
278 if (hop1_addr == ULLONG_MAX) {
279 dev_err(hdev->dev, "failed to alloc hop 1\n");
280 rc = -ENOMEM;
281 goto hop1_err;
282 }
283
284 ctx->dram_default_hops[total_hops - 1] = hop1_addr;
285
286 hop2_addr = alloc_hop(ctx);
287 if (hop2_addr == ULLONG_MAX) {
288 dev_err(hdev->dev, "failed to alloc hop 2\n");
289 rc = -ENOMEM;
290 goto hop2_err;
291 }
292
293 ctx->dram_default_hops[total_hops - 2] = hop2_addr;
294
295 for (i = 0 ; i < num_of_hop3 ; i++) {
296 ctx->dram_default_hops[i] = alloc_hop(ctx);
297 if (ctx->dram_default_hops[i] == ULLONG_MAX) {
298 dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
299 rc = -ENOMEM;
300 goto hop3_err;
301 }
302 hop3_allocated++;
303 }
304
305 /* need only pte 0 in hops 0 and 1 */
306 pte_val = (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
307 hdev->asic_funcs->write_pte(hdev, get_hop0_addr(ctx), pte_val);
308
309 pte_val = (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
310 hdev->asic_funcs->write_pte(hdev, hop1_addr, pte_val);
311 get_pte(ctx, hop1_addr);
312
313 hop2_pte_addr = hop2_addr;
314 for (i = 0 ; i < num_of_hop3 ; i++) {
315 pte_val = (ctx->dram_default_hops[i] & PTE_PHYS_ADDR_MASK) |
316 PAGE_PRESENT_MASK;
317 hdev->asic_funcs->write_pte(hdev, hop2_pte_addr, pte_val);
318 get_pte(ctx, hop2_addr);
319 hop2_pte_addr += HL_PTE_SIZE;
320 }
321
322 pte_val = (prop->mmu_dram_default_page_addr & PTE_PHYS_ADDR_MASK) |
323 LAST_MASK | PAGE_PRESENT_MASK;
324
325 for (i = 0 ; i < num_of_hop3 ; i++) {
326 hop3_pte_addr = ctx->dram_default_hops[i];
327 for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
328 hdev->asic_funcs->write_pte(hdev, hop3_pte_addr,
329 pte_val);
330 get_pte(ctx, ctx->dram_default_hops[i]);
331 hop3_pte_addr += HL_PTE_SIZE;
332 }
333 }
334
335 /* flush all writes to reach PCI */
336 mb();
337 hdev->asic_funcs->read_pte(hdev, hop2_addr);
338
339 return 0;
340
341hop3_err:
342 for (i = 0 ; i < hop3_allocated ; i++)
343 free_hop(ctx, ctx->dram_default_hops[i]);
344 free_hop(ctx, hop2_addr);
345hop2_err:
346 free_hop(ctx, hop1_addr);
347hop1_err:
348 kfree(ctx->dram_default_hops);
349alloc_err:
350 mutex_destroy(&ctx->mmu_lock);
351
352 return rc;
353}
354
355/*
356 * hl_mmu_ctx_fini - disable a ctx from using the mmu module
357 *
358 * @ctx: pointer to the context structure
359 *
360 * This function does the following:
361 * - Free any pgts which were not freed yet
362 * - Free the mutex
363 * - Free DRAM default page mapping hops
364 */
365void hl_mmu_ctx_fini(struct hl_ctx *ctx)
366{
367 struct hl_device *hdev = ctx->hdev;
368 struct asic_fixed_properties *prop = &hdev->asic_prop;
369 struct pgt_info *pgt_info;
370 struct hlist_node *tmp;
371 u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr,
372 hop3_pte_addr;
373 int i, j;
374
375 if (!ctx->hdev->mmu_enable)
376 return;
377
378 if (hdev->dram_supports_virtual_memory &&
379 hdev->dram_default_page_mapping) {
380
381 num_of_hop3 = prop->dram_size_for_default_page_mapping;
382 do_div(num_of_hop3, prop->dram_page_size);
383 do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
384
385 /* add hop1 and hop2 */
386 total_hops = num_of_hop3 + 2;
387 hop1_addr = ctx->dram_default_hops[total_hops - 1];
388 hop2_addr = ctx->dram_default_hops[total_hops - 2];
389
390 for (i = 0 ; i < num_of_hop3 ; i++) {
391 hop3_pte_addr = ctx->dram_default_hops[i];
392 for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
393 clear_pte(hdev, hop3_pte_addr);
394 put_pte(ctx, ctx->dram_default_hops[i]);
395 hop3_pte_addr += HL_PTE_SIZE;
396 }
397 }
398
399 hop2_pte_addr = hop2_addr;
400 for (i = 0 ; i < num_of_hop3 ; i++) {
401 clear_pte(hdev, hop2_pte_addr);
402 put_pte(ctx, hop2_addr);
403 hop2_pte_addr += HL_PTE_SIZE;
404 }
405
406 clear_pte(hdev, hop1_addr);
407 put_pte(ctx, hop1_addr);
408 clear_pte(hdev, get_hop0_addr(ctx));
409
410 kfree(ctx->dram_default_hops);
411
412 /* flush all writes to reach PCI */
413 mb();
414 hdev->asic_funcs->read_pte(hdev, hop2_addr);
415 }
416
417 if (!hash_empty(ctx->mmu_hash))
418 dev_err(hdev->dev, "ctx is freed while it has pgts in use\n");
419
420 hash_for_each_safe(ctx->mmu_hash, i, tmp, pgt_info, node) {
421 dev_err(hdev->dev,
422 "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
423 pgt_info->addr, ctx->asid, pgt_info->num_of_ptes);
424 free_hop(ctx, pgt_info->addr);
425 }
426
427 mutex_destroy(&ctx->mmu_lock);
428}
429
430static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
431{
432 struct hl_device *hdev = ctx->hdev;
433 struct asic_fixed_properties *prop = &hdev->asic_prop;
434 u64 hop0_addr = 0, hop0_pte_addr = 0,
435 hop1_addr = 0, hop1_pte_addr = 0,
436 hop2_addr = 0, hop2_pte_addr = 0,
437 hop3_addr = 0, hop3_pte_addr = 0,
438 hop4_addr = 0, hop4_pte_addr = 0,
439 curr_pte;
440 int clear_hop3 = 1;
441 bool is_dram_addr, is_huge, is_dram_default_page_mapping;
442
443 is_dram_addr = hl_mem_area_inside_range(virt_addr, PAGE_SIZE_2MB,
444 prop->va_space_dram_start_address,
445 prop->va_space_dram_end_address);
446
447 hop0_addr = get_hop0_addr(ctx);
448
449 hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
450
451 curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
452
453 hop1_addr = get_next_hop_addr(curr_pte);
454
455 if (hop1_addr == ULLONG_MAX)
456 goto not_mapped;
457
458 hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
459
460 curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
461
462 hop2_addr = get_next_hop_addr(curr_pte);
463
464 if (hop2_addr == ULLONG_MAX)
465 goto not_mapped;
466
467 hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
468
469 curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
470
471 hop3_addr = get_next_hop_addr(curr_pte);
472
473 if (hop3_addr == ULLONG_MAX)
474 goto not_mapped;
475
476 hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
477
478 curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
479
480 is_huge = curr_pte & LAST_MASK;
481
482 if (is_dram_addr && !is_huge) {
483 dev_err(hdev->dev,
484 "DRAM unmapping should use huge pages only\n");
485 return -EFAULT;
486 }
487
488 is_dram_default_page_mapping =
489 hdev->dram_default_page_mapping && is_dram_addr;
490
491 if (!is_huge) {
492 hop4_addr = get_next_hop_addr(curr_pte);
493
494 if (hop4_addr == ULLONG_MAX)
495 goto not_mapped;
496
497 hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
498
499 curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
500
501 clear_hop3 = 0;
502 }
503
504 if (is_dram_default_page_mapping) {
505 u64 zero_pte = (prop->mmu_dram_default_page_addr &
506 PTE_PHYS_ADDR_MASK) | LAST_MASK |
507 PAGE_PRESENT_MASK;
508 if (curr_pte == zero_pte) {
509 dev_err(hdev->dev,
510 "DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
511 virt_addr);
512 goto not_mapped;
513 }
514
515 if (!(curr_pte & PAGE_PRESENT_MASK)) {
516 dev_err(hdev->dev,
517 "DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
518 virt_addr);
519 goto not_mapped;
520 }
521
522 hdev->asic_funcs->write_pte(hdev, hop3_pte_addr, zero_pte);
523 put_pte(ctx, hop3_addr);
524 } else {
525 if (!(curr_pte & PAGE_PRESENT_MASK))
526 goto not_mapped;
527
528 clear_pte(hdev, hop4_addr ? hop4_pte_addr : hop3_pte_addr);
529
530 if (hop4_addr && !put_pte(ctx, hop4_addr))
531 clear_hop3 = 1;
532
533 if (!clear_hop3)
534 goto flush;
535 clear_pte(hdev, hop3_pte_addr);
536
537 if (put_pte(ctx, hop3_addr))
538 goto flush;
539 clear_pte(hdev, hop2_pte_addr);
540
541 if (put_pte(ctx, hop2_addr))
542 goto flush;
543 clear_pte(hdev, hop1_pte_addr);
544
545 if (put_pte(ctx, hop1_addr))
546 goto flush;
547 clear_pte(hdev, hop0_pte_addr);
548 }
549
550flush:
551 /* flush all writes from all cores to reach PCI */
552 mb();
553
554 hdev->asic_funcs->read_pte(hdev,
555 hop4_addr ? hop4_pte_addr : hop3_pte_addr);
556
557 return 0;
558
559not_mapped:
560 dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
561 virt_addr);
562
563 return -EINVAL;
564}
565
566/*
567 * hl_mmu_unmap - unmaps a virtual addr
568 *
569 * @ctx: pointer to the context structure
570 * @virt_addr: virt addr to unmap
571 * @page_size: size of the page to unmap
572 *
573 * This function does the following:
574 * - Check that the virt addr is mapped
575 * - Unmap the virt addr and free the pgts if possible
576 * - Returns 0 on success, -EINVAL if the given addr is not mapped
577 *
578 * Because this function changes the page tables in the device and because it
579 * changes the MMU hash, it must be protected by a lock.
580 * However, because it unmaps only a single page, the lock should be implemented
581 * at a higher level in order to protect the entire unmapping of the memory area.
582 */
583int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
584{
585 struct hl_device *hdev = ctx->hdev;
586 u64 real_virt_addr;
587 u32 real_page_size, npages;
588 int i, rc;
589
590 if (!hdev->mmu_enable)
591 return 0;
592
593 /*
594 * The H/W handles mapping of 4KB/2MB pages. Hence, if the host page size
595 * is bigger, we break it into sub-pages and unmap them separately.
596 */
597 if ((page_size % PAGE_SIZE_2MB) == 0) {
598 real_page_size = PAGE_SIZE_2MB;
599 } else if ((page_size % PAGE_SIZE_4KB) == 0) {
600 real_page_size = PAGE_SIZE_4KB;
601 } else {
602 dev_err(hdev->dev,
603 "page size of %u is not 4KB nor 2MB aligned, can't unmap\n",
604 page_size);
605
606 return -EFAULT;
607 }
608
609 npages = page_size / real_page_size;
610 real_virt_addr = virt_addr;
611
612 for (i = 0 ; i < npages ; i++) {
613 rc = _hl_mmu_unmap(ctx, real_virt_addr);
614 if (rc)
615 return rc;
616
617 real_virt_addr += real_page_size;
618 }
619
620 return 0;
621}
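
Both hl_mmu_unmap() above and hl_mmu_map() further below first reduce an arbitrarily sized host page to the two page sizes the H/W natively handles, then loop over the resulting sub-pages. A minimal standalone sketch of that split, for illustration only: the constants mirror the driver's PAGE_SIZE_2MB/PAGE_SIZE_4KB and split_to_hw_pages() is a hypothetical helper, not driver code.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE_4KB (1u << 12)
#define PAGE_SIZE_2MB (1u << 21)

/* Pick the HW-native page size and count, as the driver does before looping. */
static int split_to_hw_pages(uint32_t page_size, uint32_t *real_page_size,
			     uint32_t *npages)
{
	if ((page_size % PAGE_SIZE_2MB) == 0)
		*real_page_size = PAGE_SIZE_2MB;
	else if ((page_size % PAGE_SIZE_4KB) == 0)
		*real_page_size = PAGE_SIZE_4KB;
	else
		return -1; /* neither 4KB nor 2MB aligned */

	*npages = page_size / *real_page_size;
	return 0;
}

int main(void)
{
	uint32_t real, n;

	if (!split_to_hw_pages(8 * PAGE_SIZE_2MB, &real, &n))
		printf("16MB host page -> %u pages of %u bytes\n",
		       (unsigned)n, (unsigned)real);
	if (!split_to_hw_pages(64 * PAGE_SIZE_4KB, &real, &n))
		printf("256KB host page -> %u pages of %u bytes\n",
		       (unsigned)n, (unsigned)real);
	return 0;
}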
622
623static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
624 u32 page_size)
625{
626 struct hl_device *hdev = ctx->hdev;
627 struct asic_fixed_properties *prop = &hdev->asic_prop;
628 u64 hop0_addr = 0, hop0_pte_addr = 0,
629 hop1_addr = 0, hop1_pte_addr = 0,
630 hop2_addr = 0, hop2_pte_addr = 0,
631 hop3_addr = 0, hop3_pte_addr = 0,
632 hop4_addr = 0, hop4_pte_addr = 0,
633 curr_pte = 0;
634 bool hop1_new = false, hop2_new = false, hop3_new = false,
635 hop4_new = false, is_huge, is_dram_addr,
636 is_dram_default_page_mapping;
637 int rc = -ENOMEM;
638
639 /*
640 * This mapping function can map a 4KB/2MB page. For 2MB page there are
641 * only 3 hops rather than 4. Currently the DRAM allocation uses 2MB
642 * pages only but user memory could have been allocated with one of the
643 * two page sizes. Since this code is common to all three cases,
644 * we need this huge page check.
645 */
646 is_huge = page_size == PAGE_SIZE_2MB;
647
648 is_dram_addr = hl_mem_area_inside_range(virt_addr, page_size,
649 prop->va_space_dram_start_address,
650 prop->va_space_dram_end_address);
651
652 if (is_dram_addr && !is_huge) {
653 dev_err(hdev->dev, "DRAM mapping should use huge pages only\n");
654 return -EFAULT;
655 }
656
657 is_dram_default_page_mapping =
658 hdev->dram_default_page_mapping && is_dram_addr;
659
660 hop0_addr = get_hop0_addr(ctx);
661
662 hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
663
664 curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
665
666 hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
667
668 if (hop1_addr == ULLONG_MAX)
669 goto err;
670
671 hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
672
673 curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
674
675 hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
676
677 if (hop2_addr == ULLONG_MAX)
678 goto err;
679
680 hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
681
682 curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
683
684 hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
685
686 if (hop3_addr == ULLONG_MAX)
687 goto err;
688
689 hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
690
691 curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
692
693 if (!is_huge) {
694 hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
695
696 if (hop4_addr == ULLONG_MAX)
697 goto err;
698
699 hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
700
701 curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
702 }
703
704 if (is_dram_default_page_mapping) {
705 u64 zero_pte = (prop->mmu_dram_default_page_addr &
706 PTE_PHYS_ADDR_MASK) | LAST_MASK |
707 PAGE_PRESENT_MASK;
708
709 if (curr_pte != zero_pte) {
710 dev_err(hdev->dev,
711 "DRAM: mapping already exists for virt_addr 0x%llx\n",
712 virt_addr);
713 rc = -EINVAL;
714 goto err;
715 }
716
717 if (hop1_new || hop2_new || hop3_new || hop4_new) {
718 dev_err(hdev->dev,
719 "DRAM mapping should not allocate more hops\n");
720 rc = -EFAULT;
721 goto err;
722 }
723 } else if (curr_pte & PAGE_PRESENT_MASK) {
724 dev_err(hdev->dev,
725 "mapping already exists for virt_addr 0x%llx\n",
726 virt_addr);
727
728 dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
729 hdev->asic_funcs->read_pte(hdev, hop0_pte_addr),
730 hop0_pte_addr);
731 dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
732 hdev->asic_funcs->read_pte(hdev, hop1_pte_addr),
733 hop1_pte_addr);
734 dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
735 hdev->asic_funcs->read_pte(hdev, hop2_pte_addr),
736 hop2_pte_addr);
737 dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
738 hdev->asic_funcs->read_pte(hdev, hop3_pte_addr),
739 hop3_pte_addr);
740
741 if (!is_huge)
742 dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
743 hdev->asic_funcs->read_pte(hdev,
744 hop4_pte_addr),
745 hop4_pte_addr);
746
747 rc = -EINVAL;
748 goto err;
749 }
750
751 curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK
752 | PAGE_PRESENT_MASK;
753
754 hdev->asic_funcs->write_pte(hdev,
755 is_huge ? hop3_pte_addr : hop4_pte_addr,
756 curr_pte);
757
758 if (hop1_new) {
759 curr_pte = (hop1_addr & PTE_PHYS_ADDR_MASK) |
760 PAGE_PRESENT_MASK;
761 ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop0_pte_addr,
762 curr_pte);
763 }
764 if (hop2_new) {
765 curr_pte = (hop2_addr & PTE_PHYS_ADDR_MASK) |
766 PAGE_PRESENT_MASK;
767 ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop1_pte_addr,
768 curr_pte);
769 get_pte(ctx, hop1_addr);
770 }
771 if (hop3_new) {
772 curr_pte = (hop3_addr & PTE_PHYS_ADDR_MASK) |
773 PAGE_PRESENT_MASK;
774 ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop2_pte_addr,
775 curr_pte);
776 get_pte(ctx, hop2_addr);
777 }
778
779 if (!is_huge) {
780 if (hop4_new) {
781 curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) |
782 PAGE_PRESENT_MASK;
783 ctx->hdev->asic_funcs->write_pte(ctx->hdev,
784 hop3_pte_addr, curr_pte);
785 get_pte(ctx, hop3_addr);
786 }
787
788 get_pte(ctx, hop4_addr);
789 } else {
790 get_pte(ctx, hop3_addr);
791 }
792
793 /* flush all writes from all cores to reach PCI */
794 mb();
795
796 hdev->asic_funcs->read_pte(hdev,
797 is_huge ? hop3_pte_addr : hop4_pte_addr);
798
799 return 0;
800
801err:
802 if (hop4_new)
803 free_hop(ctx, hop4_addr);
804 if (hop3_new)
805 free_hop(ctx, hop3_addr);
806 if (hop2_new)
807 free_hop(ctx, hop2_addr);
808 if (hop1_new)
809 free_hop(ctx, hop1_addr);
810
811 return rc;
812}
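
The leaf PTE that _hl_mmu_map() above writes into the final hop is simply the physical address masked to its PTE field, OR'd with the "last" and "present" flag bits. A standalone sketch of that composition follows; the mask values are placeholders chosen for illustration, not the driver's real definitions.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical flag/mask values for illustration only; the real ones are
 * defined in the driver's MMU headers. */
#define PAGE_PRESENT_MASK  0x0000000000000001ull
#define LAST_MASK          0x0000000000000800ull
#define PTE_PHYS_ADDR_MASK 0xFFFFFFFFFFFFF000ull /* drop the low flag bits */

/* Build a leaf PTE the way _hl_mmu_map() does for the last hop. */
static uint64_t make_leaf_pte(uint64_t phys_addr)
{
	return (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK | PAGE_PRESENT_MASK;
}

int main(void)
{
	uint64_t pte = make_leaf_pte(0x123456789000ull);

	printf("leaf pte: 0x%llx\n", (unsigned long long)pte);
	return 0;
}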
813
814/*
815 * hl_mmu_map - maps a virtual addr to physical addr
816 *
817 * @ctx: pointer to the context structure
818 * @virt_addr: virt addr to map from
819 * @phys_addr: phys addr to map to
820 * @page_size: physical page size
821 *
822 * This function does the following:
823 * - Check that the virt addr is not mapped
824 * - Allocate pgts as necessary in order to map the virt addr to the phys
825 * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
826 *
827 * Because this function changes the page tables in the device and because it
828 * changes the MMU hash, it must be protected by a lock.
829 * However, because it maps only a single page, the lock should be implemented
830 * at a higher level in order to protect the entire mapping of the memory area.
831 */
832int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
833{
834 struct hl_device *hdev = ctx->hdev;
835 u64 real_virt_addr;
836 u32 real_page_size, npages;
837 int i, rc, mapped_cnt = 0;
838
839 if (!hdev->mmu_enable)
840 return 0;
841
842 /*
843 * The H/W handles mapping of 4KB/2MB pages. Hence, if the host page size
844 * is bigger, we break it into sub-pages and map them separately.
845 */
846 if ((page_size % PAGE_SIZE_2MB) == 0) {
847 real_page_size = PAGE_SIZE_2MB;
848 } else if ((page_size % PAGE_SIZE_4KB) == 0) {
849 real_page_size = PAGE_SIZE_4KB;
850 } else {
851 dev_err(hdev->dev,
852 "page size of %u is not 4KB nor 2MB aligned, can't map\n",
853 page_size);
854
855 return -EFAULT;
856 }
857
858 npages = page_size / real_page_size;
859 real_virt_addr = virt_addr;
860
861 for (i = 0 ; i < npages ; i++) {
862 rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
863 real_page_size);
864 if (rc)
865 goto err;
866
867 real_virt_addr += real_page_size;
868 mapped_cnt++;
869 }
870
871 return 0;
872
873err:
874 real_virt_addr = virt_addr;
875 for (i = 0 ; i < mapped_cnt ; i++) {
876 if (_hl_mmu_unmap(ctx, real_virt_addr))
877 dev_warn_ratelimited(hdev->dev,
878 "failed to unmap va: 0x%llx\n", real_virt_addr);
879
880 real_virt_addr += real_page_size;
881 }
882
883 return rc;
884}
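
When one of the sub-pages fails to map, hl_mmu_map() above unwinds only the mapped_cnt pages that already succeeded before returning the error. A small standalone sketch of that rollback shape, with hypothetical map_one()/unmap_one() stand-ins (page 2 fails on purpose):

#include <stdio.h>

#define NPAGES 4

/* Hypothetical stand-ins for per-page map/unmap. */
static int map_one(int i)    { return (i == 2) ? -1 : 0; }
static void unmap_one(int i) { printf("rolled back page %d\n", i); }

int main(void)
{
	int i, mapped_cnt = 0;

	for (i = 0; i < NPAGES; i++) {
		if (map_one(i))
			goto err;
		mapped_cnt++;
	}
	printf("all %d pages mapped\n", NPAGES);
	return 0;

err:
	/* Undo only the pages that were successfully mapped. */
	for (i = 0; i < mapped_cnt; i++)
		unmap_one(i);
	return 1;
}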
885
886/*
887 * hl_mmu_swap_out - marks all mappings of the given ctx as swapped out
888 *
889 * @ctx: pointer to the context structure
890 *
891 */
892void hl_mmu_swap_out(struct hl_ctx *ctx)
893{
894
895}
896
897/*
898 * hl_mmu_swap_in - marks all mappings of the given ctx as swapped in
899 *
900 * @ctx: pointer to the context structure
901 *
902 */
903void hl_mmu_swap_in(struct hl_ctx *ctx)
904{
905
906}
diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c
new file mode 100644
index 000000000000..c900ab15cceb
--- /dev/null
+++ b/drivers/misc/habanalabs/sysfs.c
@@ -0,0 +1,539 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "habanalabs.h"
9
10#include <linux/pci.h>
11
12#define SET_CLK_PKT_TIMEOUT 1000000 /* 1s */
13#define SET_PWR_PKT_TIMEOUT 1000000 /* 1s */
14
15long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
16{
17 struct armcp_packet pkt;
18 long result;
19 int rc;
20
21 memset(&pkt, 0, sizeof(pkt));
22
23 if (curr)
24 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_CURR_GET <<
25 ARMCP_PKT_CTL_OPCODE_SHIFT);
26 else
27 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_GET <<
28 ARMCP_PKT_CTL_OPCODE_SHIFT);
29 pkt.pll_index = __cpu_to_le32(pll_index);
30
31 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
32 SET_CLK_PKT_TIMEOUT, &result);
33
34 if (rc) {
35 dev_err(hdev->dev,
36 "Failed to get frequency of PLL %d, error %d\n",
37 pll_index, rc);
38 result = rc;
39 }
40
41 return result;
42}
43
44void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
45{
46 struct armcp_packet pkt;
47 int rc;
48
49 memset(&pkt, 0, sizeof(pkt));
50
51 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_SET <<
52 ARMCP_PKT_CTL_OPCODE_SHIFT);
53 pkt.pll_index = __cpu_to_le32(pll_index);
54 pkt.value = __cpu_to_le64(freq);
55
56 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
57 SET_CLK_PKT_TIMEOUT, NULL);
58
59 if (rc)
60 dev_err(hdev->dev,
61 "Failed to set frequency to PLL %d, error %d\n",
62 pll_index, rc);
63}
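
The helpers above all build an ARM CP packet the same way: shift the opcode into the ctl word, fill in the payload fields, and hand the packet to the ASIC's send_cpu_message() callback. A standalone sketch of that encoding follows; the opcode and shift values are placeholders rather than the driver's armcp definitions, and the endianness conversion is only noted in a comment.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical values for illustration; the driver takes them from its
 * armcp interface header. */
#define ARMCP_PKT_CTL_OPCODE_SHIFT 16
#define ARMCP_PACKET_FREQUENCY_SET 2

struct armcp_packet_sketch {
	uint32_t ctl;       /* opcode lives in the upper bits */
	uint32_t pll_index;
	uint64_t value;
};

int main(void)
{
	struct armcp_packet_sketch pkt;

	memset(&pkt, 0, sizeof(pkt));
	/* In the driver these assignments go through __cpu_to_le32/64. */
	pkt.ctl = ARMCP_PACKET_FREQUENCY_SET << ARMCP_PKT_CTL_OPCODE_SHIFT;
	pkt.pll_index = 1;
	pkt.value = 1600000000ull; /* requested frequency */

	printf("ctl=0x%08x pll=%u value=%llu\n", (unsigned)pkt.ctl,
	       (unsigned)pkt.pll_index, (unsigned long long)pkt.value);
	return 0;
}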
64
65u64 hl_get_max_power(struct hl_device *hdev)
66{
67 struct armcp_packet pkt;
68 long result;
69 int rc;
70
71 memset(&pkt, 0, sizeof(pkt));
72
73 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_MAX_POWER_GET <<
74 ARMCP_PKT_CTL_OPCODE_SHIFT);
75
76 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
77 SET_PWR_PKT_TIMEOUT, &result);
78
79 if (rc) {
80 dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
81 result = rc;
82 }
83
84 return result;
85}
86
87void hl_set_max_power(struct hl_device *hdev, u64 value)
88{
89 struct armcp_packet pkt;
90 int rc;
91
92 memset(&pkt, 0, sizeof(pkt));
93
94 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET <<
95 ARMCP_PKT_CTL_OPCODE_SHIFT);
96 pkt.value = __cpu_to_le64(value);
97
98 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
99 SET_PWR_PKT_TIMEOUT, NULL);
100
101 if (rc)
102 dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
103}
104
105static ssize_t pm_mng_profile_show(struct device *dev,
106 struct device_attribute *attr, char *buf)
107{
108 struct hl_device *hdev = dev_get_drvdata(dev);
109
110 if (hl_device_disabled_or_in_reset(hdev))
111 return -ENODEV;
112
113 return sprintf(buf, "%s\n",
114 (hdev->pm_mng_profile == PM_AUTO) ? "auto" :
115 (hdev->pm_mng_profile == PM_MANUAL) ? "manual" :
116 "unknown");
117}
118
119static ssize_t pm_mng_profile_store(struct device *dev,
120 struct device_attribute *attr, const char *buf, size_t count)
121{
122 struct hl_device *hdev = dev_get_drvdata(dev);
123
124 if (hl_device_disabled_or_in_reset(hdev)) {
125 count = -ENODEV;
126 goto out;
127 }
128
129 mutex_lock(&hdev->fd_open_cnt_lock);
130
131 if (atomic_read(&hdev->fd_open_cnt) > 0) {
132		dev_err(hdev->dev,
133			"Can't change PM profile while a user process has the device open\n");
134 count = -EPERM;
135 goto unlock_mutex;
136 }
137
138 if (strncmp("auto", buf, strlen("auto")) == 0) {
139 /* Make sure we are in LOW PLL when changing modes */
140 if (hdev->pm_mng_profile == PM_MANUAL) {
141 atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
142 hl_device_set_frequency(hdev, PLL_LOW);
143 hdev->pm_mng_profile = PM_AUTO;
144 }
145 } else if (strncmp("manual", buf, strlen("manual")) == 0) {
146 /* Make sure we are in LOW PLL when changing modes */
147 if (hdev->pm_mng_profile == PM_AUTO) {
148 flush_delayed_work(&hdev->work_freq);
149 hdev->pm_mng_profile = PM_MANUAL;
150 }
151 } else {
152 dev_err(hdev->dev, "value should be auto or manual\n");
153 count = -EINVAL;
154 goto unlock_mutex;
155 }
156
157unlock_mutex:
158 mutex_unlock(&hdev->fd_open_cnt_lock);
159out:
160 return count;
161}
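
The store callback above accepts either "auto" or "manual", comparing with strncmp() against the literal's length so the trailing newline that usually arrives with a sysfs write does not break the match. A tiny standalone sketch of that token matching, with a hypothetical parse_pm_profile() helper:

#include <stdio.h>
#include <string.h>

/* Returns 0 for "auto", 1 for "manual", -1 otherwise, ignoring any trailing
 * characters such as the newline a sysfs write typically carries. */
static int parse_pm_profile(const char *buf)
{
	if (strncmp("auto", buf, strlen("auto")) == 0)
		return 0;
	if (strncmp("manual", buf, strlen("manual")) == 0)
		return 1;
	return -1;
}

int main(void)
{
	printf("auto (with newline) -> %d\n", parse_pm_profile("auto\n"));
	printf("manual              -> %d\n", parse_pm_profile("manual"));
	printf("bogus               -> %d\n", parse_pm_profile("bogus"));
	return 0;
}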
162
163static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr,
164 char *buf)
165{
166 struct hl_device *hdev = dev_get_drvdata(dev);
167
168 if (hl_device_disabled_or_in_reset(hdev))
169 return -ENODEV;
170
171 return sprintf(buf, "%u\n", hdev->high_pll);
172}
173
174static ssize_t high_pll_store(struct device *dev, struct device_attribute *attr,
175 const char *buf, size_t count)
176{
177 struct hl_device *hdev = dev_get_drvdata(dev);
178 long value;
179 int rc;
180
181 if (hl_device_disabled_or_in_reset(hdev)) {
182 count = -ENODEV;
183 goto out;
184 }
185
186 rc = kstrtoul(buf, 0, &value);
187
188 if (rc) {
189 count = -EINVAL;
190 goto out;
191 }
192
193 hdev->high_pll = value;
194
195out:
196 return count;
197}
198
199static ssize_t uboot_ver_show(struct device *dev, struct device_attribute *attr,
200 char *buf)
201{
202 struct hl_device *hdev = dev_get_drvdata(dev);
203
204 return sprintf(buf, "%s\n", hdev->asic_prop.uboot_ver);
205}
206
207static ssize_t armcp_kernel_ver_show(struct device *dev,
208 struct device_attribute *attr, char *buf)
209{
210 struct hl_device *hdev = dev_get_drvdata(dev);
211
212 return sprintf(buf, "%s", hdev->asic_prop.armcp_info.kernel_version);
213}
214
215static ssize_t armcp_ver_show(struct device *dev, struct device_attribute *attr,
216 char *buf)
217{
218 struct hl_device *hdev = dev_get_drvdata(dev);
219
220 return sprintf(buf, "%s\n", hdev->asic_prop.armcp_info.armcp_version);
221}
222
223static ssize_t cpld_ver_show(struct device *dev, struct device_attribute *attr,
224 char *buf)
225{
226 struct hl_device *hdev = dev_get_drvdata(dev);
227
228 return sprintf(buf, "0x%08x\n",
229 hdev->asic_prop.armcp_info.cpld_version);
230}
231
232static ssize_t infineon_ver_show(struct device *dev,
233 struct device_attribute *attr, char *buf)
234{
235 struct hl_device *hdev = dev_get_drvdata(dev);
236
237 return sprintf(buf, "0x%04x\n",
238 hdev->asic_prop.armcp_info.infineon_version);
239}
240
241static ssize_t fuse_ver_show(struct device *dev, struct device_attribute *attr,
242 char *buf)
243{
244 struct hl_device *hdev = dev_get_drvdata(dev);
245
246 return sprintf(buf, "%s\n", hdev->asic_prop.armcp_info.fuse_version);
247}
248
249static ssize_t thermal_ver_show(struct device *dev,
250 struct device_attribute *attr, char *buf)
251{
252 struct hl_device *hdev = dev_get_drvdata(dev);
253
254 return sprintf(buf, "%s", hdev->asic_prop.armcp_info.thermal_version);
255}
256
257static ssize_t preboot_btl_ver_show(struct device *dev,
258 struct device_attribute *attr, char *buf)
259{
260 struct hl_device *hdev = dev_get_drvdata(dev);
261
262 return sprintf(buf, "%s\n", hdev->asic_prop.preboot_ver);
263}
264
265static ssize_t soft_reset_store(struct device *dev,
266 struct device_attribute *attr, const char *buf,
267 size_t count)
268{
269 struct hl_device *hdev = dev_get_drvdata(dev);
270 long value;
271 int rc;
272
273 rc = kstrtoul(buf, 0, &value);
274
275 if (rc) {
276 count = -EINVAL;
277 goto out;
278 }
279
280 hl_device_reset(hdev, false, false);
281
282out:
283 return count;
284}
285
286static ssize_t hard_reset_store(struct device *dev,
287 struct device_attribute *attr,
288 const char *buf, size_t count)
289{
290 struct hl_device *hdev = dev_get_drvdata(dev);
291 long value;
292 int rc;
293
294 rc = kstrtoul(buf, 0, &value);
295
296 if (rc) {
297 count = -EINVAL;
298 goto out;
299 }
300
301 hl_device_reset(hdev, true, false);
302
303out:
304 return count;
305}
306
307static ssize_t device_type_show(struct device *dev,
308 struct device_attribute *attr, char *buf)
309{
310 struct hl_device *hdev = dev_get_drvdata(dev);
311 char *str;
312
313 switch (hdev->asic_type) {
314 case ASIC_GOYA:
315 str = "GOYA";
316 break;
317 default:
318 dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
319 hdev->asic_type);
320 return -EINVAL;
321 }
322
323 return sprintf(buf, "%s\n", str);
324}
325
326static ssize_t pci_addr_show(struct device *dev, struct device_attribute *attr,
327 char *buf)
328{
329 struct hl_device *hdev = dev_get_drvdata(dev);
330
331 /* Use dummy, fixed address for simulator */
332 if (!hdev->pdev)
333 return sprintf(buf, "0000:%02d:00.0\n", hdev->id);
334
335 return sprintf(buf, "%04x:%02x:%02x.%x\n",
336 pci_domain_nr(hdev->pdev->bus),
337 hdev->pdev->bus->number,
338 PCI_SLOT(hdev->pdev->devfn),
339 PCI_FUNC(hdev->pdev->devfn));
340}
341
342static ssize_t status_show(struct device *dev, struct device_attribute *attr,
343 char *buf)
344{
345 struct hl_device *hdev = dev_get_drvdata(dev);
346 char *str;
347
348 if (atomic_read(&hdev->in_reset))
349 str = "In reset";
350 else if (hdev->disabled)
351 str = "Malfunction";
352 else
353 str = "Operational";
354
355 return sprintf(buf, "%s\n", str);
356}
357
358static ssize_t write_open_cnt_show(struct device *dev,
359 struct device_attribute *attr, char *buf)
360{
361 struct hl_device *hdev = dev_get_drvdata(dev);
362
363 return sprintf(buf, "%d\n", hdev->user_ctx ? 1 : 0);
364}
365
366static ssize_t soft_reset_cnt_show(struct device *dev,
367 struct device_attribute *attr, char *buf)
368{
369 struct hl_device *hdev = dev_get_drvdata(dev);
370
371 return sprintf(buf, "%d\n", hdev->soft_reset_cnt);
372}
373
374static ssize_t hard_reset_cnt_show(struct device *dev,
375 struct device_attribute *attr, char *buf)
376{
377 struct hl_device *hdev = dev_get_drvdata(dev);
378
379 return sprintf(buf, "%d\n", hdev->hard_reset_cnt);
380}
381
382static ssize_t max_power_show(struct device *dev, struct device_attribute *attr,
383 char *buf)
384{
385 struct hl_device *hdev = dev_get_drvdata(dev);
386 long val;
387
388 if (hl_device_disabled_or_in_reset(hdev))
389 return -ENODEV;
390
391 val = hl_get_max_power(hdev);
392
393 return sprintf(buf, "%lu\n", val);
394}
395
396static ssize_t max_power_store(struct device *dev,
397 struct device_attribute *attr, const char *buf, size_t count)
398{
399 struct hl_device *hdev = dev_get_drvdata(dev);
400 unsigned long value;
401 int rc;
402
403 if (hl_device_disabled_or_in_reset(hdev)) {
404 count = -ENODEV;
405 goto out;
406 }
407
408 rc = kstrtoul(buf, 0, &value);
409
410 if (rc) {
411 count = -EINVAL;
412 goto out;
413 }
414
415 hdev->max_power = value;
416 hl_set_max_power(hdev, value);
417
418out:
419 return count;
420}
421
422static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj,
423 struct bin_attribute *attr, char *buf, loff_t offset,
424 size_t max_size)
425{
426 struct device *dev = container_of(kobj, struct device, kobj);
427 struct hl_device *hdev = dev_get_drvdata(dev);
428 char *data;
429 int rc;
430
431 if (!max_size)
432 return -EINVAL;
433
434 data = kzalloc(max_size, GFP_KERNEL);
435 if (!data)
436 return -ENOMEM;
437
438 rc = hdev->asic_funcs->get_eeprom_data(hdev, data, max_size);
439 if (rc)
440 goto out;
441
442 memcpy(buf, data, max_size);
443
444out:
445 kfree(data);
446
447 return max_size;
448}
449
450static DEVICE_ATTR_RO(armcp_kernel_ver);
451static DEVICE_ATTR_RO(armcp_ver);
452static DEVICE_ATTR_RO(cpld_ver);
453static DEVICE_ATTR_RO(device_type);
454static DEVICE_ATTR_RO(fuse_ver);
455static DEVICE_ATTR_WO(hard_reset);
456static DEVICE_ATTR_RO(hard_reset_cnt);
457static DEVICE_ATTR_RW(high_pll);
458static DEVICE_ATTR_RO(infineon_ver);
459static DEVICE_ATTR_RW(max_power);
460static DEVICE_ATTR_RO(pci_addr);
461static DEVICE_ATTR_RW(pm_mng_profile);
462static DEVICE_ATTR_RO(preboot_btl_ver);
463static DEVICE_ATTR_WO(soft_reset);
464static DEVICE_ATTR_RO(soft_reset_cnt);
465static DEVICE_ATTR_RO(status);
466static DEVICE_ATTR_RO(thermal_ver);
467static DEVICE_ATTR_RO(uboot_ver);
468static DEVICE_ATTR_RO(write_open_cnt);
469
470static struct bin_attribute bin_attr_eeprom = {
471 .attr = {.name = "eeprom", .mode = (0444)},
472 .size = PAGE_SIZE,
473 .read = eeprom_read_handler
474};
475
476static struct attribute *hl_dev_attrs[] = {
477 &dev_attr_armcp_kernel_ver.attr,
478 &dev_attr_armcp_ver.attr,
479 &dev_attr_cpld_ver.attr,
480 &dev_attr_device_type.attr,
481 &dev_attr_fuse_ver.attr,
482 &dev_attr_hard_reset.attr,
483 &dev_attr_hard_reset_cnt.attr,
484 &dev_attr_high_pll.attr,
485 &dev_attr_infineon_ver.attr,
486 &dev_attr_max_power.attr,
487 &dev_attr_pci_addr.attr,
488 &dev_attr_pm_mng_profile.attr,
489 &dev_attr_preboot_btl_ver.attr,
490 &dev_attr_soft_reset.attr,
491 &dev_attr_soft_reset_cnt.attr,
492 &dev_attr_status.attr,
493 &dev_attr_thermal_ver.attr,
494 &dev_attr_uboot_ver.attr,
495 &dev_attr_write_open_cnt.attr,
496 NULL,
497};
498
499static struct bin_attribute *hl_dev_bin_attrs[] = {
500 &bin_attr_eeprom,
501 NULL
502};
503
504static struct attribute_group hl_dev_attr_group = {
505 .attrs = hl_dev_attrs,
506 .bin_attrs = hl_dev_bin_attrs,
507};
508
509static struct attribute_group hl_dev_clks_attr_group;
510
511static const struct attribute_group *hl_dev_attr_groups[] = {
512 &hl_dev_attr_group,
513 &hl_dev_clks_attr_group,
514 NULL,
515};
516
517int hl_sysfs_init(struct hl_device *hdev)
518{
519 int rc;
520
521 hdev->pm_mng_profile = PM_AUTO;
522 hdev->max_power = hdev->asic_prop.max_power_default;
523
524 hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group);
525
526 rc = device_add_groups(hdev->dev, hl_dev_attr_groups);
527 if (rc) {
528 dev_err(hdev->dev,
529 "Failed to add groups to device, error %d\n", rc);
530 return rc;
531 }
532
533 return 0;
534}
535
536void hl_sysfs_fini(struct hl_device *hdev)
537{
538 device_remove_groups(hdev->dev, hl_dev_attr_groups);
539}
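
The attribute plumbing above follows the usual DEVICE_ATTR_RO / attribute_group / device_add_groups() pattern. Below is a minimal sketch of the same shape, assuming a struct device that was already registered elsewhere; the attribute name "foo" and example_value are hypothetical, not part of the driver.

// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>

static int example_value = 42;

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", example_value);
}
static DEVICE_ATTR_RO(foo);

static struct attribute *example_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};

static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

static const struct attribute_group *example_attr_groups[] = {
	&example_attr_group,
	NULL,
};

/* Called once the device has been registered, mirroring hl_sysfs_init(). */
int example_sysfs_init(struct device *dev)
{
	return device_add_groups(dev, example_attr_groups);
}

void example_sysfs_fini(struct device *dev)
{
	device_remove_groups(dev, example_attr_groups);
}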
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index e9c9ef52c76a..927309b86bab 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -29,6 +29,13 @@ static struct class *ilo_class;
29static unsigned int ilo_major; 29static unsigned int ilo_major;
30static unsigned int max_ccb = 16; 30static unsigned int max_ccb = 16;
31static char ilo_hwdev[MAX_ILO_DEV]; 31static char ilo_hwdev[MAX_ILO_DEV];
32static const struct pci_device_id ilo_blacklist[] = {
33 /* auxiliary iLO */
34 {PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP, 0x1979)},
35 /* CL */
36 {PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP_3PAR, 0x0289)},
37 {}
38};
32 39
33static inline int get_entry_id(int entry) 40static inline int get_entry_id(int entry)
34{ 41{
@@ -763,9 +770,10 @@ static int ilo_probe(struct pci_dev *pdev,
763 int devnum, minor, start, error = 0; 770 int devnum, minor, start, error = 0;
764 struct ilo_hwinfo *ilo_hw; 771 struct ilo_hwinfo *ilo_hw;
765 772
766 /* Ignore subsystem_device = 0x1979 (set by BIOS) */ 773 if (pci_match_id(ilo_blacklist, pdev)) {
767 if (pdev->subsystem_device == 0x1979) 774 dev_dbg(&pdev->dev, "Not supported on this device\n");
768 return 0; 775 return -ENODEV;
776 }
769 777
770 if (max_ccb > MAX_CCB) 778 if (max_ccb > MAX_CCB)
771 max_ccb = MAX_CCB; 779 max_ccb = MAX_CCB;
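
The hpilo change above replaces the hard-coded subsystem-ID check with a pci_device_id deny list consulted via pci_match_id() at probe time. A hedged, fragment-style sketch of that pattern; the IDs below are placeholders for real hardware and the rest of the probe body is elided.

// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>

static const struct pci_device_id example_denylist[] = {
	/* match on subsystem IDs as well, like the auxiliary iLO entries */
	{ PCI_DEVICE_SUB(0x1234, 0x5678, 0x1234, 0x0001) },
	{ }
};

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (pci_match_id(example_denylist, pdev)) {
		dev_dbg(&pdev->dev, "Not supported on this device\n");
		return -ENODEV;
	}

	/* ... normal probe path ... */
	return 0;
}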
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 81a0541ef3ac..294fb2f66bfe 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -146,6 +146,8 @@ static struct ics932s401_data *ics932s401_update_device(struct device *dev)
146 */ 146 */
147 for (i = 0; i < NUM_MIRRORED_REGS; i++) { 147 for (i = 0; i < NUM_MIRRORED_REGS; i++) {
148 temp = i2c_smbus_read_word_data(client, regs_to_copy[i]); 148 temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
149 if (temp < 0)
150 data->regs[regs_to_copy[i]] = 0;
149 data->regs[regs_to_copy[i]] = temp >> 8; 151 data->regs[regs_to_copy[i]] = temp >> 8;
150 } 152 }
151 153
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 2837dc77478e..b51cf182b031 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -37,16 +37,9 @@
37#include <linux/kprobes.h> 37#include <linux/kprobes.h>
38#include <linux/list.h> 38#include <linux/list.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/hrtimer.h>
42#include <linux/slab.h> 40#include <linux/slab.h>
43#include <scsi/scsi_cmnd.h>
44#include <linux/debugfs.h> 41#include <linux/debugfs.h>
45 42
46#ifdef CONFIG_IDE
47#include <linux/ide.h>
48#endif
49
50#define DEFAULT_COUNT 10 43#define DEFAULT_COUNT 10
51 44
52static int lkdtm_debugfs_open(struct inode *inode, struct file *file); 45static int lkdtm_debugfs_open(struct inode *inode, struct file *file);
@@ -102,9 +95,7 @@ static struct crashpoint crashpoints[] = {
102 CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"), 95 CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
103 CRASHPOINT("TIMERADD", "hrtimer_start"), 96 CRASHPOINT("TIMERADD", "hrtimer_start"),
104 CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"), 97 CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"),
105# ifdef CONFIG_IDE
106 CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"), 98 CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"),
107# endif
108#endif 99#endif
109}; 100};
110 101
@@ -152,7 +143,9 @@ static const struct crashtype crashtypes[] = {
152 CRASHTYPE(EXEC_VMALLOC), 143 CRASHTYPE(EXEC_VMALLOC),
153 CRASHTYPE(EXEC_RODATA), 144 CRASHTYPE(EXEC_RODATA),
154 CRASHTYPE(EXEC_USERSPACE), 145 CRASHTYPE(EXEC_USERSPACE),
146 CRASHTYPE(EXEC_NULL),
155 CRASHTYPE(ACCESS_USERSPACE), 147 CRASHTYPE(ACCESS_USERSPACE),
148 CRASHTYPE(ACCESS_NULL),
156 CRASHTYPE(WRITE_RO), 149 CRASHTYPE(WRITE_RO),
157 CRASHTYPE(WRITE_RO_AFTER_INIT), 150 CRASHTYPE(WRITE_RO_AFTER_INIT),
158 CRASHTYPE(WRITE_KERN), 151 CRASHTYPE(WRITE_KERN),
@@ -347,9 +340,9 @@ static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
347 if (buf == NULL) 340 if (buf == NULL)
348 return -ENOMEM; 341 return -ENOMEM;
349 342
350 n = snprintf(buf, PAGE_SIZE, "Available crash types:\n"); 343 n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n");
351 for (i = 0; i < ARRAY_SIZE(crashtypes); i++) { 344 for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
352 n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", 345 n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
353 crashtypes[i].name); 346 crashtypes[i].name);
354 } 347 }
355 buf[n] = '\0'; 348 buf[n] = '\0';
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 3c6fd327e166..b69ee004a3f7 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -45,7 +45,9 @@ void lkdtm_EXEC_KMALLOC(void);
45void lkdtm_EXEC_VMALLOC(void); 45void lkdtm_EXEC_VMALLOC(void);
46void lkdtm_EXEC_RODATA(void); 46void lkdtm_EXEC_RODATA(void);
47void lkdtm_EXEC_USERSPACE(void); 47void lkdtm_EXEC_USERSPACE(void);
48void lkdtm_EXEC_NULL(void);
48void lkdtm_ACCESS_USERSPACE(void); 49void lkdtm_ACCESS_USERSPACE(void);
50void lkdtm_ACCESS_NULL(void);
49 51
50/* lkdtm_refcount.c */ 52/* lkdtm_refcount.c */
51void lkdtm_REFCOUNT_INC_OVERFLOW(void); 53void lkdtm_REFCOUNT_INC_OVERFLOW(void);
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 53b85c9d16b8..62f76d506f04 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -47,7 +47,7 @@ static noinline void execute_location(void *dst, bool write)
47{ 47{
48 void (*func)(void) = dst; 48 void (*func)(void) = dst;
49 49
50 pr_info("attempting ok execution at %p\n", do_nothing); 50 pr_info("attempting ok execution at %px\n", do_nothing);
51 do_nothing(); 51 do_nothing();
52 52
53 if (write == CODE_WRITE) { 53 if (write == CODE_WRITE) {
@@ -55,7 +55,7 @@ static noinline void execute_location(void *dst, bool write)
55 flush_icache_range((unsigned long)dst, 55 flush_icache_range((unsigned long)dst,
56 (unsigned long)dst + EXEC_SIZE); 56 (unsigned long)dst + EXEC_SIZE);
57 } 57 }
58 pr_info("attempting bad execution at %p\n", func); 58 pr_info("attempting bad execution at %px\n", func);
59 func(); 59 func();
60} 60}
61 61
@@ -66,14 +66,14 @@ static void execute_user_location(void *dst)
66 /* Intentionally crossing kernel/user memory boundary. */ 66 /* Intentionally crossing kernel/user memory boundary. */
67 void (*func)(void) = dst; 67 void (*func)(void) = dst;
68 68
69 pr_info("attempting ok execution at %p\n", do_nothing); 69 pr_info("attempting ok execution at %px\n", do_nothing);
70 do_nothing(); 70 do_nothing();
71 71
72 copied = access_process_vm(current, (unsigned long)dst, do_nothing, 72 copied = access_process_vm(current, (unsigned long)dst, do_nothing,
73 EXEC_SIZE, FOLL_WRITE); 73 EXEC_SIZE, FOLL_WRITE);
74 if (copied < EXEC_SIZE) 74 if (copied < EXEC_SIZE)
75 return; 75 return;
76 pr_info("attempting bad execution at %p\n", func); 76 pr_info("attempting bad execution at %px\n", func);
77 func(); 77 func();
78} 78}
79 79
@@ -82,7 +82,7 @@ void lkdtm_WRITE_RO(void)
82 /* Explicitly cast away "const" for the test. */ 82 /* Explicitly cast away "const" for the test. */
83 unsigned long *ptr = (unsigned long *)&rodata; 83 unsigned long *ptr = (unsigned long *)&rodata;
84 84
85 pr_info("attempting bad rodata write at %p\n", ptr); 85 pr_info("attempting bad rodata write at %px\n", ptr);
86 *ptr ^= 0xabcd1234; 86 *ptr ^= 0xabcd1234;
87} 87}
88 88
@@ -100,7 +100,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
100 return; 100 return;
101 } 101 }
102 102
103 pr_info("attempting bad ro_after_init write at %p\n", ptr); 103 pr_info("attempting bad ro_after_init write at %px\n", ptr);
104 *ptr ^= 0xabcd1234; 104 *ptr ^= 0xabcd1234;
105} 105}
106 106
@@ -112,7 +112,7 @@ void lkdtm_WRITE_KERN(void)
112 size = (unsigned long)do_overwritten - (unsigned long)do_nothing; 112 size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
113 ptr = (unsigned char *)do_overwritten; 113 ptr = (unsigned char *)do_overwritten;
114 114
115 pr_info("attempting bad %zu byte write at %p\n", size, ptr); 115 pr_info("attempting bad %zu byte write at %px\n", size, ptr);
116 memcpy(ptr, (unsigned char *)do_nothing, size); 116 memcpy(ptr, (unsigned char *)do_nothing, size);
117 flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size)); 117 flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
118 118
@@ -164,6 +164,11 @@ void lkdtm_EXEC_USERSPACE(void)
164 vm_munmap(user_addr, PAGE_SIZE); 164 vm_munmap(user_addr, PAGE_SIZE);
165} 165}
166 166
167void lkdtm_EXEC_NULL(void)
168{
169 execute_location(NULL, CODE_AS_IS);
170}
171
167void lkdtm_ACCESS_USERSPACE(void) 172void lkdtm_ACCESS_USERSPACE(void)
168{ 173{
169 unsigned long user_addr, tmp = 0; 174 unsigned long user_addr, tmp = 0;
@@ -185,16 +190,29 @@ void lkdtm_ACCESS_USERSPACE(void)
185 190
186 ptr = (unsigned long *)user_addr; 191 ptr = (unsigned long *)user_addr;
187 192
188 pr_info("attempting bad read at %p\n", ptr); 193 pr_info("attempting bad read at %px\n", ptr);
189 tmp = *ptr; 194 tmp = *ptr;
190 tmp += 0xc0dec0de; 195 tmp += 0xc0dec0de;
191 196
192 pr_info("attempting bad write at %p\n", ptr); 197 pr_info("attempting bad write at %px\n", ptr);
193 *ptr = tmp; 198 *ptr = tmp;
194 199
195 vm_munmap(user_addr, PAGE_SIZE); 200 vm_munmap(user_addr, PAGE_SIZE);
196} 201}
197 202
203void lkdtm_ACCESS_NULL(void)
204{
205 unsigned long tmp;
206 unsigned long *ptr = (unsigned long *)NULL;
207
208 pr_info("attempting bad read at %px\n", ptr);
209 tmp = *ptr;
210 tmp += 0xc0dec0de;
211
212 pr_info("attempting bad write at %px\n", ptr);
213 *ptr = tmp;
214}
215
198void __init lkdtm_perms_init(void) 216void __init lkdtm_perms_init(void)
199{ 217{
200 /* Make sure we can write to __ro_after_init values during __init */ 218 /* Make sure we can write to __ro_after_init values during __init */
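
The lkdtm hunk above switches its log lines from %p to %px: %p prints a hashed value so kernel addresses do not leak into dmesg, while %px prints the raw pointer and is reserved for debug output where the real address is the point. A tiny module sketch showing the difference (the module name pxdemo is hypothetical):

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/kernel.h>

static int value;

static int __init pxdemo_init(void)
{
	pr_info("hashed: %p\n", &value);  /* obfuscated by default */
	pr_info("raw:    %px\n", &value); /* actual kernel address */
	return 0;
}

static void __exit pxdemo_exit(void)
{
}

module_init(pxdemo_init);
module_exit(pxdemo_exit);
MODULE_LICENSE("GPL");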
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index c49e1d2269af..74e2c667dce0 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -43,3 +43,13 @@ config INTEL_MEI_TXE
43 43
44 Supported SoCs: 44 Supported SoCs:
45 Intel Bay Trail 45 Intel Bay Trail
46
47config INTEL_MEI_HDCP
48 tristate "Intel HDCP2.2 services of ME Interface"
49 select INTEL_MEI_ME
50 depends on DRM_I915
51 help
52 MEI Support for HDCP2.2 Services on Intel platforms.
53
54	  Enables the ME FW services required for HDCP2.2 support through
55	  Intel's I915 display driver.
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index d9215fc4e499..8c2d9565a4cb 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -24,3 +24,5 @@ mei-txe-objs += hw-txe.o
24 24
25mei-$(CONFIG_EVENT_TRACING) += mei-trace.o 25mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
26CFLAGS_mei-trace.o = -I$(src) 26CFLAGS_mei-trace.o = -I$(src)
27
28obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 80215c312f0e..5fcac02233af 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -40,6 +40,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
40#define MEI_UUID_MKHIF_FIX UUID_LE(0x55213584, 0x9a29, 0x4916, \ 40#define MEI_UUID_MKHIF_FIX UUID_LE(0x55213584, 0x9a29, 0x4916, \
41 0xba, 0xdf, 0xf, 0xb7, 0xed, 0x68, 0x2a, 0xeb) 41 0xba, 0xdf, 0xf, 0xb7, 0xed, 0x68, 0x2a, 0xeb)
42 42
43#define MEI_UUID_HDCP UUID_LE(0xB638AB7E, 0x94E2, 0x4EA2, \
44 0xA5, 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
45
43#define MEI_UUID_ANY NULL_UUID_LE 46#define MEI_UUID_ANY NULL_UUID_LE
44 47
45/** 48/**
@@ -71,6 +74,18 @@ static void blacklist(struct mei_cl_device *cldev)
71 cldev->do_match = 0; 74 cldev->do_match = 0;
72} 75}
73 76
77/**
78 * whitelist - forcefully whitelist client
79 *
80 * @cldev: me clients device
81 */
82static void whitelist(struct mei_cl_device *cldev)
83{
84 dev_dbg(&cldev->dev, "running hook %s\n", __func__);
85
86 cldev->do_match = 1;
87}
88
74#define OSTYPE_LINUX 2 89#define OSTYPE_LINUX 2
75struct mei_os_ver { 90struct mei_os_ver {
76 __le16 build; 91 __le16 build;
@@ -472,6 +487,7 @@ static struct mei_fixup {
472 MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc), 487 MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
473 MEI_FIXUP(MEI_UUID_WD, mei_wd), 488 MEI_FIXUP(MEI_UUID_WD, mei_wd),
474 MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix), 489 MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
490 MEI_FIXUP(MEI_UUID_HDCP, whitelist),
475}; 491};
476 492
477/** 493/**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index fc3872fe7b25..65bec998eb6e 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -28,7 +28,6 @@
28#include "client.h" 28#include "client.h"
29 29
30#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver) 30#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
31#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev)
32 31
33/** 32/**
34 * __mei_cl_send - internal client send (write) 33 * __mei_cl_send - internal client send (write)
@@ -541,17 +540,9 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
541 goto out; 540 goto out;
542 } 541 }
543 542
544 if (!mei_cl_bus_module_get(cldev)) {
545 dev_err(&cldev->dev, "get hw module failed");
546 ret = -ENODEV;
547 goto out;
548 }
549
550 ret = mei_cl_connect(cl, cldev->me_cl, NULL); 543 ret = mei_cl_connect(cl, cldev->me_cl, NULL);
551 if (ret < 0) { 544 if (ret < 0)
552 dev_err(&cldev->dev, "cannot connect\n"); 545 dev_err(&cldev->dev, "cannot connect\n");
553 mei_cl_bus_module_put(cldev);
554 }
555 546
556out: 547out:
557 mutex_unlock(&bus->device_lock); 548 mutex_unlock(&bus->device_lock);
@@ -614,7 +605,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
614 if (err < 0) 605 if (err < 0)
615 dev_err(bus->dev, "Could not disconnect from the ME client\n"); 606 dev_err(bus->dev, "Could not disconnect from the ME client\n");
616 607
617 mei_cl_bus_module_put(cldev);
618out: 608out:
619 /* Flush queues and remove any pending read */ 609 /* Flush queues and remove any pending read */
620 mei_cl_flush_queues(cl, NULL); 610 mei_cl_flush_queues(cl, NULL);
@@ -725,9 +715,16 @@ static int mei_cl_device_probe(struct device *dev)
725 if (!id) 715 if (!id)
726 return -ENODEV; 716 return -ENODEV;
727 717
718 if (!mei_cl_bus_module_get(cldev)) {
719 dev_err(&cldev->dev, "get hw module failed");
720 return -ENODEV;
721 }
722
728 ret = cldrv->probe(cldev, id); 723 ret = cldrv->probe(cldev, id);
729 if (ret) 724 if (ret) {
725 mei_cl_bus_module_put(cldev);
730 return ret; 726 return ret;
727 }
731 728
732 __module_get(THIS_MODULE); 729 __module_get(THIS_MODULE);
733 return 0; 730 return 0;
@@ -755,6 +752,7 @@ static int mei_cl_device_remove(struct device *dev)
755 752
756 mei_cldev_unregister_callbacks(cldev); 753 mei_cldev_unregister_callbacks(cldev);
757 754
755 mei_cl_bus_module_put(cldev);
758 module_put(THIS_MODULE); 756 module_put(THIS_MODULE);
759 dev->driver = NULL; 757 dev->driver = NULL;
760 return ret; 758 return ret;
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 8f7616557c97..e6207f614816 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1029,29 +1029,36 @@ static void mei_hbm_config_features(struct mei_device *dev)
1029 dev->version.minor_version >= HBM_MINOR_VERSION_PGI) 1029 dev->version.minor_version >= HBM_MINOR_VERSION_PGI)
1030 dev->hbm_f_pg_supported = 1; 1030 dev->hbm_f_pg_supported = 1;
1031 1031
1032 dev->hbm_f_dc_supported = 0;
1032 if (dev->version.major_version >= HBM_MAJOR_VERSION_DC) 1033 if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
1033 dev->hbm_f_dc_supported = 1; 1034 dev->hbm_f_dc_supported = 1;
1034 1035
1036 dev->hbm_f_ie_supported = 0;
1035 if (dev->version.major_version >= HBM_MAJOR_VERSION_IE) 1037 if (dev->version.major_version >= HBM_MAJOR_VERSION_IE)
1036 dev->hbm_f_ie_supported = 1; 1038 dev->hbm_f_ie_supported = 1;
1037 1039
1038 /* disconnect on connect timeout instead of link reset */ 1040 /* disconnect on connect timeout instead of link reset */
1041 dev->hbm_f_dot_supported = 0;
1039 if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT) 1042 if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
1040 dev->hbm_f_dot_supported = 1; 1043 dev->hbm_f_dot_supported = 1;
1041 1044
1042 /* Notification Event Support */ 1045 /* Notification Event Support */
1046 dev->hbm_f_ev_supported = 0;
1043 if (dev->version.major_version >= HBM_MAJOR_VERSION_EV) 1047 if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
1044 dev->hbm_f_ev_supported = 1; 1048 dev->hbm_f_ev_supported = 1;
1045 1049
1046 /* Fixed Address Client Support */ 1050 /* Fixed Address Client Support */
1051 dev->hbm_f_fa_supported = 0;
1047 if (dev->version.major_version >= HBM_MAJOR_VERSION_FA) 1052 if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
1048 dev->hbm_f_fa_supported = 1; 1053 dev->hbm_f_fa_supported = 1;
1049 1054
1050 /* OS ver message Support */ 1055 /* OS ver message Support */
1056 dev->hbm_f_os_supported = 0;
1051 if (dev->version.major_version >= HBM_MAJOR_VERSION_OS) 1057 if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
1052 dev->hbm_f_os_supported = 1; 1058 dev->hbm_f_os_supported = 1;
1053 1059
1054 /* DMA Ring Support */ 1060 /* DMA Ring Support */
1061 dev->hbm_f_dr_supported = 0;
1055 if (dev->version.major_version > HBM_MAJOR_VERSION_DR || 1062 if (dev->version.major_version > HBM_MAJOR_VERSION_DR ||
1056 (dev->version.major_version == HBM_MAJOR_VERSION_DR && 1063 (dev->version.major_version == HBM_MAJOR_VERSION_DR &&
1057 dev->version.minor_version >= HBM_MINOR_VERSION_DR)) 1064 dev->version.minor_version >= HBM_MINOR_VERSION_DR))
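
The hbm.c hunk above explicitly zeroes each feature flag before re-deriving it from the negotiated HBM version, so a link reset against older firmware cannot leave a stale capability set behind. A standalone sketch of that reset-then-derive pattern, with made-up version thresholds and flags:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical version thresholds, mirroring the HBM_MAJOR_VERSION_* checks. */
#define MAJOR_VERSION_DC 2
#define MAJOR_VERSION_EV 2

struct features {
	bool dc_supported;
	bool ev_supported;
};

/* Clear every flag first, then re-derive it from the negotiated version, so
 * nothing stale survives a reconfiguration against an older peer. */
static void config_features(struct features *f, int major)
{
	f->dc_supported = false;
	if (major >= MAJOR_VERSION_DC)
		f->dc_supported = true;

	f->ev_supported = false;
	if (major >= MAJOR_VERSION_EV)
		f->ev_supported = true;
}

int main(void)
{
	struct features f = { .dc_supported = true, .ev_supported = true };

	config_features(&f, 1); /* downgrade: both flags must drop to false */
	printf("dc=%d ev=%d\n", f.dc_supported, f.ev_supported);
	return 0;
}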
diff --git a/drivers/misc/mei/hdcp/Makefile b/drivers/misc/mei/hdcp/Makefile
new file mode 100644
index 000000000000..adbe7506282d
--- /dev/null
+++ b/drivers/misc/mei/hdcp/Makefile
@@ -0,0 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0
2#
3# Copyright (c) 2019, Intel Corporation.
4#
5# Makefile - HDCP client driver for Intel MEI Bus Driver.
6
7obj-$(CONFIG_INTEL_MEI_HDCP) += mei_hdcp.o
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
new file mode 100644
index 000000000000..90b6ae8e9dae
--- /dev/null
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -0,0 +1,849 @@
1// SPDX-License-Identifier: (GPL-2.0)
2/*
3 * Copyright © 2019 Intel Corporation
4 *
5 * Mei_hdcp.c: HDCP client driver for mei bus
6 *
7 * Author:
8 * Ramalingam C <ramalingam.c@intel.com>
9 */
10
11/**
12 * DOC: MEI_HDCP Client Driver
13 *
14 * This is a client driver to the mei_bus that makes the HDCP2.2 services of
15 * ME FW available to interested consumers such as I915.
16 *
17 * This module acts as a translation layer between the HDCP protocol
18 * implementer (I915) and ME FW, translating HDCP2.2 authentication
19 * messages to ME FW command payloads and vice versa.
20 */
21
22#include <linux/module.h>
23#include <linux/slab.h>
24#include <linux/uuid.h>
25#include <linux/mei_cl_bus.h>
26#include <linux/component.h>
27#include <drm/drm_connector.h>
28#include <drm/i915_component.h>
29#include <drm/i915_mei_hdcp_interface.h>
30
31#include "mei_hdcp.h"
32
33static inline u8 mei_get_ddi_index(enum port port)
34{
35 switch (port) {
36 case PORT_A:
37 return MEI_DDI_A;
38 case PORT_B ... PORT_F:
39 return (u8)port;
40 default:
41 return MEI_DDI_INVALID_PORT;
42 }
43}
44
45/**
46 * mei_hdcp_initiate_session() - Initiate a Wired HDCP2.2 Tx Session in ME FW
47 * @dev: device corresponding to the mei_cl_device
48 * @data: Intel HW specific hdcp data
49 * @ake_data: AKE_Init msg output.
50 *
51 * Return: 0 on Success, <0 on Failure.
52 */
53static int
54mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
55 struct hdcp2_ake_init *ake_data)
56{
57 struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } };
58 struct wired_cmd_initiate_hdcp2_session_out
59 session_init_out = { { 0 } };
60 struct mei_cl_device *cldev;
61 ssize_t byte;
62
63 if (!dev || !data || !ake_data)
64 return -EINVAL;
65
66 cldev = to_mei_cl_device(dev);
67
68 session_init_in.header.api_version = HDCP_API_VERSION;
69 session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
70 session_init_in.header.status = ME_HDCP_STATUS_SUCCESS;
71 session_init_in.header.buffer_len =
72 WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN;
73
74 session_init_in.port.integrated_port_type = data->port_type;
75 session_init_in.port.physical_port = mei_get_ddi_index(data->port);
76 session_init_in.protocol = data->protocol;
77
78 byte = mei_cldev_send(cldev, (u8 *)&session_init_in,
79 sizeof(session_init_in));
80 if (byte < 0) {
81 dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
82 return byte;
83 }
84
85 byte = mei_cldev_recv(cldev, (u8 *)&session_init_out,
86 sizeof(session_init_out));
87 if (byte < 0) {
88 dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
89 return byte;
90 }
91
92 if (session_init_out.header.status != ME_HDCP_STATUS_SUCCESS) {
93 dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
94 WIRED_INITIATE_HDCP2_SESSION,
95 session_init_out.header.status);
96 return -EIO;
97 }
98
99 ake_data->msg_id = HDCP_2_2_AKE_INIT;
100 ake_data->tx_caps = session_init_out.tx_caps;
101 memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN);
102
103 return 0;
104}
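
Every command helper in this file follows the same shape as mei_hdcp_initiate_session() above: fill a request header, mei_cldev_send() it, mei_cldev_recv() the reply, then check the firmware status field. A condensed sketch of that request/response flow; the example_request/example_reply structs and EXAMPLE_STATUS_SUCCESS are placeholders standing in for the real wired_cmd_* definitions.

// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/mei_cl_bus.h>

struct example_request {
	u32 command_id;
	u32 status;
};

struct example_reply {
	u32 command_id;
	u32 status;
};

#define EXAMPLE_STATUS_SUCCESS 0

static int example_fw_command(struct mei_cl_device *cldev,
			      struct example_request *req,
			      struct example_reply *reply)
{
	ssize_t byte;

	/* Send the command payload to the firmware client. */
	byte = mei_cldev_send(cldev, (u8 *)req, sizeof(*req));
	if (byte < 0) {
		dev_dbg(&cldev->dev, "mei_cldev_send failed. %zd\n", byte);
		return byte;
	}

	/* Read back the reply and validate the firmware status. */
	byte = mei_cldev_recv(cldev, (u8 *)reply, sizeof(*reply));
	if (byte < 0) {
		dev_dbg(&cldev->dev, "mei_cldev_recv failed. %zd\n", byte);
		return byte;
	}

	if (reply->status != EXAMPLE_STATUS_SUCCESS)
		return -EIO;

	return 0;
}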
105
106/**
107 * mei_hdcp_verify_receiver_cert_prepare_km() - Verify the Receiver Certificate
108 * AKE_Send_Cert and prepare AKE_Stored_Km/AKE_No_Stored_Km
109 * @dev: device corresponding to the mei_cl_device
110 * @data: Intel HW specific hdcp data
111 * @rx_cert: AKE_Send_Cert for verification
112 * @km_stored: Pairing status flag output
113 * @ek_pub_km: AKE_Stored_Km/AKE_No_Stored_Km output msg
114 * @msg_sz : size of AKE_XXXXX_Km output msg
115 *
116 * Return: 0 on Success, <0 on Failure
117 */
118static int
119mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
120 struct hdcp_port_data *data,
121 struct hdcp2_ake_send_cert *rx_cert,
122 bool *km_stored,
123 struct hdcp2_ake_no_stored_km
124 *ek_pub_km,
125 size_t *msg_sz)
126{
127 struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } };
128 struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } };
129 struct mei_cl_device *cldev;
130 ssize_t byte;
131
132 if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
133 return -EINVAL;
134
135 cldev = to_mei_cl_device(dev);
136
137 verify_rxcert_in.header.api_version = HDCP_API_VERSION;
138 verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
139 verify_rxcert_in.header.status = ME_HDCP_STATUS_SUCCESS;
140 verify_rxcert_in.header.buffer_len =
141 WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN;
142
143 verify_rxcert_in.port.integrated_port_type = data->port_type;
144 verify_rxcert_in.port.physical_port = mei_get_ddi_index(data->port);
145
146 verify_rxcert_in.cert_rx = rx_cert->cert_rx;
147 memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
148 memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN);
149
150 byte = mei_cldev_send(cldev, (u8 *)&verify_rxcert_in,
151 sizeof(verify_rxcert_in));
152 if (byte < 0) {
153 dev_dbg(dev, "mei_cldev_send failed: %zd\n", byte);
154 return byte;
155 }
156
157 byte = mei_cldev_recv(cldev, (u8 *)&verify_rxcert_out,
158 sizeof(verify_rxcert_out));
159 if (byte < 0) {
160 dev_dbg(dev, "mei_cldev_recv failed: %zd\n", byte);
161 return byte;
162 }
163
164 if (verify_rxcert_out.header.status != ME_HDCP_STATUS_SUCCESS) {
165 dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
166 WIRED_VERIFY_RECEIVER_CERT,
167 verify_rxcert_out.header.status);
168 return -EIO;
169 }
170
171 *km_stored = !!verify_rxcert_out.km_stored;
172 if (verify_rxcert_out.km_stored) {
173 ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM;
174 *msg_sz = sizeof(struct hdcp2_ake_stored_km);
175 } else {
176 ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM;
177 *msg_sz = sizeof(struct hdcp2_ake_no_stored_km);
178 }
179
180 memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff,
181 sizeof(verify_rxcert_out.ekm_buff));
182
183 return 0;
184}
185
186/**
187 * mei_hdcp_verify_hprime() - Verify AKE_Send_H_prime at ME FW.
188 * @dev: device corresponding to the mei_cl_device
189 * @data: Intel HW specific hdcp data
190 * @rx_hprime: AKE_Send_H_prime msg for ME FW verification
191 *
192 * Return: 0 on Success, <0 on Failure
193 */
194static int
195mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
196 struct hdcp2_ake_send_hprime *rx_hprime)
197{
198 struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } };
199 struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } };
200 struct mei_cl_device *cldev;
201 ssize_t byte;
202
203 if (!dev || !data || !rx_hprime)
204 return -EINVAL;
205
206 cldev = to_mei_cl_device(dev);
207
208 send_hprime_in.header.api_version = HDCP_API_VERSION;
209 send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
210 send_hprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
211 send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN;
212
213 send_hprime_in.port.integrated_port_type = data->port_type;
214 send_hprime_in.port.physical_port = mei_get_ddi_index(data->port);
215
216 memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
217 HDCP_2_2_H_PRIME_LEN);
218
219 byte = mei_cldev_send(cldev, (u8 *)&send_hprime_in,
220 sizeof(send_hprime_in));
221 if (byte < 0) {
222 dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
223 return byte;
224 }
225
226 byte = mei_cldev_recv(cldev, (u8 *)&send_hprime_out,
227 sizeof(send_hprime_out));
228 if (byte < 0) {
229 dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
230 return byte;
231 }
232
233 if (send_hprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
234 dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
235 WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status);
236 return -EIO;
237 }
238
239 return 0;
240}
241
242/**
243 * mei_hdcp_store_pairing_info() - Store pairing info received at ME FW
244 * @dev: device corresponding to the mei_cl_device
245 * @data: Intel HW specific hdcp data
246 * @pairing_info: AKE_Send_Pairing_Info msg input to ME FW
247 *
248 * Return: 0 on Success, <0 on Failure
249 */
250static int
251mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
252 struct hdcp2_ake_send_pairing_info *pairing_info)
253{
254 struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } };
255 struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } };
256 struct mei_cl_device *cldev;
257 ssize_t byte;
258
259 if (!dev || !data || !pairing_info)
260 return -EINVAL;
261
262 cldev = to_mei_cl_device(dev);
263
264 pairing_info_in.header.api_version = HDCP_API_VERSION;
265 pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
266 pairing_info_in.header.status = ME_HDCP_STATUS_SUCCESS;
267 pairing_info_in.header.buffer_len =
268 WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN;
269
270 pairing_info_in.port.integrated_port_type = data->port_type;
271 pairing_info_in.port.physical_port = mei_get_ddi_index(data->port);
272
273 memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
274 HDCP_2_2_E_KH_KM_LEN);
275
276 byte = mei_cldev_send(cldev, (u8 *)&pairing_info_in,
277 sizeof(pairing_info_in));
278 if (byte < 0) {
279 dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
280 return byte;
281 }
282
283 byte = mei_cldev_recv(cldev, (u8 *)&pairing_info_out,
284 sizeof(pairing_info_out));
285 if (byte < 0) {
286 dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
287 return byte;
288 }
289
290 if (pairing_info_out.header.status != ME_HDCP_STATUS_SUCCESS) {
291 dev_dbg(dev, "ME cmd 0x%08X failed. Status: 0x%X\n",
292 WIRED_AKE_SEND_PAIRING_INFO,
293 pairing_info_out.header.status);
294 return -EIO;
295 }
296
297 return 0;
298}
299
300/**
301 * mei_hdcp_initiate_locality_check() - Prepare LC_Init
302 * @dev: device corresponding to the mei_cl_device
303 * @data: Intel HW specific hdcp data
304 * @lc_init_data: LC_Init msg output
305 *
306 * Return: 0 on Success, <0 on Failure
307 */
308static int
309mei_hdcp_initiate_locality_check(struct device *dev,
310 struct hdcp_port_data *data,
311 struct hdcp2_lc_init *lc_init_data)
312{
313 struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } };
314 struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } };
315 struct mei_cl_device *cldev;
316 ssize_t byte;
317
318 if (!dev || !data || !lc_init_data)
319 return -EINVAL;
320
321 cldev = to_mei_cl_device(dev);
322
323 lc_init_in.header.api_version = HDCP_API_VERSION;
324 lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
325 lc_init_in.header.status = ME_HDCP_STATUS_SUCCESS;
326 lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN;
327
328 lc_init_in.port.integrated_port_type = data->port_type;
329 lc_init_in.port.physical_port = mei_get_ddi_index(data->port);
330
331 byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in));
332 if (byte < 0) {
333 dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
334 return byte;
335 }
336
337 byte = mei_cldev_recv(cldev, (u8 *)&lc_init_out, sizeof(lc_init_out));
338 if (byte < 0) {
339 dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
340 return byte;
341 }
342
343 if (lc_init_out.header.status != ME_HDCP_STATUS_SUCCESS) {
344 dev_dbg(dev, "ME cmd 0x%08X Failed. status: 0x%X\n",
345 WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status);
346 return -EIO;
347 }
348
349 lc_init_data->msg_id = HDCP_2_2_LC_INIT;
350 memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN);
351
352 return 0;
353}
354
355/**
356 * mei_hdcp_verify_lprime() - Verify lprime.
357 * @dev: device corresponding to the mei_cl_device
358 * @data: Intel HW specific hdcp data
359 * @rx_lprime: LC_Send_L_prime msg for ME FW verification
360 *
361 * Return: 0 on Success, <0 on Failure
362 */
363static int
364mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
365 struct hdcp2_lc_send_lprime *rx_lprime)
366{
367 struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } };
368 struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } };
369 struct mei_cl_device *cldev;
370 ssize_t byte;
371
372 if (!dev || !data || !rx_lprime)
373 return -EINVAL;
374
375 cldev = to_mei_cl_device(dev);
376
377 verify_lprime_in.header.api_version = HDCP_API_VERSION;
378 verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
379 verify_lprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
380 verify_lprime_in.header.buffer_len =
381 WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN;
382
383 verify_lprime_in.port.integrated_port_type = data->port_type;
384 verify_lprime_in.port.physical_port = mei_get_ddi_index(data->port);
385
386 memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
387 HDCP_2_2_L_PRIME_LEN);
388
389 byte = mei_cldev_send(cldev, (u8 *)&verify_lprime_in,
390 sizeof(verify_lprime_in));
391 if (byte < 0) {
392 dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
393 return byte;
394 }
395
396 byte = mei_cldev_recv(cldev, (u8 *)&verify_lprime_out,
397 sizeof(verify_lprime_out));
398 if (byte < 0) {
399 dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
400 return byte;
401 }
402
403 if (verify_lprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
404 dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
405 WIRED_VALIDATE_LOCALITY,
406 verify_lprime_out.header.status);
407 return -EIO;
408 }
409
410 return 0;
411}
412
413/**
414 * mei_hdcp_get_session_key() - Prepare SKE_Send_Eks.
415 * @dev: device corresponding to the mei_cl_device
416 * @data: Intel HW specific hdcp data
417 * @ske_data: SKE_Send_Eks msg output from ME FW.
418 *
419 * Return: 0 on Success, <0 on Failure
420 */
421static int mei_hdcp_get_session_key(struct device *dev,
422 struct hdcp_port_data *data,
423 struct hdcp2_ske_send_eks *ske_data)
424{
425 struct wired_cmd_get_session_key_in get_skey_in = { { 0 } };
426 struct wired_cmd_get_session_key_out get_skey_out = { { 0 } };
427 struct mei_cl_device *cldev;
428 ssize_t byte;
429
430 if (!dev || !data || !ske_data)
431 return -EINVAL;
432
433 cldev = to_mei_cl_device(dev);
434
435 get_skey_in.header.api_version = HDCP_API_VERSION;
436 get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
437 get_skey_in.header.status = ME_HDCP_STATUS_SUCCESS;
438 get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN;
439
440 get_skey_in.port.integrated_port_type = data->port_type;
441 get_skey_in.port.physical_port = mei_get_ddi_index(data->port);
442
443 byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in));
444 if (byte < 0) {
445 dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
446 return byte;
447 }
448
449 byte = mei_cldev_recv(cldev, (u8 *)&get_skey_out, sizeof(get_skey_out));
450
451 if (byte < 0) {
452 dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
453 return byte;
454 }
455
456 if (get_skey_out.header.status != ME_HDCP_STATUS_SUCCESS) {
457 dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
458 WIRED_GET_SESSION_KEY, get_skey_out.header.status);
459 return -EIO;
460 }
461
462 ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS;
463 memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks,
464 HDCP_2_2_E_DKEY_KS_LEN);
465 memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN);
466
467 return 0;
468}
469
470/**
471 * mei_hdcp_repeater_check_flow_prepare_ack() - Validate the Downstream topology
472 * and prepare rep_ack.
473 * @dev: device corresponding to the mei_cl_device
474 * @data: Intel HW specific hdcp data
475 * @rep_topology: Receiver ID List to be validated
476 * @rep_send_ack : repeater ack from ME FW.
477 *
478 * Return: 0 on Success, <0 on Failure
479 */
480static int
481mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
482 struct hdcp_port_data *data,
483 struct hdcp2_rep_send_receiverid_list
484 *rep_topology,
485 struct hdcp2_rep_send_ack
486 *rep_send_ack)
487{
488 struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } };
489 struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } };
490 struct mei_cl_device *cldev;
491 ssize_t byte;
492
493 if (!dev || !rep_topology || !rep_send_ack || !data)
494 return -EINVAL;
495
496 cldev = to_mei_cl_device(dev);
497
498 verify_repeater_in.header.api_version = HDCP_API_VERSION;
499 verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
500 verify_repeater_in.header.status = ME_HDCP_STATUS_SUCCESS;
501 verify_repeater_in.header.buffer_len =
502 WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN;
503
504 verify_repeater_in.port.integrated_port_type = data->port_type;
505 verify_repeater_in.port.physical_port = mei_get_ddi_index(data->port);
506
507 memcpy(verify_repeater_in.rx_info, rep_topology->rx_info,
508 HDCP_2_2_RXINFO_LEN);
509 memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v,
510 HDCP_2_2_SEQ_NUM_LEN);
511 memcpy(verify_repeater_in.v_prime, rep_topology->v_prime,
512 HDCP_2_2_V_PRIME_HALF_LEN);
513 memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids,
514 HDCP_2_2_RECEIVER_IDS_MAX_LEN);
515
516 byte = mei_cldev_send(cldev, (u8 *)&verify_repeater_in,
517 sizeof(verify_repeater_in));
518 if (byte < 0) {
519 dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
520 return byte;
521 }
522
523 byte = mei_cldev_recv(cldev, (u8 *)&verify_repeater_out,
524 sizeof(verify_repeater_out));
525 if (byte < 0) {
526 dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
527 return byte;
528 }
529
530 if (verify_repeater_out.header.status != ME_HDCP_STATUS_SUCCESS) {
531 dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
532 WIRED_VERIFY_REPEATER,
533 verify_repeater_out.header.status);
534 return -EIO;
535 }
536
537 memcpy(rep_send_ack->v, verify_repeater_out.v,
538 HDCP_2_2_V_PRIME_HALF_LEN);
539 rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK;
540
541 return 0;
542}
543
544/**
545 * mei_hdcp_verify_mprime() - Verify mprime.
546 * @dev: device corresponding to the mei_cl_device
547 * @data: Intel HW specific hdcp data
548 * @stream_ready: RepeaterAuth_Stream_Ready msg for ME FW verification.
549 *
550 * Return: 0 on Success, <0 on Failure
551 */
552static int mei_hdcp_verify_mprime(struct device *dev,
553 struct hdcp_port_data *data,
554 struct hdcp2_rep_stream_ready *stream_ready)
555{
556 struct wired_cmd_repeater_auth_stream_req_in
557 verify_mprime_in = { { 0 } };
558 struct wired_cmd_repeater_auth_stream_req_out
559 verify_mprime_out = { { 0 } };
560 struct mei_cl_device *cldev;
561 ssize_t byte;
562
563 if (!dev || !stream_ready || !data)
564 return -EINVAL;
565
566 cldev = to_mei_cl_device(dev);
567
568 verify_mprime_in.header.api_version = HDCP_API_VERSION;
569 verify_mprime_in.header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
570 verify_mprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
571 verify_mprime_in.header.buffer_len =
572 WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN;
573
574 verify_mprime_in.port.integrated_port_type = data->port_type;
575 verify_mprime_in.port.physical_port = mei_get_ddi_index(data->port);
576
577 memcpy(verify_mprime_in.m_prime, stream_ready->m_prime,
578 HDCP_2_2_MPRIME_LEN);
579 drm_hdcp2_u32_to_seq_num(verify_mprime_in.seq_num_m, data->seq_num_m);
580 memcpy(verify_mprime_in.streams, data->streams,
581 (data->k * sizeof(struct hdcp2_streamid_type)));
582
583 verify_mprime_in.k = cpu_to_be16(data->k);
584
585 byte = mei_cldev_send(cldev, (u8 *)&verify_mprime_in,
586 sizeof(verify_mprime_in));
587 if (byte < 0) {
588 dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
589 return byte;
590 }
591
592 byte = mei_cldev_recv(cldev, (u8 *)&verify_mprime_out,
593 sizeof(verify_mprime_out));
594 if (byte < 0) {
595 dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
596 return byte;
597 }
598
599 if (verify_mprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
600 dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
601 WIRED_REPEATER_AUTH_STREAM_REQ,
602 verify_mprime_out.header.status);
603 return -EIO;
604 }
605
606 return 0;
607}
608
609/**
610 * mei_hdcp_enable_authentication() - Mark a port as authenticated
611 * through ME FW
612 * @dev: device corresponding to the mei_cl_device
613 * @data: Intel HW specific hdcp data
614 *
615 * Return: 0 on Success, <0 on Failure
616 */
617static int mei_hdcp_enable_authentication(struct device *dev,
618 struct hdcp_port_data *data)
619{
620 struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } };
621 struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } };
622 struct mei_cl_device *cldev;
623 ssize_t byte;
624
625 if (!dev || !data)
626 return -EINVAL;
627
628 cldev = to_mei_cl_device(dev);
629
630 enable_auth_in.header.api_version = HDCP_API_VERSION;
631 enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
632 enable_auth_in.header.status = ME_HDCP_STATUS_SUCCESS;
633 enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN;
634
635 enable_auth_in.port.integrated_port_type = data->port_type;
636 enable_auth_in.port.physical_port = mei_get_ddi_index(data->port);
637 enable_auth_in.stream_type = data->streams[0].stream_type;
638
639 byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in,
640 sizeof(enable_auth_in));
641 if (byte < 0) {
642 dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
643 return byte;
644 }
645
646 byte = mei_cldev_recv(cldev, (u8 *)&enable_auth_out,
647 sizeof(enable_auth_out));
648 if (byte < 0) {
649 dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
650 return byte;
651 }
652
653 if (enable_auth_out.header.status != ME_HDCP_STATUS_SUCCESS) {
654 dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
655 WIRED_ENABLE_AUTH, enable_auth_out.header.status);
656 return -EIO;
657 }
658
659 return 0;
660}
661
662/**
663 * mei_hdcp_close_session() - Close the Wired HDCP Tx session of ME FW per port.
664 * This also disables the authenticated state of the port.
665 * @dev: device corresponding to the mei_cl_device
666 * @data: Intel HW specific hdcp data
667 *
668 * Return: 0 on Success, <0 on Failure
669 */
670static int
671mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
672{
673 struct wired_cmd_close_session_in session_close_in = { { 0 } };
674 struct wired_cmd_close_session_out session_close_out = { { 0 } };
675 struct mei_cl_device *cldev;
676 ssize_t byte;
677
678 if (!dev || !data)
679 return -EINVAL;
680
681 cldev = to_mei_cl_device(dev);
682
683 session_close_in.header.api_version = HDCP_API_VERSION;
684 session_close_in.header.command_id = WIRED_CLOSE_SESSION;
685 session_close_in.header.status = ME_HDCP_STATUS_SUCCESS;
686 session_close_in.header.buffer_len =
687 WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN;
688
689 session_close_in.port.integrated_port_type = data->port_type;
690 session_close_in.port.physical_port = mei_get_ddi_index(data->port);
691
692 byte = mei_cldev_send(cldev, (u8 *)&session_close_in,
693 sizeof(session_close_in));
694 if (byte < 0) {
695 dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
696 return byte;
697 }
698
699 byte = mei_cldev_recv(cldev, (u8 *)&session_close_out,
700 sizeof(session_close_out));
701 if (byte < 0) {
702 dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
703 return byte;
704 }
705
706 if (session_close_out.header.status != ME_HDCP_STATUS_SUCCESS) {
707 dev_dbg(dev, "Session Close Failed. status: 0x%X\n",
708 session_close_out.header.status);
709 return -EIO;
710 }
711
712 return 0;
713}
714
715static const struct i915_hdcp_component_ops mei_hdcp_ops = {
716 .owner = THIS_MODULE,
717 .initiate_hdcp2_session = mei_hdcp_initiate_session,
718 .verify_receiver_cert_prepare_km =
719 mei_hdcp_verify_receiver_cert_prepare_km,
720 .verify_hprime = mei_hdcp_verify_hprime,
721 .store_pairing_info = mei_hdcp_store_pairing_info,
722 .initiate_locality_check = mei_hdcp_initiate_locality_check,
723 .verify_lprime = mei_hdcp_verify_lprime,
724 .get_session_key = mei_hdcp_get_session_key,
725 .repeater_check_flow_prepare_ack =
726 mei_hdcp_repeater_check_flow_prepare_ack,
727 .verify_mprime = mei_hdcp_verify_mprime,
728 .enable_hdcp_authentication = mei_hdcp_enable_authentication,
729 .close_hdcp_session = mei_hdcp_close_session,
730};
731
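This ops table is what the bound consumer ends up holding: mei_component_master_bind() below stores it in comp_master->ops together with the mei device in comp_master->mei_dev. As a rough illustration of the intended call flow (the op signatures beyond those visible in this file, and struct hdcp2_ake_init, are assumptions; none of this is code from either driver):

/* Consumer-side sketch: drive one HDCP2 authentication through the ops
 * published above. A real user interleaves these calls with the HDCP 2.2
 * message exchange with the sink, which is omitted here. */
static int example_hdcp2_auth(struct i915_hdcp_comp_master *master,
			      struct hdcp_port_data *data)
{
	struct hdcp2_ake_init ake_data;
	int ret;

	ret = master->ops->initiate_hdcp2_session(master->mei_dev, data,
						  &ake_data);
	if (ret < 0)
		return ret;

	/* ... AKE, locality check and SKE steps via the other ops ... */

	ret = master->ops->enable_hdcp_authentication(master->mei_dev, data);
	if (ret < 0)
		master->ops->close_hdcp_session(master->mei_dev, data);

	return ret;
}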
732static int mei_component_master_bind(struct device *dev)
733{
734 struct mei_cl_device *cldev = to_mei_cl_device(dev);
735 struct i915_hdcp_comp_master *comp_master =
736 mei_cldev_get_drvdata(cldev);
737 int ret;
738
739 dev_dbg(dev, "%s\n", __func__);
740 comp_master->ops = &mei_hdcp_ops;
741 comp_master->mei_dev = dev;
742 ret = component_bind_all(dev, comp_master);
743 if (ret < 0)
744 return ret;
745
746 return 0;
747}
748
749static void mei_component_master_unbind(struct device *dev)
750{
751 struct mei_cl_device *cldev = to_mei_cl_device(dev);
752 struct i915_hdcp_comp_master *comp_master =
753 mei_cldev_get_drvdata(cldev);
754
755 dev_dbg(dev, "%s\n", __func__);
756 component_unbind_all(dev, comp_master);
757}
758
759static const struct component_master_ops mei_component_master_ops = {
760 .bind = mei_component_master_bind,
761 .unbind = mei_component_master_unbind,
762};
763
764static int mei_hdcp_component_match(struct device *dev, int subcomponent,
765 void *data)
766{
767 return !strcmp(dev->driver->name, "i915") &&
768 subcomponent == I915_COMPONENT_HDCP;
769}
770
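mei_hdcp_component_match() only accepts a device driven by "i915" that registered itself as the HDCP subcomponent. The other half of that handshake is the GPU driver calling the typed component API; a sketch of such a registration follows (component_add_typed() and I915_COMPONENT_HDCP come from the component framework and <drm/i915_component.h>, while the example_* names are hypothetical):

#include <linux/component.h>
#include <drm/i915_component.h>

/* Hypothetical subcomponent bind/unbind callbacks. The void *data passed
 * in is the master's drvdata, i.e. the struct i915_hdcp_comp_master that
 * mei_component_master_bind() hands to component_bind_all(). */
static int example_hdcp_bind(struct device *dev, struct device *mei_kdev,
			     void *data)
{
	return 0;	/* a real consumer would stash data for later ops calls */
}

static void example_hdcp_unbind(struct device *dev, struct device *mei_kdev,
				void *data)
{
}

static const struct component_ops example_hdcp_component_ops = {
	.bind   = example_hdcp_bind,
	.unbind = example_hdcp_unbind,
};

/* Register a device as the HDCP subcomponent this match callback looks for. */
static int example_register_hdcp_component(struct device *i915_kdev)
{
	return component_add_typed(i915_kdev, &example_hdcp_component_ops,
				   I915_COMPONENT_HDCP);
}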
771static int mei_hdcp_probe(struct mei_cl_device *cldev,
772 const struct mei_cl_device_id *id)
773{
774 struct i915_hdcp_comp_master *comp_master;
775 struct component_match *master_match;
776 int ret;
777
778 ret = mei_cldev_enable(cldev);
779 if (ret < 0) {
780 dev_err(&cldev->dev, "mei_cldev_enable Failed. %d\n", ret);
781 goto enable_err_exit;
782 }
783
784 comp_master = kzalloc(sizeof(*comp_master), GFP_KERNEL);
785 if (!comp_master) {
786 ret = -ENOMEM;
787 goto err_exit;
788 }
789
790 master_match = NULL;
791 component_match_add_typed(&cldev->dev, &master_match,
792 mei_hdcp_component_match, comp_master);
793 if (IS_ERR_OR_NULL(master_match)) {
794 ret = -ENOMEM;
795 goto err_exit;
796 }
797
798 mei_cldev_set_drvdata(cldev, comp_master);
799 ret = component_master_add_with_match(&cldev->dev,
800 &mei_component_master_ops,
801 master_match);
802 if (ret < 0) {
803 dev_err(&cldev->dev, "Master comp add failed %d\n", ret);
804 goto err_exit;
805 }
806
807 return 0;
808
809err_exit:
810 mei_cldev_set_drvdata(cldev, NULL);
811 kfree(comp_master);
812 mei_cldev_disable(cldev);
813enable_err_exit:
814 return ret;
815}
816
817static int mei_hdcp_remove(struct mei_cl_device *cldev)
818{
819 struct i915_hdcp_comp_master *comp_master =
820 mei_cldev_get_drvdata(cldev);
821
822 component_master_del(&cldev->dev, &mei_component_master_ops);
823 kfree(comp_master);
824 mei_cldev_set_drvdata(cldev, NULL);
825
826 return mei_cldev_disable(cldev);
827}
828
829#define MEI_UUID_HDCP GUID_INIT(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
830 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
831
832static struct mei_cl_device_id mei_hdcp_tbl[] = {
833 { .uuid = MEI_UUID_HDCP, .version = MEI_CL_VERSION_ANY },
834 { }
835};
836MODULE_DEVICE_TABLE(mei, mei_hdcp_tbl);
837
838static struct mei_cl_driver mei_hdcp_driver = {
839 .id_table = mei_hdcp_tbl,
840 .name = KBUILD_MODNAME,
841 .probe = mei_hdcp_probe,
842 .remove = mei_hdcp_remove,
843};
844
845module_mei_cl_driver(mei_hdcp_driver);
846
847MODULE_AUTHOR("Intel Corporation");
848MODULE_LICENSE("GPL");
849MODULE_DESCRIPTION("MEI HDCP");
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h
new file mode 100644
index 000000000000..5f74b908e486
--- /dev/null
+++ b/drivers/misc/mei/hdcp/mei_hdcp.h
@@ -0,0 +1,377 @@
1/* SPDX-License-Identifier: (GPL-2.0+) */
2/*
3 * Copyright © 2019 Intel Corporation
4 *
5 * Authors:
6 * Ramalingam C <ramalingam.c@intel.com>
7 */
8
9#ifndef __MEI_HDCP_H__
10#define __MEI_HDCP_H__
11
12#include <drm/drm_hdcp.h>
13
14/* me_hdcp_status: Enumeration of all HDCP Status Codes */
15enum me_hdcp_status {
16 ME_HDCP_STATUS_SUCCESS = 0x0000,
17
18 /* WiDi Generic Status Codes */
19 ME_HDCP_STATUS_INTERNAL_ERROR = 0x1000,
20 ME_HDCP_STATUS_UNKNOWN_ERROR = 0x1001,
21 ME_HDCP_STATUS_INCORRECT_API_VERSION = 0x1002,
22 ME_HDCP_STATUS_INVALID_FUNCTION = 0x1003,
23 ME_HDCP_STATUS_INVALID_BUFFER_LENGTH = 0x1004,
24 ME_HDCP_STATUS_INVALID_PARAMS = 0x1005,
25 ME_HDCP_STATUS_AUTHENTICATION_FAILED = 0x1006,
26
27 /* WiDi Status Codes */
28 ME_HDCP_INVALID_SESSION_STATE = 0x6000,
29 ME_HDCP_SRM_FRAGMENT_UNEXPECTED = 0x6001,
30 ME_HDCP_SRM_INVALID_LENGTH = 0x6002,
31 ME_HDCP_SRM_FRAGMENT_OFFSET_INVALID = 0x6003,
32 ME_HDCP_SRM_VERIFICATION_FAILED = 0x6004,
33 ME_HDCP_SRM_VERSION_TOO_OLD = 0x6005,
34 ME_HDCP_RX_CERT_VERIFICATION_FAILED = 0x6006,
35 ME_HDCP_RX_REVOKED = 0x6007,
36 ME_HDCP_H_VERIFICATION_FAILED = 0x6008,
37 ME_HDCP_REPEATER_CHECK_UNEXPECTED = 0x6009,
38 ME_HDCP_TOPOLOGY_MAX_EXCEEDED = 0x600A,
39 ME_HDCP_V_VERIFICATION_FAILED = 0x600B,
40 ME_HDCP_L_VERIFICATION_FAILED = 0x600C,
41 ME_HDCP_STREAM_KEY_ALLOC_FAILED = 0x600D,
42 ME_HDCP_BASE_KEY_RESET_FAILED = 0x600E,
43 ME_HDCP_NONCE_GENERATION_FAILED = 0x600F,
44 ME_HDCP_STATUS_INVALID_E_KEY_STATE = 0x6010,
45 ME_HDCP_STATUS_INVALID_CS_ICV = 0x6011,
46 ME_HDCP_STATUS_INVALID_KB_KEY_STATE = 0x6012,
47 ME_HDCP_STATUS_INVALID_PAVP_MODE_ICV = 0x6013,
48 ME_HDCP_STATUS_INVALID_PAVP_MODE = 0x6014,
49 ME_HDCP_STATUS_LC_MAX_ATTEMPTS = 0x6015,
50
51 /* New status for HDCP 2.1 */
52 ME_HDCP_STATUS_MISMATCH_IN_M = 0x6016,
53
54 /* New status code for HDCP 2.2 Rx */
55 ME_HDCP_STATUS_RX_PROV_NOT_ALLOWED = 0x6017,
56 ME_HDCP_STATUS_RX_PROV_WRONG_SUBJECT = 0x6018,
57 ME_HDCP_RX_NEEDS_PROVISIONING = 0x6019,
58 ME_HDCP_BKSV_ICV_AUTH_FAILED = 0x6020,
59 ME_HDCP_STATUS_INVALID_STREAM_ID = 0x6021,
60 ME_HDCP_STATUS_CHAIN_NOT_INITIALIZED = 0x6022,
61 ME_HDCP_FAIL_NOT_EXPECTED = 0x6023,
62 ME_HDCP_FAIL_HDCP_OFF = 0x6024,
63 ME_HDCP_FAIL_INVALID_PAVP_MEMORY_MODE = 0x6025,
64 ME_HDCP_FAIL_AES_ECB_FAILURE = 0x6026,
65 ME_HDCP_FEATURE_NOT_SUPPORTED = 0x6027,
66 ME_HDCP_DMA_READ_ERROR = 0x6028,
67 ME_HDCP_DMA_WRITE_ERROR = 0x6029,
68 ME_HDCP_FAIL_INVALID_PACKET_SIZE = 0x6030,
69 ME_HDCP_H264_PARSING_ERROR = 0x6031,
70 ME_HDCP_HDCP2_ERRATA_VIDEO_VIOLATION = 0x6032,
71 ME_HDCP_HDCP2_ERRATA_AUDIO_VIOLATION = 0x6033,
72 ME_HDCP_TX_ACTIVE_ERROR = 0x6034,
73 ME_HDCP_MODE_CHANGE_ERROR = 0x6035,
74 ME_HDCP_STREAM_TYPE_ERROR = 0x6036,
75 ME_HDCP_STREAM_MANAGE_NOT_POSSIBLE = 0x6037,
76
77 ME_HDCP_STATUS_PORT_INVALID_COMMAND = 0x6038,
78 ME_HDCP_STATUS_UNSUPPORTED_PROTOCOL = 0x6039,
79 ME_HDCP_STATUS_INVALID_PORT_INDEX = 0x603a,
80 ME_HDCP_STATUS_TX_AUTH_NEEDED = 0x603b,
81 ME_HDCP_STATUS_NOT_INTEGRATED_PORT = 0x603c,
82 ME_HDCP_STATUS_SESSION_MAX_REACHED = 0x603d,
83
 84 /* hdcp capable bit is not set in rx_caps (error is unique to DP) */
85 ME_HDCP_STATUS_NOT_HDCP_CAPABLE = 0x6041,
86
87 ME_HDCP_STATUS_INVALID_STREAM_COUNT = 0x6042,
88};
89
90#define HDCP_API_VERSION 0x00010000
91
92#define HDCP_M_LEN 16
93#define HDCP_KH_LEN 16
94
 95/* Payload buffer size (excluding header) for CMDs and corresponding responses */
96/* Wired_Tx_AKE */
97#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN (4 + 1)
98#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_OUT (4 + 8 + 3)
99
100#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN (4 + 522 + 8 + 3)
101#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MIN_OUT (4 + 1 + 3 + 16 + 16)
102#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MAX_OUT (4 + 1 + 3 + 128)
103
104#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN (4 + 32)
105#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_OUT (4)
106
107#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN (4 + 16)
108#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_OUT (4)
109
110#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN (4)
111#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_OUT (4)
112
113/* Wired_Tx_LC */
114#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN (4)
115#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_OUT (4 + 8)
116
117#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN (4 + 32)
118#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_OUT (4)
119
120/* Wired_Tx_SKE */
121#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN (4)
122#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT (4 + 16 + 8)
123
 124/* Wired_Tx_Enable_Auth */
125#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN (4 + 1)
126#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_OUT (4)
127
128/* Wired_Tx_Repeater */
129#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN (4 + 2 + 3 + 16 + 155)
130#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_OUT (4 + 1 + 16)
131
132#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN (4 + 3 + \
133 32 + 2 + 2)
134
135#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_OUT (4)
136
137/* hdcp_command_id: Enumeration of all WIRED HDCP Command IDs */
138enum hdcp_command_id {
139 _WIDI_COMMAND_BASE = 0x00030000,
140 WIDI_INITIATE_HDCP2_SESSION = _WIDI_COMMAND_BASE,
141 HDCP_GET_SRM_STATUS,
142 HDCP_SEND_SRM_FRAGMENT,
143
144 /* The wired HDCP Tx commands */
145 _WIRED_COMMAND_BASE = 0x00031000,
146 WIRED_INITIATE_HDCP2_SESSION = _WIRED_COMMAND_BASE,
147 WIRED_VERIFY_RECEIVER_CERT,
148 WIRED_AKE_SEND_HPRIME,
149 WIRED_AKE_SEND_PAIRING_INFO,
150 WIRED_INIT_LOCALITY_CHECK,
151 WIRED_VALIDATE_LOCALITY,
152 WIRED_GET_SESSION_KEY,
153 WIRED_ENABLE_AUTH,
154 WIRED_VERIFY_REPEATER,
155 WIRED_REPEATER_AUTH_STREAM_REQ,
156 WIRED_CLOSE_SESSION,
157
158 _WIRED_COMMANDS_COUNT,
159};
160
161union encrypted_buff {
162 u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
163 u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
164 struct {
165 u8 e_kh_km[HDCP_KH_LEN];
166 u8 m[HDCP_M_LEN];
167 } __packed;
168};
169
170/* HDCP HECI message header. All header values are little endian. */
171struct hdcp_cmd_header {
172 u32 api_version;
173 u32 command_id;
174 enum me_hdcp_status status;
175 /* Length of the HECI message (excluding the header) */
176 u32 buffer_len;
177} __packed;
178
179/* Empty command request or response. No data follows the header. */
180struct hdcp_cmd_no_data {
181 struct hdcp_cmd_header header;
182} __packed;
183
184/* Uniquely identifies the hdcp port being addressed for a given command. */
185struct hdcp_port_id {
186 u8 integrated_port_type;
187 u8 physical_port;
188 u16 reserved;
189} __packed;
190
191/*
192 * Data structures for integrated wired HDCP2 Tx in
193 * support of the AKE protocol
194 */
195/* HECI struct for integrated wired HDCP Tx session initiation. */
196struct wired_cmd_initiate_hdcp2_session_in {
197 struct hdcp_cmd_header header;
198 struct hdcp_port_id port;
199 u8 protocol; /* for HDMI vs DP */
200} __packed;
201
202struct wired_cmd_initiate_hdcp2_session_out {
203 struct hdcp_cmd_header header;
204 struct hdcp_port_id port;
205 u8 r_tx[HDCP_2_2_RTX_LEN];
206 struct hdcp2_tx_caps tx_caps;
207} __packed;
208
209/* HECI struct for ending an integrated wired HDCP Tx session. */
210struct wired_cmd_close_session_in {
211 struct hdcp_cmd_header header;
212 struct hdcp_port_id port;
213} __packed;
214
215struct wired_cmd_close_session_out {
216 struct hdcp_cmd_header header;
217 struct hdcp_port_id port;
218} __packed;
219
220/* HECI struct for integrated wired HDCP Tx Rx Cert verification. */
221struct wired_cmd_verify_receiver_cert_in {
222 struct hdcp_cmd_header header;
223 struct hdcp_port_id port;
224 struct hdcp2_cert_rx cert_rx;
225 u8 r_rx[HDCP_2_2_RRX_LEN];
226 u8 rx_caps[HDCP_2_2_RXCAPS_LEN];
227} __packed;
228
229struct wired_cmd_verify_receiver_cert_out {
230 struct hdcp_cmd_header header;
231 struct hdcp_port_id port;
232 u8 km_stored;
233 u8 reserved[3];
234 union encrypted_buff ekm_buff;
235} __packed;
236
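km_stored in the verify-receiver-cert reply selects which view of union encrypted_buff is meaningful: the full E_kpub(km) blob when the firmware has no pairing for this receiver yet, or the stored E_kh(km) plus m pair when it does. A hedged sketch of how a caller might unpack it is below; the helper name and destination buffers are illustrative only, and the real consumer of this reply is mei_hdcp_verify_receiver_cert_prepare_km(), listed in the ops table in mei_hdcp.c.

#include <linux/string.h>

#include "mei_hdcp.h"

/* Illustrative only: copy out the valid view of encrypted_buff based on
 * km_stored from the firmware's verify-receiver-cert reply. */
static void example_unpack_ekm(const struct wired_cmd_verify_receiver_cert_out *out,
			       u8 *ekpub_km, u8 *ekh_km, u8 *m)
{
	if (out->km_stored) {
		/* Receiver paired before: E_kh(km) and m are returned. */
		memcpy(ekh_km, out->ekm_buff.e_kh_km, HDCP_KH_LEN);
		memcpy(m, out->ekm_buff.m, HDCP_M_LEN);
	} else {
		/* First contact: the full E_kpub(km) is returned. */
		memcpy(ekpub_km, out->ekm_buff.e_kpub_km,
		       HDCP_2_2_E_KPUB_KM_LEN);
	}
}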
 237/* HECI struct for verification of Rx's Hprime in an HDCP Tx session */
238struct wired_cmd_ake_send_hprime_in {
239 struct hdcp_cmd_header header;
240 struct hdcp_port_id port;
241 u8 h_prime[HDCP_2_2_H_PRIME_LEN];
242} __packed;
243
244struct wired_cmd_ake_send_hprime_out {
245 struct hdcp_cmd_header header;
246 struct hdcp_port_id port;
247} __packed;
248
249/*
250 * HECI struct for sending in AKE pairing data generated by the Rx in an
251 * integrated wired HDCP Tx session.
252 */
253struct wired_cmd_ake_send_pairing_info_in {
254 struct hdcp_cmd_header header;
255 struct hdcp_port_id port;
256 u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN];
257} __packed;
258
259struct wired_cmd_ake_send_pairing_info_out {
260 struct hdcp_cmd_header header;
261 struct hdcp_port_id port;
262} __packed;
263
 264/* Data structures for integrated wired HDCP2 Tx in support of the LC protocol */
265/*
266 * HECI struct for initiating locality check with an
267 * integrated wired HDCP Tx session.
268 */
269struct wired_cmd_init_locality_check_in {
270 struct hdcp_cmd_header header;
271 struct hdcp_port_id port;
272} __packed;
273
274struct wired_cmd_init_locality_check_out {
275 struct hdcp_cmd_header header;
276 struct hdcp_port_id port;
277 u8 r_n[HDCP_2_2_RN_LEN];
278} __packed;
279
280/*
281 * HECI struct for validating an Rx's LPrime value in an
282 * integrated wired HDCP Tx session.
283 */
284struct wired_cmd_validate_locality_in {
285 struct hdcp_cmd_header header;
286 struct hdcp_port_id port;
287 u8 l_prime[HDCP_2_2_L_PRIME_LEN];
288} __packed;
289
290struct wired_cmd_validate_locality_out {
291 struct hdcp_cmd_header header;
292 struct hdcp_port_id port;
293} __packed;
294
295/*
296 * Data structures for integrated wired HDCP2 Tx in support of the
297 * SKE protocol
298 */
299/* HECI struct for creating session key */
300struct wired_cmd_get_session_key_in {
301 struct hdcp_cmd_header header;
302 struct hdcp_port_id port;
303} __packed;
304
305struct wired_cmd_get_session_key_out {
306 struct hdcp_cmd_header header;
307 struct hdcp_port_id port;
308 u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
309 u8 r_iv[HDCP_2_2_RIV_LEN];
310} __packed;
311
312/* HECI struct for the Tx enable authentication command */
313struct wired_cmd_enable_auth_in {
314 struct hdcp_cmd_header header;
315 struct hdcp_port_id port;
316 u8 stream_type;
317} __packed;
318
319struct wired_cmd_enable_auth_out {
320 struct hdcp_cmd_header header;
321 struct hdcp_port_id port;
322} __packed;
323
324/*
325 * Data structures for integrated wired HDCP2 Tx in support of
326 * the repeater protocols
327 */
328/*
329 * HECI struct for verifying the downstream repeater's HDCP topology in an
330 * integrated wired HDCP Tx session.
331 */
332struct wired_cmd_verify_repeater_in {
333 struct hdcp_cmd_header header;
334 struct hdcp_port_id port;
335 u8 rx_info[HDCP_2_2_RXINFO_LEN];
336 u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
337 u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
338 u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
339} __packed;
340
341struct wired_cmd_verify_repeater_out {
342 struct hdcp_cmd_header header;
343 struct hdcp_port_id port;
344 u8 content_type_supported;
345 u8 v[HDCP_2_2_V_PRIME_HALF_LEN];
346} __packed;
347
348/*
349 * HECI struct in support of stream management in an
350 * integrated wired HDCP Tx session.
351 */
352struct wired_cmd_repeater_auth_stream_req_in {
353 struct hdcp_cmd_header header;
354 struct hdcp_port_id port;
355 u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
356 u8 m_prime[HDCP_2_2_MPRIME_LEN];
357 __be16 k;
358 struct hdcp2_streamid_type streams[1];
359} __packed;
360
361struct wired_cmd_repeater_auth_stream_req_out {
362 struct hdcp_cmd_header header;
363 struct hdcp_port_id port;
364} __packed;
365
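The WIRED_CMD_BUF_LEN_* values earlier in this header count only the payload that follows struct hdcp_cmd_header; VALIDATE_LOCALITY_IN, for instance, is the 4-byte hdcp_port_id plus the 32-byte L'. If one wanted that relationship checked at build time, compile-time asserts along the following lines would do it. This is a sketch, assuming a toolchain that accepts C11 _Static_assert at file scope; the driver itself carries no such checks.

/* Sketch only: tie two of the buffer-length macros back to the packed
 * command structs they describe. Each value excludes the command header. */
_Static_assert(WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN ==
	       sizeof(struct wired_cmd_validate_locality_in) -
	       sizeof(struct hdcp_cmd_header),
	       "validate-locality request payload size mismatch");

_Static_assert(WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT ==
	       sizeof(struct wired_cmd_get_session_key_out) -
	       sizeof(struct hdcp_cmd_header),
	       "get-session-key reply payload size mismatch");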
366enum mei_fw_ddi {
367 MEI_DDI_INVALID_PORT = 0x0,
368
369 MEI_DDI_B = 1,
370 MEI_DDI_C,
371 MEI_DDI_D,
372 MEI_DDI_E,
373 MEI_DDI_F,
374 MEI_DDI_A = 7,
375 MEI_DDI_RANGE_END = MEI_DDI_A,
376};
377#endif /* __MEI_HDCP_H__ */
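enum mei_fw_ddi gives the firmware's numbering of the display DDIs: B through F occupy 1 to 5 in order, DDI A sits at 7, and 0 marks an invalid port. The mei_get_ddi_index() helper that the command functions call is defined earlier in mei_hdcp.c and is not part of this hunk; a mapping consistent with these values might look like the sketch below, where enum port and the PORT_* constants are i915 identifiers assumed to be available.

/* Sketch of an i915 port to mei_fw_ddi translation consistent with the
 * enum above; the driver's real helper lives in mei_hdcp.c. Assumes the
 * i915 convention PORT_A == 0, PORT_B == 1, ... PORT_F == 5. */
static inline u8 example_ddi_index(enum port port)
{
	switch (port) {
	case PORT_A:
		return MEI_DDI_A;
	case PORT_B ... PORT_F:
		return (u8)port;	/* PORT_B..PORT_F line up with MEI_DDI_B..F */
	default:
		return MEI_DDI_INVALID_PORT;
	}
}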
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 2b7f7677f8cc..b7d2487b8409 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -311,7 +311,8 @@ struct mei_client_properties {
311 u8 protocol_version; 311 u8 protocol_version;
312 u8 max_number_of_connections; 312 u8 max_number_of_connections;
313 u8 fixed_address; 313 u8 fixed_address;
314 u8 single_recv_buf; 314 u8 single_recv_buf:1;
315 u8 reserved:7;
315 u32 max_msg_length; 316 u32 max_msg_length;
316} __packed; 317} __packed;
317 318
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 227cc7443671..242dcee14689 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -38,7 +38,6 @@ comment "VOP Bus Driver"
38 38
39config VOP_BUS 39config VOP_BUS
40 tristate "VOP Bus Driver" 40 tristate "VOP Bus Driver"
41 depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
42 help 41 help
43 This option is selected by any driver which registers a 42 This option is selected by any driver which registers a
44 device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST 43 device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST
@@ -132,7 +131,7 @@ comment "VOP Driver"
132 131
133config VOP 132config VOP
134 tristate "VOP Driver" 133 tristate "VOP Driver"
135 depends on 64BIT && PCI && X86 && VOP_BUS 134 depends on VOP_BUS
136 select VHOST_RING 135 select VHOST_RING
137 select VIRTIO 136 select VIRTIO
138 help 137 help
diff --git a/drivers/misc/mic/bus/scif_bus.h b/drivers/misc/mic/bus/scif_bus.h
index ff59568219ad..377a4f38cd7e 100644
--- a/drivers/misc/mic/bus/scif_bus.h
+++ b/drivers/misc/mic/bus/scif_bus.h
@@ -88,8 +88,8 @@ struct scif_driver {
88 * @send_intr: Send an interrupt to the remote node on a specified doorbell. 88 * @send_intr: Send an interrupt to the remote node on a specified doorbell.
89 * @send_p2p_intr: Send an interrupt to the peer node on a specified doorbell 89 * @send_p2p_intr: Send an interrupt to the peer node on a specified doorbell
90 * which is specifically targeted for a peer to peer node. 90 * which is specifically targeted for a peer to peer node.
91 * @ioremap: Map a buffer with the specified physical address and length. 91 * @remap: Map a buffer with the specified physical address and length.
92 * @iounmap: Unmap a buffer previously mapped. 92 * @unmap: Unmap a buffer previously mapped.
93 */ 93 */
94struct scif_hw_ops { 94struct scif_hw_ops {
95 int (*next_db)(struct scif_hw_dev *sdev); 95 int (*next_db)(struct scif_hw_dev *sdev);
@@ -104,9 +104,9 @@ struct scif_hw_ops {
104 void (*send_intr)(struct scif_hw_dev *sdev, int db); 104 void (*send_intr)(struct scif_hw_dev *sdev, int db);
105 void (*send_p2p_intr)(struct scif_hw_dev *sdev, int db, 105 void (*send_p2p_intr)(struct scif_hw_dev *sdev, int db,
106 struct mic_mw *mw); 106 struct mic_mw *mw);
107 void __iomem * (*ioremap)(struct scif_hw_dev *sdev, 107 void __iomem * (*remap)(struct scif_hw_dev *sdev,
108 phys_addr_t pa, size_t len); 108 phys_addr_t pa, size_t len);
109 void (*iounmap)(struct scif_hw_dev *sdev, void __iomem *va); 109 void (*unmap)(struct scif_hw_dev *sdev, void __iomem *va);
110}; 110};
111 111
112int scif_register_driver(struct scif_driver *driver); 112int scif_register_driver(struct scif_driver *driver);
diff --git a/drivers/misc/mic/bus/vop_bus.h b/drivers/misc/mic/bus/vop_bus.h
index fff7a865d721..cf5f3fae573c 100644
--- a/drivers/misc/mic/bus/vop_bus.h
+++ b/drivers/misc/mic/bus/vop_bus.h
@@ -87,8 +87,8 @@ struct vop_driver {
87 * @get_dp: Get access to the virtio device page used by the self 87 * @get_dp: Get access to the virtio device page used by the self
88 * node to add/remove/configure virtio devices. 88 * node to add/remove/configure virtio devices.
89 * @send_intr: Send an interrupt to the peer node on a specified doorbell. 89 * @send_intr: Send an interrupt to the peer node on a specified doorbell.
90 * @ioremap: Map a buffer with the specified DMA address and length. 90 * @remap: Map a buffer with the specified DMA address and length.
91 * @iounmap: Unmap a buffer previously mapped. 91 * @unmap: Unmap a buffer previously mapped.
92 * @dma_filter: The DMA filter function to use for obtaining access to 92 * @dma_filter: The DMA filter function to use for obtaining access to
93 * a DMA channel on the peer node. 93 * a DMA channel on the peer node.
94 */ 94 */
@@ -104,9 +104,9 @@ struct vop_hw_ops {
104 void __iomem * (*get_remote_dp)(struct vop_device *vpdev); 104 void __iomem * (*get_remote_dp)(struct vop_device *vpdev);
105 void * (*get_dp)(struct vop_device *vpdev); 105 void * (*get_dp)(struct vop_device *vpdev);
106 void (*send_intr)(struct vop_device *vpdev, int db); 106 void (*send_intr)(struct vop_device *vpdev, int db);
107 void __iomem * (*ioremap)(struct vop_device *vpdev, 107 void __iomem * (*remap)(struct vop_device *vpdev,
108 dma_addr_t pa, size_t len); 108 dma_addr_t pa, size_t len);
109 void (*iounmap)(struct vop_device *vpdev, void __iomem *va); 109 void (*unmap)(struct vop_device *vpdev, void __iomem *va);
110}; 110};
111 111
112struct vop_device * 112struct vop_device *
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c
index e749af48f736..dcd07ef29801 100644
--- a/drivers/misc/mic/card/mic_device.c
+++ b/drivers/misc/mic/card/mic_device.c
@@ -245,8 +245,8 @@ static struct scif_hw_ops scif_hw_ops = {
245 .next_db = ___mic_next_db, 245 .next_db = ___mic_next_db,
246 .send_intr = ___mic_send_intr, 246 .send_intr = ___mic_send_intr,
247 .send_p2p_intr = ___mic_send_p2p_intr, 247 .send_p2p_intr = ___mic_send_p2p_intr,
248 .ioremap = ___mic_ioremap, 248 .remap = ___mic_ioremap,
249 .iounmap = ___mic_iounmap, 249 .unmap = ___mic_iounmap,
250}; 250};
251 251
252static inline struct mic_driver *vpdev_to_mdrv(struct vop_device *vpdev) 252static inline struct mic_driver *vpdev_to_mdrv(struct vop_device *vpdev)
@@ -316,8 +316,8 @@ static struct vop_hw_ops vop_hw_ops = {
316 .next_db = __mic_next_db, 316 .next_db = __mic_next_db,
317 .get_remote_dp = __mic_get_remote_dp, 317 .get_remote_dp = __mic_get_remote_dp,
318 .send_intr = __mic_send_intr, 318 .send_intr = __mic_send_intr,
319 .ioremap = __mic_ioremap, 319 .remap = __mic_ioremap,
320 .iounmap = __mic_iounmap, 320 .unmap = __mic_iounmap,
321}; 321};
322 322
323static int mic_request_dma_chans(struct mic_driver *mdrv) 323static int mic_request_dma_chans(struct mic_driver *mdrv)
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index 6479435ac96b..079c36f0ce6e 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -133,8 +133,8 @@ static struct vop_hw_ops vop_hw_ops = {
133 .get_dp = __mic_get_dp, 133 .get_dp = __mic_get_dp,
134 .get_remote_dp = __mic_get_remote_dp, 134 .get_remote_dp = __mic_get_remote_dp,
135 .send_intr = __mic_send_intr, 135 .send_intr = __mic_send_intr,
136 .ioremap = __mic_ioremap, 136 .remap = __mic_ioremap,
137 .iounmap = __mic_iounmap, 137 .unmap = __mic_iounmap,
138}; 138};
139 139
140static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev) 140static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev)
@@ -315,8 +315,8 @@ static struct scif_hw_ops scif_hw_ops = {
315 .ack_interrupt = ___mic_ack_interrupt, 315 .ack_interrupt = ___mic_ack_interrupt,
316 .next_db = ___mic_next_db, 316 .next_db = ___mic_next_db,
317 .send_intr = ___mic_send_intr, 317 .send_intr = ___mic_send_intr,
318 .ioremap = ___mic_ioremap, 318 .remap = ___mic_ioremap,
319 .iounmap = ___mic_iounmap, 319 .unmap = ___mic_iounmap,
320}; 320};
321 321
322static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev) 322static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev)
diff --git a/drivers/misc/mic/scif/scif_map.h b/drivers/misc/mic/scif/scif_map.h
index 3e86360ba5a6..7b380534eba1 100644
--- a/drivers/misc/mic/scif/scif_map.h
+++ b/drivers/misc/mic/scif/scif_map.h
@@ -97,7 +97,7 @@ scif_ioremap(dma_addr_t phys, size_t size, struct scif_dev *scifdev)
97 out_virt = phys_to_virt(phys); 97 out_virt = phys_to_virt(phys);
98 else 98 else
99 out_virt = (void __force *) 99 out_virt = (void __force *)
100 sdev->hw_ops->ioremap(sdev, phys, size); 100 sdev->hw_ops->remap(sdev, phys, size);
101 return out_virt; 101 return out_virt;
102} 102}
103 103
@@ -107,7 +107,7 @@ scif_iounmap(void *virt, size_t len, struct scif_dev *scifdev)
107 if (!scifdev_self(scifdev)) { 107 if (!scifdev_self(scifdev)) {
108 struct scif_hw_dev *sdev = scifdev->sdev; 108 struct scif_hw_dev *sdev = scifdev->sdev;
109 109
110 sdev->hw_ops->iounmap(sdev, (void __force __iomem *)virt); 110 sdev->hw_ops->unmap(sdev, (void __force __iomem *)virt);
111 } 111 }
112} 112}
113 113
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 749321eb91ae..f62216628fa6 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -672,8 +672,8 @@ int scif_unregister_window(struct scif_window *window)
672 { 672 {
673 window->unreg_state = OP_IN_PROGRESS; 673 window->unreg_state = OP_IN_PROGRESS;
674 send_msg = true; 674 send_msg = true;
675 /* fall through */
676 } 675 }
676 /* fall through */
677 case OP_IN_PROGRESS: 677 case OP_IN_PROGRESS:
678 { 678 {
679 scif_get_window(window, 1); 679 scif_get_window(window, 1);
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 744757f541be..e37b2c2152a2 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -34,6 +34,7 @@
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/sched.h> 35#include <linux/sched.h>
36#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
37#include <linux/io-64-nonatomic-lo-hi.h>
37 38
38#include "vop_main.h" 39#include "vop_main.h"
39 40
@@ -118,7 +119,7 @@ _vop_total_desc_size(struct mic_device_desc __iomem *desc)
118static u64 vop_get_features(struct virtio_device *vdev) 119static u64 vop_get_features(struct virtio_device *vdev)
119{ 120{
120 unsigned int i, bits; 121 unsigned int i, bits;
121 u32 features = 0; 122 u64 features = 0;
122 struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc; 123 struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
123 u8 __iomem *in_features = _vop_vq_features(desc); 124 u8 __iomem *in_features = _vop_vq_features(desc);
124 int feature_len = ioread8(&desc->feature_len); 125 int feature_len = ioread8(&desc->feature_len);
@@ -126,7 +127,7 @@ static u64 vop_get_features(struct virtio_device *vdev)
126 bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8; 127 bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
127 for (i = 0; i < bits; i++) 128 for (i = 0; i < bits; i++)
128 if (ioread8(&in_features[i / 8]) & (BIT(i % 8))) 129 if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
129 features |= BIT(i); 130 features |= BIT_ULL(i);
130 131
131 return features; 132 return features;
132} 133}
@@ -228,7 +229,7 @@ static void vop_reset_inform_host(struct virtio_device *dev)
228 if (ioread8(&dc->host_ack)) 229 if (ioread8(&dc->host_ack))
229 break; 230 break;
230 msleep(100); 231 msleep(100);
231 }; 232 }
232 233
233 dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry); 234 dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
234 235
@@ -269,7 +270,7 @@ static void vop_del_vq(struct virtqueue *vq, int n)
269 free_pages((unsigned long)vdev->used_virt[n], 270 free_pages((unsigned long)vdev->used_virt[n],
270 get_order(vdev->used_size[n])); 271 get_order(vdev->used_size[n]));
271 vring_del_virtqueue(vq); 272 vring_del_virtqueue(vq);
272 vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]); 273 vpdev->hw_ops->unmap(vpdev, vdev->vr[n]);
273 vdev->vr[n] = NULL; 274 vdev->vr[n] = NULL;
274} 275}
275 276
@@ -337,8 +338,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
337 memcpy_fromio(&config, vqconfig, sizeof(config)); 338 memcpy_fromio(&config, vqconfig, sizeof(config));
338 _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN); 339 _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
339 vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); 340 vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
340 va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address), 341 va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
341 vr_size);
342 if (!va) 342 if (!va)
343 return ERR_PTR(-ENOMEM); 343 return ERR_PTR(-ENOMEM);
344 vdev->vr[index] = va; 344 vdev->vr[index] = va;
@@ -392,7 +392,7 @@ free_used:
392 free_pages((unsigned long)used, 392 free_pages((unsigned long)used,
393 get_order(vdev->used_size[index])); 393 get_order(vdev->used_size[index]));
394unmap: 394unmap:
395 vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]); 395 vpdev->hw_ops->unmap(vpdev, vdev->vr[index]);
396 return ERR_PTR(err); 396 return ERR_PTR(err);
397} 397}
398 398
@@ -437,7 +437,7 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
437 if (!ioread8(&dc->used_address_updated)) 437 if (!ioread8(&dc->used_address_updated))
438 break; 438 break;
439 msleep(100); 439 msleep(100);
440 }; 440 }
441 441
442 dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry); 442 dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
443 if (!retry) { 443 if (!retry) {
@@ -513,7 +513,7 @@ static int _vop_add_device(struct mic_device_desc __iomem *d,
513 vdev->desc = d; 513 vdev->desc = d;
514 vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d); 514 vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
515 vdev->dnode = dnode; 515 vdev->dnode = dnode;
516 vdev->vdev.priv = (void *)(u64)dnode; 516 vdev->vdev.priv = (void *)(unsigned long)dnode;
517 init_completion(&vdev->reset_done); 517 init_completion(&vdev->reset_done);
518 518
519 vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev); 519 vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
@@ -535,7 +535,7 @@ static int _vop_add_device(struct mic_device_desc __iomem *d,
535 offset, type); 535 offset, type);
536 goto free_irq; 536 goto free_irq;
537 } 537 }
538 writeq((u64)vdev, &vdev->dc->vdev); 538 writeq((unsigned long)vdev, &vdev->dc->vdev);
539 dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n", 539 dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
540 __func__, offset, type, vdev); 540 __func__, offset, type, vdev);
541 541
@@ -562,13 +562,18 @@ static int vop_match_desc(struct device *dev, void *data)
562 return vdev->desc == (void __iomem *)data; 562 return vdev->desc == (void __iomem *)data;
563} 563}
564 564
565static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl *dc)
566{
567 return (struct _vop_vdev *)(unsigned long)readq(&dc->vdev);
568}
569
565static void _vop_handle_config_change(struct mic_device_desc __iomem *d, 570static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
566 unsigned int offset, 571 unsigned int offset,
567 struct vop_device *vpdev) 572 struct vop_device *vpdev)
568{ 573{
569 struct mic_device_ctrl __iomem *dc 574 struct mic_device_ctrl __iomem *dc
570 = (void __iomem *)d + _vop_aligned_desc_size(d); 575 = (void __iomem *)d + _vop_aligned_desc_size(d);
571 struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev); 576 struct _vop_vdev *vdev = vop_dc_to_vdev(dc);
572 577
573 if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED) 578 if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
574 return; 579 return;
@@ -587,7 +592,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
587{ 592{
588 struct mic_device_ctrl __iomem *dc 593 struct mic_device_ctrl __iomem *dc
589 = (void __iomem *)d + _vop_aligned_desc_size(d); 594 = (void __iomem *)d + _vop_aligned_desc_size(d);
590 struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev); 595 struct _vop_vdev *vdev = vop_dc_to_vdev(dc);
591 u8 status; 596 u8 status;
592 int ret = -1; 597 int ret = -1;
593 598
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index cbc8ebcff5cf..3632fce40590 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -80,7 +80,7 @@ static void vop_virtio_init_post(struct vop_vdev *vdev)
80 continue; 80 continue;
81 } 81 }
82 vdev->vvr[i].vrh.vring.used = 82 vdev->vvr[i].vrh.vring.used =
83 (void __force *)vpdev->hw_ops->ioremap( 83 (void __force *)vpdev->hw_ops->remap(
84 vpdev, 84 vpdev,
85 le64_to_cpu(vqconfig[i].used_address), 85 le64_to_cpu(vqconfig[i].used_address),
86 used_size); 86 used_size);
@@ -528,15 +528,15 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
528 int vr_idx) 528 int vr_idx)
529{ 529{
530 struct vop_device *vpdev = vdev->vpdev; 530 struct vop_device *vpdev = vdev->vpdev;
531 void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len); 531 void __iomem *dbuf = vpdev->hw_ops->remap(vpdev, daddr, len);
532 struct vop_vringh *vvr = &vdev->vvr[vr_idx]; 532 struct vop_vringh *vvr = &vdev->vvr[vr_idx];
533 struct vop_info *vi = dev_get_drvdata(&vpdev->dev); 533 struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
534 size_t dma_alignment = 1 << vi->dma_ch->device->copy_align; 534 size_t dma_alignment;
535 bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1); 535 bool x200;
536 size_t dma_offset, partlen; 536 size_t dma_offset, partlen;
537 int err; 537 int err;
538 538
539 if (!VOP_USE_DMA) { 539 if (!VOP_USE_DMA || !vi->dma_ch) {
540 if (copy_to_user(ubuf, (void __force *)dbuf, len)) { 540 if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
541 err = -EFAULT; 541 err = -EFAULT;
542 dev_err(vop_dev(vdev), "%s %d err %d\n", 542 dev_err(vop_dev(vdev), "%s %d err %d\n",
@@ -548,6 +548,9 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
548 goto err; 548 goto err;
549 } 549 }
550 550
551 dma_alignment = 1 << vi->dma_ch->device->copy_align;
552 x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
553
551 dma_offset = daddr - round_down(daddr, dma_alignment); 554 dma_offset = daddr - round_down(daddr, dma_alignment);
552 daddr -= dma_offset; 555 daddr -= dma_offset;
553 len += dma_offset; 556 len += dma_offset;
@@ -585,9 +588,9 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
585 } 588 }
586 err = 0; 589 err = 0;
587err: 590err:
588 vpdev->hw_ops->iounmap(vpdev, dbuf); 591 vpdev->hw_ops->unmap(vpdev, dbuf);
589 dev_dbg(vop_dev(vdev), 592 dev_dbg(vop_dev(vdev),
590 "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n", 593 "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n",
591 __func__, ubuf, dbuf, len, vr_idx); 594 __func__, ubuf, dbuf, len, vr_idx);
592 return err; 595 return err;
593} 596}
@@ -603,21 +606,26 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
603 int vr_idx) 606 int vr_idx)
604{ 607{
605 struct vop_device *vpdev = vdev->vpdev; 608 struct vop_device *vpdev = vdev->vpdev;
606 void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len); 609 void __iomem *dbuf = vpdev->hw_ops->remap(vpdev, daddr, len);
607 struct vop_vringh *vvr = &vdev->vvr[vr_idx]; 610 struct vop_vringh *vvr = &vdev->vvr[vr_idx];
608 struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev); 611 struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
609 size_t dma_alignment = 1 << vi->dma_ch->device->copy_align; 612 size_t dma_alignment;
610 bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1); 613 bool x200;
611 size_t partlen; 614 size_t partlen;
612 bool dma = VOP_USE_DMA; 615 bool dma = VOP_USE_DMA && vi->dma_ch;
613 int err = 0; 616 int err = 0;
614 617
615 if (daddr & (dma_alignment - 1)) { 618 if (dma) {
616 vdev->tx_dst_unaligned += len; 619 dma_alignment = 1 << vi->dma_ch->device->copy_align;
617 dma = false; 620 x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
618 } else if (ALIGN(len, dma_alignment) > dlen) { 621
619 vdev->tx_len_unaligned += len; 622 if (daddr & (dma_alignment - 1)) {
620 dma = false; 623 vdev->tx_dst_unaligned += len;
624 dma = false;
625 } else if (ALIGN(len, dma_alignment) > dlen) {
626 vdev->tx_len_unaligned += len;
627 dma = false;
628 }
621 } 629 }
622 630
623 if (!dma) 631 if (!dma)
@@ -668,9 +676,9 @@ memcpy:
668 vdev->out_bytes += len; 676 vdev->out_bytes += len;
669 err = 0; 677 err = 0;
670err: 678err:
671 vpdev->hw_ops->iounmap(vpdev, dbuf); 679 vpdev->hw_ops->unmap(vpdev, dbuf);
672 dev_dbg(vop_dev(vdev), 680 dev_dbg(vop_dev(vdev),
673 "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n", 681 "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n",
674 __func__, ubuf, dbuf, len, vr_idx); 682 __func__, ubuf, dbuf, len, vr_idx);
675 return err; 683 return err;
676} 684}
@@ -704,16 +712,17 @@ static int vop_vringh_copy(struct vop_vdev *vdev, struct vringh_kiov *iov,
704 712
705 while (len && iov->i < iov->used) { 713 while (len && iov->i < iov->used) {
706 struct kvec *kiov = &iov->iov[iov->i]; 714 struct kvec *kiov = &iov->iov[iov->i];
715 unsigned long daddr = (unsigned long)kiov->iov_base;
707 716
708 partlen = min(kiov->iov_len, len); 717 partlen = min(kiov->iov_len, len);
709 if (read) 718 if (read)
710 ret = vop_virtio_copy_to_user(vdev, ubuf, partlen, 719 ret = vop_virtio_copy_to_user(vdev, ubuf, partlen,
711 (u64)kiov->iov_base, 720 daddr,
712 kiov->iov_len, 721 kiov->iov_len,
713 vr_idx); 722 vr_idx);
714 else 723 else
715 ret = vop_virtio_copy_from_user(vdev, ubuf, partlen, 724 ret = vop_virtio_copy_from_user(vdev, ubuf, partlen,
716 (u64)kiov->iov_base, 725 daddr,
717 kiov->iov_len, 726 kiov->iov_len,
718 vr_idx); 727 vr_idx);
719 if (ret) { 728 if (ret) {
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 93be82fc338a..2ec5808ba464 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -616,8 +616,8 @@ irqreturn_t gru_intr_mblade(int irq, void *dev_id)
616 for_each_possible_blade(blade) { 616 for_each_possible_blade(blade) {
617 if (uv_blade_nr_possible_cpus(blade)) 617 if (uv_blade_nr_possible_cpus(blade))
618 continue; 618 continue;
619 gru_intr(0, blade); 619 gru_intr(0, blade);
620 gru_intr(1, blade); 620 gru_intr(1, blade);
621 } 621 }
622 return IRQ_HANDLED; 622 return IRQ_HANDLED;
623} 623}
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 869ec842729e..ad807d5a3141 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -34,7 +34,6 @@
34 34
35MODULE_AUTHOR("VMware, Inc."); 35MODULE_AUTHOR("VMware, Inc.");
36MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver"); 36MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
37MODULE_VERSION("1.5.0.0-k");
38MODULE_ALIAS("dmi:*:svnVMware*:*"); 37MODULE_ALIAS("dmi:*:svnVMware*:*");
39MODULE_ALIAS("vmware_vmmemctl"); 38MODULE_ALIAS("vmware_vmmemctl");
40MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
@@ -73,15 +72,26 @@ enum vmwballoon_capabilities {
73 VMW_BALLOON_BATCHED_CMDS = (1 << 2), 72 VMW_BALLOON_BATCHED_CMDS = (1 << 2),
74 VMW_BALLOON_BATCHED_2M_CMDS = (1 << 3), 73 VMW_BALLOON_BATCHED_2M_CMDS = (1 << 3),
75 VMW_BALLOON_SIGNALLED_WAKEUP_CMD = (1 << 4), 74 VMW_BALLOON_SIGNALLED_WAKEUP_CMD = (1 << 4),
75 VMW_BALLOON_64_BIT_TARGET = (1 << 5)
76}; 76};
77 77
78#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_BASIC_CMDS \ 78#define VMW_BALLOON_CAPABILITIES_COMMON (VMW_BALLOON_BASIC_CMDS \
79 | VMW_BALLOON_BATCHED_CMDS \ 79 | VMW_BALLOON_BATCHED_CMDS \
80 | VMW_BALLOON_BATCHED_2M_CMDS \ 80 | VMW_BALLOON_BATCHED_2M_CMDS \
81 | VMW_BALLOON_SIGNALLED_WAKEUP_CMD) 81 | VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
82 82
83#define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT) 83#define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT)
84 84
85/*
86 * 64-bit targets are only supported in 64-bit
87 */
88#ifdef CONFIG_64BIT
89#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_CAPABILITIES_COMMON \
90 | VMW_BALLOON_64_BIT_TARGET)
91#else
92#define VMW_BALLOON_CAPABILITIES VMW_BALLOON_CAPABILITIES_COMMON
93#endif
94
85enum vmballoon_page_size_type { 95enum vmballoon_page_size_type {
86 VMW_BALLOON_4K_PAGE, 96 VMW_BALLOON_4K_PAGE,
87 VMW_BALLOON_2M_PAGE, 97 VMW_BALLOON_2M_PAGE,
@@ -602,8 +612,9 @@ static int vmballoon_send_get_target(struct vmballoon *b)
602 612
603 limit = totalram_pages(); 613 limit = totalram_pages();
604 614
605 /* Ensure limit fits in 32-bits */ 615 /* Ensure limit fits in 32-bits if 64-bit targets are not supported */
606 if (limit != (u32)limit) 616 if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
617 limit != (u32)limit)
607 return -EINVAL; 618 return -EINVAL;
608 619
609 status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0); 620 status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
@@ -1319,7 +1330,7 @@ static void vmballoon_reset(struct vmballoon *b)
1319 vmballoon_pop(b); 1330 vmballoon_pop(b);
1320 1331
1321 if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES)) 1332 if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
1322 return; 1333 goto unlock;
1323 1334
1324 if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) { 1335 if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
1325 if (vmballoon_init_batching(b)) { 1336 if (vmballoon_init_batching(b)) {
@@ -1330,7 +1341,7 @@ static void vmballoon_reset(struct vmballoon *b)
1330 * The guest will retry in one second. 1341 * The guest will retry in one second.
1331 */ 1342 */
1332 vmballoon_send_start(b, 0); 1343 vmballoon_send_start(b, 0);
1333 return; 1344 goto unlock;
1334 } 1345 }
1335 } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) { 1346 } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
1336 vmballoon_deinit_batching(b); 1347 vmballoon_deinit_batching(b);
@@ -1346,6 +1357,7 @@ static void vmballoon_reset(struct vmballoon *b)
1346 if (vmballoon_send_guest_id(b)) 1357 if (vmballoon_send_guest_id(b))
1347 pr_err("failed to send guest ID to the host\n"); 1358 pr_err("failed to send guest ID to the host\n");
1348 1359
1360unlock:
1349 up_write(&b->conf_sem); 1361 up_write(&b->conf_sem);
1350} 1362}
1351 1363
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index b3fa738ae005..7824c7494916 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -330,7 +330,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
330/* 330/*
331 * Register the notification bitmap with the host. 331 * Register the notification bitmap with the host.
332 */ 332 */
333bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn) 333bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn)
334{ 334{
335 int result; 335 int result;
336 struct vmci_notify_bm_set_msg bitmap_set_msg; 336 struct vmci_notify_bm_set_msg bitmap_set_msg;
@@ -340,11 +340,14 @@ bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
340 bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE; 340 bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
341 bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) - 341 bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) -
342 VMCI_DG_HEADERSIZE; 342 VMCI_DG_HEADERSIZE;
343 bitmap_set_msg.bitmap_ppn = bitmap_ppn; 343 if (vmci_use_ppn64())
344 bitmap_set_msg.bitmap_ppn64 = bitmap_ppn;
345 else
346 bitmap_set_msg.bitmap_ppn32 = (u32) bitmap_ppn;
344 347
345 result = vmci_send_datagram(&bitmap_set_msg.hdr); 348 result = vmci_send_datagram(&bitmap_set_msg.hdr);
346 if (result != VMCI_SUCCESS) { 349 if (result != VMCI_SUCCESS) {
347 pr_devel("Failed to register (PPN=%u) as notification bitmap (error=%d)\n", 350 pr_devel("Failed to register (PPN=%llu) as notification bitmap (error=%d)\n",
348 bitmap_ppn, result); 351 bitmap_ppn, result);
349 return false; 352 return false;
350 } 353 }
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.h b/drivers/misc/vmw_vmci/vmci_doorbell.h
index e4c0b17486a5..410a21f8436f 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.h
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.h
@@ -45,7 +45,7 @@ struct dbell_cpt_state {
45int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle); 45int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle);
46int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags); 46int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags);
47 47
48bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn); 48bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn);
49void vmci_dbell_scan_notification_entries(u8 *bitmap); 49void vmci_dbell_scan_notification_entries(u8 *bitmap);
50 50
51#endif /* VMCI_DOORBELL_H */ 51#endif /* VMCI_DOORBELL_H */
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
index cee9e977d318..2fbf4a0ac657 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.h
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -54,4 +54,6 @@ void vmci_guest_exit(void);
54bool vmci_guest_code_active(void); 54bool vmci_guest_code_active(void);
55u32 vmci_get_vm_context_id(void); 55u32 vmci_get_vm_context_id(void);
56 56
57bool vmci_use_ppn64(void);
58
57#endif /* _VMCI_DRIVER_H_ */ 59#endif /* _VMCI_DRIVER_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index dad5abee656e..928708128177 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -64,6 +64,13 @@ struct vmci_guest_device {
64 dma_addr_t notification_base; 64 dma_addr_t notification_base;
65}; 65};
66 66
67static bool use_ppn64;
68
69bool vmci_use_ppn64(void)
70{
71 return use_ppn64;
72}
73
67/* vmci_dev singleton device and supporting data*/ 74/* vmci_dev singleton device and supporting data*/
68struct pci_dev *vmci_pdev; 75struct pci_dev *vmci_pdev;
69static struct vmci_guest_device *vmci_dev_g; 76static struct vmci_guest_device *vmci_dev_g;
@@ -432,6 +439,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
432 struct vmci_guest_device *vmci_dev; 439 struct vmci_guest_device *vmci_dev;
433 void __iomem *iobase; 440 void __iomem *iobase;
434 unsigned int capabilities; 441 unsigned int capabilities;
442 unsigned int caps_in_use;
435 unsigned long cmd; 443 unsigned long cmd;
436 int vmci_err; 444 int vmci_err;
437 int error; 445 int error;
@@ -496,6 +504,23 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
496 error = -ENXIO; 504 error = -ENXIO;
497 goto err_free_data_buffer; 505 goto err_free_data_buffer;
498 } 506 }
507 caps_in_use = VMCI_CAPS_DATAGRAM;
508
509 /*
510 * Use 64-bit PPNs if the device supports.
511 *
512 * There is no check for the return value of dma_set_mask_and_coherent
513 * since this driver can handle the default mask values if
514 * dma_set_mask_and_coherent fails.
515 */
516 if (capabilities & VMCI_CAPS_PPN64) {
517 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
518 use_ppn64 = true;
519 caps_in_use |= VMCI_CAPS_PPN64;
520 } else {
521 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
522 use_ppn64 = false;
523 }
499 524
500 /* 525 /*
501 * If the hardware supports notifications, we will use that as 526 * If the hardware supports notifications, we will use that as
@@ -510,14 +535,14 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
510 "Unable to allocate notification bitmap\n"); 535 "Unable to allocate notification bitmap\n");
511 } else { 536 } else {
512 memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE); 537 memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
513 capabilities |= VMCI_CAPS_NOTIFICATIONS; 538 caps_in_use |= VMCI_CAPS_NOTIFICATIONS;
514 } 539 }
515 } 540 }
516 541
517 dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities); 542 dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use);
518 543
519 /* Let the host know which capabilities we intend to use. */ 544 /* Let the host know which capabilities we intend to use. */
520 iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR); 545 iowrite32(caps_in_use, vmci_dev->iobase + VMCI_CAPS_ADDR);
521 546
522 /* Set up global device so that we can start sending datagrams */ 547 /* Set up global device so that we can start sending datagrams */
523 spin_lock_irq(&vmci_dev_spinlock); 548 spin_lock_irq(&vmci_dev_spinlock);
@@ -529,13 +554,13 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
529 * Register notification bitmap with device if that capability is 554 * Register notification bitmap with device if that capability is
530 * used. 555 * used.
531 */ 556 */
532 if (capabilities & VMCI_CAPS_NOTIFICATIONS) { 557 if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) {
533 unsigned long bitmap_ppn = 558 unsigned long bitmap_ppn =
534 vmci_dev->notification_base >> PAGE_SHIFT; 559 vmci_dev->notification_base >> PAGE_SHIFT;
535 if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) { 560 if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
536 dev_warn(&pdev->dev, 561 dev_warn(&pdev->dev,
537 "VMCI device unable to register notification bitmap with PPN 0x%x\n", 562 "VMCI device unable to register notification bitmap with PPN 0x%lx\n",
538 (u32) bitmap_ppn); 563 bitmap_ppn);
539 error = -ENXIO; 564 error = -ENXIO;
540 goto err_remove_vmci_dev_g; 565 goto err_remove_vmci_dev_g;
541 } 566 }
@@ -611,7 +636,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
611 636
612 /* Enable specific interrupt bits. */ 637 /* Enable specific interrupt bits. */
613 cmd = VMCI_IMR_DATAGRAM; 638 cmd = VMCI_IMR_DATAGRAM;
614 if (capabilities & VMCI_CAPS_NOTIFICATIONS) 639 if (caps_in_use & VMCI_CAPS_NOTIFICATIONS)
615 cmd |= VMCI_IMR_NOTIFICATION; 640 cmd |= VMCI_IMR_NOTIFICATION;
616 iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR); 641 iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);
617 642
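
The hunks above make the VMCI guest probe advertise only the capabilities it actually enables: caps_in_use starts from VMCI_CAPS_DATAGRAM, gains VMCI_CAPS_PPN64 when the device offers 64-bit page pool numbers (together with a 64-bit rather than 44-bit DMA mask), and gains VMCI_CAPS_NOTIFICATIONS only once the notification bitmap has been allocated and registered. A minimal standalone sketch of that negotiation follows; the bit values are placeholders for illustration only, not the real masks from include/linux/vmw_vmci_defs.h.

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit values; the real masks live in vmw_vmci_defs.h. */
#define VMCI_CAPS_DATAGRAM	0x1
#define VMCI_CAPS_NOTIFICATIONS	0x2
#define VMCI_CAPS_PPN64		0x4

static uint32_t negotiate_caps(uint32_t device_caps, int bitmap_allocated)
{
	uint32_t caps_in_use = VMCI_CAPS_DATAGRAM;

	if (device_caps & VMCI_CAPS_PPN64)
		caps_in_use |= VMCI_CAPS_PPN64;	/* driver also picks a 64-bit DMA mask */

	if ((device_caps & VMCI_CAPS_NOTIFICATIONS) && bitmap_allocated)
		caps_in_use |= VMCI_CAPS_NOTIFICATIONS;

	/* Only caps_in_use is written back to the device's VMCI_CAPS_ADDR. */
	return caps_in_use;
}

int main(void)
{
	uint32_t dev = VMCI_CAPS_DATAGRAM | VMCI_CAPS_NOTIFICATIONS |
		       VMCI_CAPS_PPN64;

	printf("caps in use: 0x%x\n", negotiate_caps(dev, 1));
	return 0;
}
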
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 264f4ed8eef2..f5f1aac9d163 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -435,8 +435,8 @@ static int qp_alloc_ppn_set(void *prod_q,
435 void *cons_q, 435 void *cons_q,
436 u64 num_consume_pages, struct ppn_set *ppn_set) 436 u64 num_consume_pages, struct ppn_set *ppn_set)
437{ 437{
438 u32 *produce_ppns; 438 u64 *produce_ppns;
439 u32 *consume_ppns; 439 u64 *consume_ppns;
440 struct vmci_queue *produce_q = prod_q; 440 struct vmci_queue *produce_q = prod_q;
441 struct vmci_queue *consume_q = cons_q; 441 struct vmci_queue *consume_q = cons_q;
442 u64 i; 442 u64 i;
@@ -462,31 +462,13 @@ static int qp_alloc_ppn_set(void *prod_q,
462 return VMCI_ERROR_NO_MEM; 462 return VMCI_ERROR_NO_MEM;
463 } 463 }
464 464
465 for (i = 0; i < num_produce_pages; i++) { 465 for (i = 0; i < num_produce_pages; i++)
466 unsigned long pfn;
467
468 produce_ppns[i] = 466 produce_ppns[i] =
469 produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; 467 produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
470 pfn = produce_ppns[i];
471
472 /* Fail allocation if PFN isn't supported by hypervisor. */
473 if (sizeof(pfn) > sizeof(*produce_ppns)
474 && pfn != produce_ppns[i])
475 goto ppn_error;
476 }
477
478 for (i = 0; i < num_consume_pages; i++) {
479 unsigned long pfn;
480 468
469 for (i = 0; i < num_consume_pages; i++)
481 consume_ppns[i] = 470 consume_ppns[i] =
482 consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; 471 consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
483 pfn = consume_ppns[i];
484
485 /* Fail allocation if PFN isn't supported by hypervisor. */
486 if (sizeof(pfn) > sizeof(*consume_ppns)
487 && pfn != consume_ppns[i])
488 goto ppn_error;
489 }
490 472
491 ppn_set->num_produce_pages = num_produce_pages; 473 ppn_set->num_produce_pages = num_produce_pages;
492 ppn_set->num_consume_pages = num_consume_pages; 474 ppn_set->num_consume_pages = num_consume_pages;
@@ -494,11 +476,6 @@ static int qp_alloc_ppn_set(void *prod_q,
494 ppn_set->consume_ppns = consume_ppns; 476 ppn_set->consume_ppns = consume_ppns;
495 ppn_set->initialized = true; 477 ppn_set->initialized = true;
496 return VMCI_SUCCESS; 478 return VMCI_SUCCESS;
497
498 ppn_error:
499 kfree(produce_ppns);
500 kfree(consume_ppns);
501 return VMCI_ERROR_INVALID_ARGS;
502} 479}
503 480
504/* 481/*
@@ -520,12 +497,28 @@ static void qp_free_ppn_set(struct ppn_set *ppn_set)
520 */ 497 */
521static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set) 498static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
522{ 499{
523 memcpy(call_buf, ppn_set->produce_ppns, 500 if (vmci_use_ppn64()) {
524 ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns)); 501 memcpy(call_buf, ppn_set->produce_ppns,
525 memcpy(call_buf + 502 ppn_set->num_produce_pages *
526 ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns), 503 sizeof(*ppn_set->produce_ppns));
527 ppn_set->consume_ppns, 504 memcpy(call_buf +
528 ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns)); 505 ppn_set->num_produce_pages *
506 sizeof(*ppn_set->produce_ppns),
507 ppn_set->consume_ppns,
508 ppn_set->num_consume_pages *
509 sizeof(*ppn_set->consume_ppns));
510 } else {
511 int i;
512 u32 *ppns = (u32 *) call_buf;
513
514 for (i = 0; i < ppn_set->num_produce_pages; i++)
515 ppns[i] = (u32) ppn_set->produce_ppns[i];
516
517 ppns = &ppns[ppn_set->num_produce_pages];
518
519 for (i = 0; i < ppn_set->num_consume_pages; i++)
520 ppns[i] = (u32) ppn_set->consume_ppns[i];
521 }
529 522
530 return VMCI_SUCCESS; 523 return VMCI_SUCCESS;
531} 524}
@@ -951,13 +944,15 @@ static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
951{ 944{
952 struct vmci_qp_alloc_msg *alloc_msg; 945 struct vmci_qp_alloc_msg *alloc_msg;
953 size_t msg_size; 946 size_t msg_size;
947 size_t ppn_size;
954 int result; 948 int result;
955 949
956 if (!entry || entry->num_ppns <= 2) 950 if (!entry || entry->num_ppns <= 2)
957 return VMCI_ERROR_INVALID_ARGS; 951 return VMCI_ERROR_INVALID_ARGS;
958 952
953 ppn_size = vmci_use_ppn64() ? sizeof(u64) : sizeof(u32);
959 msg_size = sizeof(*alloc_msg) + 954 msg_size = sizeof(*alloc_msg) +
960 (size_t) entry->num_ppns * sizeof(u32); 955 (size_t) entry->num_ppns * ppn_size;
961 alloc_msg = kmalloc(msg_size, GFP_KERNEL); 956 alloc_msg = kmalloc(msg_size, GFP_KERNEL);
962 if (!alloc_msg) 957 if (!alloc_msg)
963 return VMCI_ERROR_NO_MEM; 958 return VMCI_ERROR_NO_MEM;
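
With produce_ppns/consume_ppns now held as u64, qp_populate_ppn_set() has to narrow each PPN back to 32 bits whenever VMCI_CAPS_PPN64 was not negotiated, and qp_alloc_hypercall() sizes the message with the matching ppn_size. The standalone sketch below repeats the packing step, simplified to a single array:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Copy PPNs as 64-bit values when PPN64 is in use, otherwise narrow each
 * one to 32 bits -- the same branch qp_populate_ppn_set() grows above. */
static void populate_ppns(void *call_buf, const u64 *ppns, size_t n,
			  int use_ppn64)
{
	if (use_ppn64) {
		memcpy(call_buf, ppns, n * sizeof(*ppns));
	} else {
		u32 *out = call_buf;
		size_t i;

		for (i = 0; i < n; i++)
			out[i] = (u32)ppns[i];
	}
}

int main(void)
{
	u64 ppns[2] = { 0x12345, 0xabcde };
	u64 buf[2];	/* large and aligned enough for either layout */

	populate_ppns(buf, ppns, 2, 0);
	printf("packed 32-bit PPNs: 0x%x 0x%x\n",
	       ((u32 *)buf)[0], ((u32 *)buf)[1]);
	return 0;
}
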
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
index ed177f04ef24..46c0b6c7bafb 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.h
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -28,8 +28,8 @@ typedef int (*vmci_event_release_cb) (void *client_data);
28struct ppn_set { 28struct ppn_set {
29 u64 num_produce_pages; 29 u64 num_produce_pages;
30 u64 num_consume_pages; 30 u64 num_consume_pages;
31 u32 *produce_ppns; 31 u64 *produce_ppns;
32 u32 *consume_ppns; 32 u64 *consume_ppns;
33 bool initialized; 33 bool initialized;
34}; 34};
35 35
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index c49ff8970ce3..e071e28bca3f 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -53,6 +53,7 @@
53 53
54#include <linux/ntb.h> 54#include <linux/ntb.h>
55#include <linux/pci.h> 55#include <linux/pci.h>
56#include <linux/io-64-nonatomic-lo-hi.h>
56 57
57/* PCI device IDs */ 58/* PCI device IDs */
58#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725 59#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
@@ -218,33 +219,4 @@ static inline int pdev_is_gen3(struct pci_dev *pdev)
218 return 0; 219 return 0;
219} 220}
220 221
221#ifndef ioread64
222#ifdef readq
223#define ioread64 readq
224#else
225#define ioread64 _ioread64
226static inline u64 _ioread64(void __iomem *mmio)
227{
228 u64 low, high;
229
230 low = ioread32(mmio);
231 high = ioread32(mmio + sizeof(u32));
232 return low | (high << 32);
233}
234#endif
235#endif
236
237#ifndef iowrite64
238#ifdef writeq
239#define iowrite64 writeq
240#else
241#define iowrite64 _iowrite64
242static inline void _iowrite64(u64 val, void __iomem *mmio)
243{
244 iowrite32(val, mmio);
245 iowrite32(val >> 32, mmio + sizeof(u32));
246}
247#endif
248#endif
249
250#endif 222#endif
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index f1eaa3c4d46a..f2df2d39c65b 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -13,13 +13,14 @@
13 * 13 *
14 */ 14 */
15 15
16#include <linux/switchtec.h> 16#include <linux/interrupt.h>
17#include <linux/module.h> 17#include <linux/io-64-nonatomic-lo-hi.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/kthread.h> 19#include <linux/kthread.h>
20#include <linux/interrupt.h> 20#include <linux/module.h>
21#include <linux/ntb.h> 21#include <linux/ntb.h>
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/switchtec.h>
23 24
24MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver"); 25MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
25MODULE_VERSION("0.1"); 26MODULE_VERSION("0.1");
@@ -36,35 +37,6 @@ module_param(use_lut_mws, bool, 0644);
36MODULE_PARM_DESC(use_lut_mws, 37MODULE_PARM_DESC(use_lut_mws,
37 "Enable the use of the LUT based memory windows"); 38 "Enable the use of the LUT based memory windows");
38 39
39#ifndef ioread64
40#ifdef readq
41#define ioread64 readq
42#else
43#define ioread64 _ioread64
44static inline u64 _ioread64(void __iomem *mmio)
45{
46 u64 low, high;
47
48 low = ioread32(mmio);
49 high = ioread32(mmio + sizeof(u32));
50 return low | (high << 32);
51}
52#endif
53#endif
54
55#ifndef iowrite64
56#ifdef writeq
57#define iowrite64 writeq
58#else
59#define iowrite64 _iowrite64
60static inline void _iowrite64(u64 val, void __iomem *mmio)
61{
62 iowrite32(val, mmio);
63 iowrite32(val >> 32, mmio + sizeof(u32));
64}
65#endif
66#endif
67
68#define SWITCHTEC_NTB_MAGIC 0x45CC0001 40#define SWITCHTEC_NTB_MAGIC 0x45CC0001
69#define MAX_MWS 128 41#define MAX_MWS 128
70 42
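
Both ntb_hw_intel.h and ntb_hw_switchtec.c drop their private ioread64()/iowrite64() fallbacks and pull in <linux/io-64-nonatomic-lo-hi.h>, which supplies the same non-atomic 64-bit accessors built from two 32-bit accesses, low word first. A standalone illustration of the lo-hi composition, using a plain array in place of ioread32() on a __iomem region:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Stand-in for a pair of 32-bit device registers; in the drivers above the
 * reads go through ioread32() on __iomem pointers. */
static volatile u32 fake_regs[2] = { 0x89abcdefu, 0x01234567u };

/* Low word first, then high word -- the "lo_hi" ordering the shared header
 * provides on platforms without a native readq/writeq. */
static u64 read64_lo_hi(volatile u32 *reg)
{
	u64 low = reg[0];
	u64 high = reg[1];

	return low | (high << 32);
}

int main(void)
{
	printf("0x%016llx\n", (unsigned long long)read64_lo_hi(fake_regs));
	return 0;
}
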
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 4ad846ceac7c..530d570724c9 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -26,7 +26,7 @@ config NVMEM_IMX_IIM
26 26
27config NVMEM_IMX_OCOTP 27config NVMEM_IMX_OCOTP
28 tristate "i.MX6 On-Chip OTP Controller support" 28 tristate "i.MX6 On-Chip OTP Controller support"
29 depends on SOC_IMX6 || COMPILE_TEST 29 depends on SOC_IMX6 || SOC_IMX7D || COMPILE_TEST
30 depends on HAS_IOMEM 30 depends on HAS_IOMEM
31 help 31 help
32 This is a driver for the On-Chip OTP Controller (OCOTP) available on 32 This is a driver for the On-Chip OTP Controller (OCOTP) available on
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
index 4159b3f41d79..a8097511582a 100644
--- a/drivers/nvmem/bcm-ocotp.c
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -11,13 +11,14 @@
11 * GNU General Public License for more details. 11 * GNU General Public License for more details.
12 */ 12 */
13 13
14#include <linux/acpi.h>
14#include <linux/delay.h> 15#include <linux/delay.h>
15#include <linux/device.h> 16#include <linux/device.h>
16#include <linux/io.h> 17#include <linux/io.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/nvmem-provider.h> 19#include <linux/nvmem-provider.h>
19#include <linux/of.h> 20#include <linux/of.h>
20#include <linux/of_address.h> 21#include <linux/of_device.h>
21#include <linux/platform_device.h> 22#include <linux/platform_device.h>
22 23
23/* 24/*
@@ -78,9 +79,9 @@ static struct otpc_map otp_map_v2 = {
78}; 79};
79 80
80struct otpc_priv { 81struct otpc_priv {
81 struct device *dev; 82 struct device *dev;
82 void __iomem *base; 83 void __iomem *base;
83 struct otpc_map *map; 84 const struct otpc_map *map;
84 struct nvmem_config *config; 85 struct nvmem_config *config;
85}; 86};
86 87
@@ -237,16 +238,22 @@ static struct nvmem_config bcm_otpc_nvmem_config = {
237}; 238};
238 239
239static const struct of_device_id bcm_otpc_dt_ids[] = { 240static const struct of_device_id bcm_otpc_dt_ids[] = {
240 { .compatible = "brcm,ocotp" }, 241 { .compatible = "brcm,ocotp", .data = &otp_map },
241 { .compatible = "brcm,ocotp-v2" }, 242 { .compatible = "brcm,ocotp-v2", .data = &otp_map_v2 },
242 { }, 243 { },
243}; 244};
244MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids); 245MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids);
245 246
247static const struct acpi_device_id bcm_otpc_acpi_ids[] = {
248 { .id = "BRCM0700", .driver_data = (kernel_ulong_t)&otp_map },
249 { .id = "BRCM0701", .driver_data = (kernel_ulong_t)&otp_map_v2 },
250 { /* sentinel */ }
251};
252MODULE_DEVICE_TABLE(acpi, bcm_otpc_acpi_ids);
253
246static int bcm_otpc_probe(struct platform_device *pdev) 254static int bcm_otpc_probe(struct platform_device *pdev)
247{ 255{
248 struct device *dev = &pdev->dev; 256 struct device *dev = &pdev->dev;
249 struct device_node *dn = dev->of_node;
250 struct resource *res; 257 struct resource *res;
251 struct otpc_priv *priv; 258 struct otpc_priv *priv;
252 struct nvmem_device *nvmem; 259 struct nvmem_device *nvmem;
@@ -257,14 +264,9 @@ static int bcm_otpc_probe(struct platform_device *pdev)
257 if (!priv) 264 if (!priv)
258 return -ENOMEM; 265 return -ENOMEM;
259 266
260 if (of_device_is_compatible(dev->of_node, "brcm,ocotp")) 267 priv->map = device_get_match_data(dev);
261 priv->map = &otp_map; 268 if (!priv->map)
262 else if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) 269 return -ENODEV;
263 priv->map = &otp_map_v2;
264 else {
265 dev_err(dev, "%s otpc config map not defined\n", __func__);
266 return -EINVAL;
267 }
268 270
269 /* Get OTP base address register. */ 271 /* Get OTP base address register. */
270 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 272 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -281,7 +283,7 @@ static int bcm_otpc_probe(struct platform_device *pdev)
281 reset_start_bit(priv->base); 283 reset_start_bit(priv->base);
282 284
283 /* Read size of memory in words. */ 285 /* Read size of memory in words. */
284 err = of_property_read_u32(dn, "brcm,ocotp-size", &num_words); 286 err = device_property_read_u32(dev, "brcm,ocotp-size", &num_words);
285 if (err) { 287 if (err) {
286 dev_err(dev, "size parameter not specified\n"); 288 dev_err(dev, "size parameter not specified\n");
287 return -EINVAL; 289 return -EINVAL;
@@ -294,7 +296,7 @@ static int bcm_otpc_probe(struct platform_device *pdev)
294 bcm_otpc_nvmem_config.dev = dev; 296 bcm_otpc_nvmem_config.dev = dev;
295 bcm_otpc_nvmem_config.priv = priv; 297 bcm_otpc_nvmem_config.priv = priv;
296 298
297 if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) { 299 if (priv->map == &otp_map_v2) {
298 bcm_otpc_nvmem_config.word_size = 8; 300 bcm_otpc_nvmem_config.word_size = 8;
299 bcm_otpc_nvmem_config.stride = 8; 301 bcm_otpc_nvmem_config.stride = 8;
300 } 302 }
@@ -315,6 +317,7 @@ static struct platform_driver bcm_otpc_driver = {
315 .driver = { 317 .driver = {
316 .name = "brcm-otpc", 318 .name = "brcm-otpc",
317 .of_match_table = bcm_otpc_dt_ids, 319 .of_match_table = bcm_otpc_dt_ids,
320 .acpi_match_table = ACPI_PTR(bcm_otpc_acpi_ids),
318 }, 321 },
319}; 322};
320module_platform_driver(bcm_otpc_driver); 323module_platform_driver(bcm_otpc_driver);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index f7301bb4ef3b..f24008b66826 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -525,12 +525,14 @@ out:
525static struct nvmem_cell * 525static struct nvmem_cell *
526nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id) 526nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
527{ 527{
528 struct nvmem_cell *cell = NULL; 528 struct nvmem_cell *iter, *cell = NULL;
529 529
530 mutex_lock(&nvmem_mutex); 530 mutex_lock(&nvmem_mutex);
531 list_for_each_entry(cell, &nvmem->cells, node) { 531 list_for_each_entry(iter, &nvmem->cells, node) {
532 if (strcmp(cell_id, cell->name) == 0) 532 if (strcmp(cell_id, iter->name) == 0) {
533 cell = iter;
533 break; 534 break;
535 }
534 } 536 }
535 mutex_unlock(&nvmem_mutex); 537 mutex_unlock(&nvmem_mutex);
536 538
@@ -646,8 +648,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
646 config->name ? config->id : nvmem->id); 648 config->name ? config->id : nvmem->id);
647 } 649 }
648 650
649 nvmem->read_only = device_property_present(config->dev, "read-only") | 651 nvmem->read_only = device_property_present(config->dev, "read-only") ||
650 config->read_only; 652 config->read_only || !nvmem->reg_write;
651 653
652 if (config->root_only) 654 if (config->root_only)
653 nvmem->dev.groups = nvmem->read_only ? 655 nvmem->dev.groups = nvmem->read_only ?
@@ -686,9 +688,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
686 if (rval) 688 if (rval)
687 goto err_remove_cells; 689 goto err_remove_cells;
688 690
689 rval = blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem); 691 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
690 if (rval)
691 goto err_remove_cells;
692 692
693 return nvmem; 693 return nvmem;
694 694
@@ -809,6 +809,7 @@ static struct nvmem_device *__nvmem_device_get(struct device_node *np,
809 "could not increase module refcount for cell %s\n", 809 "could not increase module refcount for cell %s\n",
810 nvmem_dev_name(nvmem)); 810 nvmem_dev_name(nvmem));
811 811
812 put_device(&nvmem->dev);
812 return ERR_PTR(-EINVAL); 813 return ERR_PTR(-EINVAL);
813 } 814 }
814 815
@@ -819,6 +820,7 @@ static struct nvmem_device *__nvmem_device_get(struct device_node *np,
819 820
820static void __nvmem_device_put(struct nvmem_device *nvmem) 821static void __nvmem_device_put(struct nvmem_device *nvmem)
821{ 822{
823 put_device(&nvmem->dev);
822 module_put(nvmem->owner); 824 module_put(nvmem->owner);
823 kref_put(&nvmem->refcnt, nvmem_device_release); 825 kref_put(&nvmem->refcnt, nvmem_device_release);
824} 826}
@@ -837,13 +839,14 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
837{ 839{
838 840
839 struct device_node *nvmem_np; 841 struct device_node *nvmem_np;
840 int index; 842 int index = 0;
841 843
842 index = of_property_match_string(np, "nvmem-names", id); 844 if (id)
845 index = of_property_match_string(np, "nvmem-names", id);
843 846
844 nvmem_np = of_parse_phandle(np, "nvmem", index); 847 nvmem_np = of_parse_phandle(np, "nvmem", index);
845 if (!nvmem_np) 848 if (!nvmem_np)
846 return ERR_PTR(-EINVAL); 849 return ERR_PTR(-ENOENT);
847 850
848 return __nvmem_device_get(nvmem_np, NULL); 851 return __nvmem_device_get(nvmem_np, NULL);
849} 852}
@@ -871,7 +874,7 @@ struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
871 874
872 } 875 }
873 876
874 return nvmem_find(dev_name); 877 return __nvmem_device_get(NULL, dev_name);
875} 878}
876EXPORT_SYMBOL_GPL(nvmem_device_get); 879EXPORT_SYMBOL_GPL(nvmem_device_get);
877 880
@@ -972,7 +975,7 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
972 if (IS_ERR(nvmem)) { 975 if (IS_ERR(nvmem)) {
973 /* Provider may not be registered yet. */ 976 /* Provider may not be registered yet. */
974 cell = ERR_CAST(nvmem); 977 cell = ERR_CAST(nvmem);
975 goto out; 978 break;
976 } 979 }
977 980
978 cell = nvmem_find_cell_by_name(nvmem, 981 cell = nvmem_find_cell_by_name(nvmem,
@@ -980,12 +983,11 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
980 if (!cell) { 983 if (!cell) {
981 __nvmem_device_put(nvmem); 984 __nvmem_device_put(nvmem);
982 cell = ERR_PTR(-ENOENT); 985 cell = ERR_PTR(-ENOENT);
983 goto out;
984 } 986 }
987 break;
985 } 988 }
986 } 989 }
987 990
988out:
989 mutex_unlock(&nvmem_lookup_mutex); 991 mutex_unlock(&nvmem_lookup_mutex);
990 return cell; 992 return cell;
991} 993}
@@ -994,12 +996,14 @@ out:
994static struct nvmem_cell * 996static struct nvmem_cell *
995nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np) 997nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
996{ 998{
997 struct nvmem_cell *cell = NULL; 999 struct nvmem_cell *iter, *cell = NULL;
998 1000
999 mutex_lock(&nvmem_mutex); 1001 mutex_lock(&nvmem_mutex);
1000 list_for_each_entry(cell, &nvmem->cells, node) { 1002 list_for_each_entry(iter, &nvmem->cells, node) {
1001 if (np == cell->np) 1003 if (np == iter->np) {
1004 cell = iter;
1002 break; 1005 break;
1006 }
1003 } 1007 }
1004 mutex_unlock(&nvmem_mutex); 1008 mutex_unlock(&nvmem_mutex);
1005 1009
@@ -1031,7 +1035,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
1031 1035
1032 cell_np = of_parse_phandle(np, "nvmem-cells", index); 1036 cell_np = of_parse_phandle(np, "nvmem-cells", index);
1033 if (!cell_np) 1037 if (!cell_np)
1034 return ERR_PTR(-EINVAL); 1038 return ERR_PTR(-ENOENT);
1035 1039
1036 nvmem_np = of_get_next_parent(cell_np); 1040 nvmem_np = of_get_next_parent(cell_np);
1037 if (!nvmem_np) 1041 if (!nvmem_np)
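
The nvmem_find_cell_by_name()/nvmem_find_cell_by_node() changes add a dedicated iterator because a list_for_each_entry() loop variable never ends up NULL when the list is exhausted, so the old code could hand back a bogus pointer on a miss. The simplified, standalone search below shows the pattern with an ordinary linked list rather than the kernel's list_for_each_entry():

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct cell {
	const char *name;
	struct cell *next;
};

/* Walk with a separate iterator; the result pointer stays NULL unless a
 * match is found, mirroring the fix above. */
static struct cell *find_cell(struct cell *head, const char *id)
{
	struct cell *iter, *cell = NULL;

	for (iter = head; iter; iter = iter->next) {
		if (strcmp(id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	return cell;
}

int main(void)
{
	struct cell c2 = { "mac-address", NULL };
	struct cell c1 = { "calibration", &c2 };

	printf("%p\n", (void *)find_cell(&c1, "mac-address"));	/* &c2 */
	printf("%p\n", (void *)find_cell(&c1, "missing"));	/* NULL */
	return 0;
}
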
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index afb429a417fe..08a9b1ef8ae4 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -427,19 +427,32 @@ static const struct ocotp_params imx6ul_params = {
427 .set_timing = imx_ocotp_set_imx6_timing, 427 .set_timing = imx_ocotp_set_imx6_timing,
428}; 428};
429 429
430static const struct ocotp_params imx6ull_params = {
431 .nregs = 64,
432 .bank_address_words = 0,
433 .set_timing = imx_ocotp_set_imx6_timing,
434};
435
430static const struct ocotp_params imx7d_params = { 436static const struct ocotp_params imx7d_params = {
431 .nregs = 64, 437 .nregs = 64,
432 .bank_address_words = 4, 438 .bank_address_words = 4,
433 .set_timing = imx_ocotp_set_imx7_timing, 439 .set_timing = imx_ocotp_set_imx7_timing,
434}; 440};
435 441
442static const struct ocotp_params imx7ulp_params = {
443 .nregs = 256,
444 .bank_address_words = 0,
445};
446
436static const struct of_device_id imx_ocotp_dt_ids[] = { 447static const struct of_device_id imx_ocotp_dt_ids[] = {
437 { .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params }, 448 { .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
438 { .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params }, 449 { .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
439 { .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params }, 450 { .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params },
440 { .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params }, 451 { .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params },
452 { .compatible = "fsl,imx6ull-ocotp", .data = &imx6ull_params },
441 { .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params }, 453 { .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params },
442 { .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params }, 454 { .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params },
455 { .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params },
443 { }, 456 { },
444}; 457};
445MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids); 458MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
index 33185d8d82cf..c6ee21018d80 100644
--- a/drivers/nvmem/sc27xx-efuse.c
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -106,10 +106,12 @@ static int sc27xx_efuse_poll_status(struct sc27xx_efuse *efuse, u32 bits)
106static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes) 106static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
107{ 107{
108 struct sc27xx_efuse *efuse = context; 108 struct sc27xx_efuse *efuse = context;
109 u32 buf; 109 u32 buf, blk_index = offset / SC27XX_EFUSE_BLOCK_WIDTH;
110 u32 blk_offset = (offset % SC27XX_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE;
110 int ret; 111 int ret;
111 112
112 if (offset > SC27XX_EFUSE_BLOCK_MAX || bytes > SC27XX_EFUSE_BLOCK_WIDTH) 113 if (blk_index > SC27XX_EFUSE_BLOCK_MAX ||
114 bytes > SC27XX_EFUSE_BLOCK_WIDTH)
113 return -EINVAL; 115 return -EINVAL;
114 116
115 ret = sc27xx_efuse_lock(efuse); 117 ret = sc27xx_efuse_lock(efuse);
@@ -133,7 +135,7 @@ static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
133 /* Set the block address to be read. */ 135 /* Set the block address to be read. */
134 ret = regmap_write(efuse->regmap, 136 ret = regmap_write(efuse->regmap,
135 efuse->base + SC27XX_EFUSE_BLOCK_INDEX, 137 efuse->base + SC27XX_EFUSE_BLOCK_INDEX,
136 offset & SC27XX_EFUSE_BLOCK_MASK); 138 blk_index & SC27XX_EFUSE_BLOCK_MASK);
137 if (ret) 139 if (ret)
138 goto disable_efuse; 140 goto disable_efuse;
139 141
@@ -171,8 +173,10 @@ disable_efuse:
171unlock_efuse: 173unlock_efuse:
172 sc27xx_efuse_unlock(efuse); 174 sc27xx_efuse_unlock(efuse);
173 175
174 if (!ret) 176 if (!ret) {
177 buf >>= blk_offset;
175 memcpy(val, &buf, bytes); 178 memcpy(val, &buf, bytes);
179 }
176 180
177 return ret; 181 return ret;
178} 182}
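
The sc27xx-efuse fix splits the byte offset into a block index (used to program SC27XX_EFUSE_BLOCK_INDEX) and a bit shift inside the 32-bit read-back value, so reads that do not start on a block boundary return the right bytes. A standalone rendition of the arithmetic, with a placeholder block width (the real SC27XX_EFUSE_BLOCK_WIDTH is defined in the driver and may differ):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_WIDTH 2	/* placeholder, bytes per efuse block */

int main(void)
{
	uint32_t offset = 3;
	uint32_t index = offset / BLOCK_WIDTH;		/* which block to read */
	uint32_t shift = (offset % BLOCK_WIDTH) * 8;	/* where inside the block */
	uint32_t buf = 0xa1b2c3d4;			/* pretend read-back value */
	uint8_t b;

	buf >>= shift;
	memcpy(&b, &buf, 1);
	printf("block %u, byte 0x%02x\n", index, b);	/* 0xc3 on little-endian */
	return 0;
}
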
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 5484a46dafda..56dd83a45e55 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -213,10 +213,12 @@ void parport_daisy_fini(struct parport *port)
213struct pardevice *parport_open(int devnum, const char *name) 213struct pardevice *parport_open(int devnum, const char *name)
214{ 214{
215 struct daisydev *p = topology; 215 struct daisydev *p = topology;
216 struct pardev_cb par_cb;
216 struct parport *port; 217 struct parport *port;
217 struct pardevice *dev; 218 struct pardevice *dev;
218 int daisy; 219 int daisy;
219 220
221 memset(&par_cb, 0, sizeof(par_cb));
220 spin_lock(&topology_lock); 222 spin_lock(&topology_lock);
221 while (p && p->devnum != devnum) 223 while (p && p->devnum != devnum)
222 p = p->next; 224 p = p->next;
@@ -230,7 +232,7 @@ struct pardevice *parport_open(int devnum, const char *name)
230 port = parport_get_port(p->port); 232 port = parport_get_port(p->port);
231 spin_unlock(&topology_lock); 233 spin_unlock(&topology_lock);
232 234
233 dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL); 235 dev = parport_register_dev_model(port, name, &par_cb, devnum);
234 parport_put_port(port); 236 parport_put_port(port);
235 if (!dev) 237 if (!dev)
236 return NULL; 238 return NULL;
@@ -480,3 +482,31 @@ static int assign_addrs(struct parport *port)
480 kfree(deviceid); 482 kfree(deviceid);
481 return detected; 483 return detected;
482} 484}
485
486static int daisy_drv_probe(struct pardevice *par_dev)
487{
488 struct device_driver *drv = par_dev->dev.driver;
489
490 if (strcmp(drv->name, "daisy_drv"))
491 return -ENODEV;
492 if (strcmp(par_dev->name, daisy_dev_name))
493 return -ENODEV;
494
495 return 0;
496}
497
498static struct parport_driver daisy_driver = {
499 .name = "daisy_drv",
500 .probe = daisy_drv_probe,
501 .devmodel = true,
502};
503
504int daisy_drv_init(void)
505{
506 return parport_register_driver(&daisy_driver);
507}
508
509void daisy_drv_exit(void)
510{
511 parport_unregister_driver(&daisy_driver);
512}
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 9c8249f74479..6296dbb83d47 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1377,7 +1377,7 @@ static struct superio_struct *find_superio(struct parport *p)
1377{ 1377{
1378 int i; 1378 int i;
1379 for (i = 0; i < NR_SUPERIOS; i++) 1379 for (i = 0; i < NR_SUPERIOS; i++)
1380 if (superios[i].io != p->base) 1380 if (superios[i].io == p->base)
1381 return &superios[i]; 1381 return &superios[i];
1382 return NULL; 1382 return NULL;
1383} 1383}
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index e035174ba205..e5e6a463a941 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
257ssize_t parport_device_id (int devnum, char *buffer, size_t count) 257ssize_t parport_device_id (int devnum, char *buffer, size_t count)
258{ 258{
259 ssize_t retval = -ENXIO; 259 ssize_t retval = -ENXIO;
260 struct pardevice *dev = parport_open (devnum, "Device ID probe"); 260 struct pardevice *dev = parport_open(devnum, daisy_dev_name);
261 if (!dev) 261 if (!dev)
262 return -ENXIO; 262 return -ENXIO;
263 263
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 5dc53d420ca8..0171b8dbcdcd 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -137,11 +137,19 @@ static struct bus_type parport_bus_type = {
137 137
138int parport_bus_init(void) 138int parport_bus_init(void)
139{ 139{
140 return bus_register(&parport_bus_type); 140 int retval;
141
142 retval = bus_register(&parport_bus_type);
143 if (retval)
144 return retval;
145 daisy_drv_init();
146
147 return 0;
141} 148}
142 149
143void parport_bus_exit(void) 150void parport_bus_exit(void)
144{ 151{
152 daisy_drv_exit();
145 bus_unregister(&parport_bus_type); 153 bus_unregister(&parport_bus_type);
146} 154}
147 155
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 479031aa4f88..74fdfa68d1f2 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -2,7 +2,7 @@ menuconfig GOLDFISH
2 bool "Platform support for Goldfish virtual devices" 2 bool "Platform support for Goldfish virtual devices"
3 depends on X86_32 || X86_64 || ARM || ARM64 || MIPS 3 depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
4 depends on HAS_IOMEM 4 depends on HAS_IOMEM
5 ---help--- 5 help
6 Say Y here to get to see options for the Goldfish virtual platform. 6 Say Y here to get to see options for the Goldfish virtual platform.
7 This option alone does not add any kernel code. 7 This option alone does not add any kernel code.
8 8
@@ -12,7 +12,7 @@ if GOLDFISH
12 12
13config GOLDFISH_PIPE 13config GOLDFISH_PIPE
14 tristate "Goldfish virtual device for QEMU pipes" 14 tristate "Goldfish virtual device for QEMU pipes"
15 ---help--- 15 help
16 This is a virtual device to drive the QEMU pipe interface used by 16 This is a virtual device to drive the QEMU pipe interface used by
17 the Goldfish Android Virtual Device. 17 the Goldfish Android Virtual Device.
18 18
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index f38882f6f37d..8f9d9e9fa695 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1369,14 +1369,14 @@ config ATARI_SCSI
1369 tristate "Atari native SCSI support" 1369 tristate "Atari native SCSI support"
1370 depends on ATARI && SCSI 1370 depends on ATARI && SCSI
1371 select SCSI_SPI_ATTRS 1371 select SCSI_SPI_ATTRS
1372 select NVRAM
1373 ---help--- 1372 ---help---
1374 If you have an Atari with built-in NCR5380 SCSI controller (TT, 1373 If you have an Atari with built-in NCR5380 SCSI controller (TT,
1375 Falcon, ...) say Y to get it supported. Of course also, if you have 1374 Falcon, ...) say Y to get it supported. Of course also, if you have
1376 a compatible SCSI controller (e.g. for Medusa). 1375 a compatible SCSI controller (e.g. for Medusa).
1377 1376
1378 To compile this driver as a module, choose M here: the 1377 To compile this driver as a module, choose M here: the module will
1379 module will be called atari_scsi. 1378 be called atari_scsi. If you also enable NVRAM support, the SCSI
1379 host's ID is taken from the setting in TT RTC NVRAM.
1380 1380
1381 This driver supports both styles of NCR integration into the 1381 This driver supports both styles of NCR integration into the
1382 system: the TT style (separate DMA), and the Falcon style (via 1382 system: the TT style (separate DMA), and the Falcon style (via
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a503dc50c4f8..e809493d0d06 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -757,15 +757,17 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
757 757
758 if (setup_hostid >= 0) { 758 if (setup_hostid >= 0) {
759 atari_scsi_template.this_id = setup_hostid & 7; 759 atari_scsi_template.this_id = setup_hostid & 7;
760 } else { 760 } else if (IS_REACHABLE(CONFIG_NVRAM)) {
761 /* Test if a host id is set in the NVRam */ 761 /* Test if a host id is set in the NVRam */
762 if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) { 762 if (ATARIHW_PRESENT(TT_CLK)) {
763 unsigned char b = nvram_read_byte(16); 763 unsigned char b;
764 loff_t offset = 16;
765 ssize_t count = nvram_read(&b, 1, &offset);
764 766
765 /* Arbitration enabled? (for TOS) 767 /* Arbitration enabled? (for TOS)
766 * If yes, use configured host ID 768 * If yes, use configured host ID
767 */ 769 */
768 if (b & 0x80) 770 if ((count == 1) && (b & 0x80))
769 atari_scsi_template.this_id = b & 7; 771 atari_scsi_template.this_id = b & 7;
770 } 772 }
771 } 773 }
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 55eda5863a6b..b2f07d2043eb 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -21,7 +21,9 @@ static const struct slim_device_id *slim_match(const struct slim_device_id *id,
21{ 21{
22 while (id->manf_id != 0 || id->prod_code != 0) { 22 while (id->manf_id != 0 || id->prod_code != 0) {
23 if (id->manf_id == sbdev->e_addr.manf_id && 23 if (id->manf_id == sbdev->e_addr.manf_id &&
24 id->prod_code == sbdev->e_addr.prod_code) 24 id->prod_code == sbdev->e_addr.prod_code &&
25 id->dev_index == sbdev->e_addr.dev_index &&
26 id->instance == sbdev->e_addr.instance)
25 return id; 27 return id;
26 id++; 28 id++;
27 } 29 }
@@ -40,6 +42,23 @@ static int slim_device_match(struct device *dev, struct device_driver *drv)
40 return !!slim_match(sbdrv->id_table, sbdev); 42 return !!slim_match(sbdrv->id_table, sbdev);
41} 43}
42 44
45static void slim_device_update_status(struct slim_device *sbdev,
46 enum slim_device_status status)
47{
48 struct slim_driver *sbdrv;
49
50 if (sbdev->status == status)
51 return;
52
53 sbdev->status = status;
54 if (!sbdev->dev.driver)
55 return;
56
57 sbdrv = to_slim_driver(sbdev->dev.driver);
58 if (sbdrv->device_status)
59 sbdrv->device_status(sbdev, sbdev->status);
60}
61
43static int slim_device_probe(struct device *dev) 62static int slim_device_probe(struct device *dev)
44{ 63{
45 struct slim_device *sbdev = to_slim_device(dev); 64 struct slim_device *sbdev = to_slim_device(dev);
@@ -53,8 +72,7 @@ static int slim_device_probe(struct device *dev)
53 /* try getting the logical address after probe */ 72 /* try getting the logical address after probe */
54 ret = slim_get_logical_addr(sbdev); 73 ret = slim_get_logical_addr(sbdev);
55 if (!ret) { 74 if (!ret) {
56 if (sbdrv->device_status) 75 slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP);
57 sbdrv->device_status(sbdev, sbdev->status);
58 } else { 76 } else {
59 dev_err(&sbdev->dev, "Failed to get logical address\n"); 77 dev_err(&sbdev->dev, "Failed to get logical address\n");
60 ret = -EPROBE_DEFER; 78 ret = -EPROBE_DEFER;
@@ -256,6 +274,7 @@ int slim_register_controller(struct slim_controller *ctrl)
256 mutex_init(&ctrl->lock); 274 mutex_init(&ctrl->lock);
257 mutex_init(&ctrl->sched.m_reconf); 275 mutex_init(&ctrl->sched.m_reconf);
258 init_completion(&ctrl->sched.pause_comp); 276 init_completion(&ctrl->sched.pause_comp);
277 spin_lock_init(&ctrl->txn_lock);
259 278
260 dev_dbg(ctrl->dev, "Bus [%s] registered:dev:%p\n", 279 dev_dbg(ctrl->dev, "Bus [%s] registered:dev:%p\n",
261 ctrl->name, ctrl->dev); 280 ctrl->name, ctrl->dev);
@@ -295,23 +314,6 @@ int slim_unregister_controller(struct slim_controller *ctrl)
295} 314}
296EXPORT_SYMBOL_GPL(slim_unregister_controller); 315EXPORT_SYMBOL_GPL(slim_unregister_controller);
297 316
298static void slim_device_update_status(struct slim_device *sbdev,
299 enum slim_device_status status)
300{
301 struct slim_driver *sbdrv;
302
303 if (sbdev->status == status)
304 return;
305
306 sbdev->status = status;
307 if (!sbdev->dev.driver)
308 return;
309
310 sbdrv = to_slim_driver(sbdev->dev.driver);
311 if (sbdrv->device_status)
312 sbdrv->device_status(sbdev, sbdev->status);
313}
314
315/** 317/**
316 * slim_report_absent() - Controller calls this function when a device 318 * slim_report_absent() - Controller calls this function when a device
317 * reports absent, OR when the device cannot be communicated with 319 * reports absent, OR when the device cannot be communicated with
@@ -464,6 +466,7 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev,
464 466
465 sbdev->laddr = laddr; 467 sbdev->laddr = laddr;
466 sbdev->is_laddr_valid = true; 468 sbdev->is_laddr_valid = true;
469 mutex_unlock(&ctrl->lock);
467 470
468 slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP); 471 slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP);
469 472
@@ -471,6 +474,8 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev,
471 laddr, sbdev->e_addr.manf_id, sbdev->e_addr.prod_code, 474 laddr, sbdev->e_addr.manf_id, sbdev->e_addr.prod_code,
472 sbdev->e_addr.dev_index, sbdev->e_addr.instance); 475 sbdev->e_addr.dev_index, sbdev->e_addr.instance);
473 476
477 return 0;
478
474err: 479err:
475 mutex_unlock(&ctrl->lock); 480 mutex_unlock(&ctrl->lock);
476 return ret; 481 return ret;
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 131342280b46..a57698985f9c 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -491,10 +491,10 @@ static int uio_open(struct inode *inode, struct file *filep)
491 if (!idev->info) { 491 if (!idev->info) {
492 mutex_unlock(&idev->info_lock); 492 mutex_unlock(&idev->info_lock);
493 ret = -EINVAL; 493 ret = -EINVAL;
494 goto err_alloc_listener; 494 goto err_infoopen;
495 } 495 }
496 496
497 if (idev->info && idev->info->open) 497 if (idev->info->open)
498 ret = idev->info->open(idev->info, inode); 498 ret = idev->info->open(idev->info, inode);
499 mutex_unlock(&idev->info_lock); 499 mutex_unlock(&idev->info_lock);
500 if (ret) 500 if (ret)
@@ -635,7 +635,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
635 goto out; 635 goto out;
636 } 636 }
637 637
638 if (!idev->info || !idev->info->irq) { 638 if (!idev->info->irq) {
639 retval = -EIO; 639 retval = -EIO;
640 goto out; 640 goto out;
641 } 641 }
@@ -940,9 +940,12 @@ int __uio_register_device(struct module *owner,
940 atomic_set(&idev->event, 0); 940 atomic_set(&idev->event, 0);
941 941
942 ret = uio_get_minor(idev); 942 ret = uio_get_minor(idev);
943 if (ret) 943 if (ret) {
944 kfree(idev);
944 return ret; 945 return ret;
946 }
945 947
948 device_initialize(&idev->dev);
946 idev->dev.devt = MKDEV(uio_major, idev->minor); 949 idev->dev.devt = MKDEV(uio_major, idev->minor);
947 idev->dev.class = &uio_class; 950 idev->dev.class = &uio_class;
948 idev->dev.parent = parent; 951 idev->dev.parent = parent;
@@ -953,7 +956,7 @@ int __uio_register_device(struct module *owner,
953 if (ret) 956 if (ret)
954 goto err_device_create; 957 goto err_device_create;
955 958
956 ret = device_register(&idev->dev); 959 ret = device_add(&idev->dev);
957 if (ret) 960 if (ret)
958 goto err_device_create; 961 goto err_device_create;
959 962
@@ -985,9 +988,10 @@ int __uio_register_device(struct module *owner,
985err_request_irq: 988err_request_irq:
986 uio_dev_del_attributes(idev); 989 uio_dev_del_attributes(idev);
987err_uio_dev_add_attributes: 990err_uio_dev_add_attributes:
988 device_unregister(&idev->dev); 991 device_del(&idev->dev);
989err_device_create: 992err_device_create:
990 uio_free_minor(idev); 993 uio_free_minor(idev);
994 put_device(&idev->dev);
991 return ret; 995 return ret;
992} 996}
993EXPORT_SYMBOL_GPL(__uio_register_device); 997EXPORT_SYMBOL_GPL(__uio_register_device);
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index 8773e373ffe5..dde5cbb27178 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -39,6 +39,22 @@ to_uio_pci_generic_dev(struct uio_info *info)
39 return container_of(info, struct uio_pci_generic_dev, info); 39 return container_of(info, struct uio_pci_generic_dev, info);
40} 40}
41 41
42static int release(struct uio_info *info, struct inode *inode)
43{
44 struct uio_pci_generic_dev *gdev = to_uio_pci_generic_dev(info);
45
46 /*
47 * This driver is insecure when used with devices doing DMA, but some
48	 * people (mis)use it with such devices.
49	 * Let's at least make sure DMA isn't left enabled after the userspace
50	 * driver closes the fd.
51	 * Note that there's a non-zero chance doing this will wedge the device
52	 * at least until reset.
53 */
54 pci_clear_master(gdev->pdev);
55 return 0;
56}
57
42/* Interrupt handler. Read/modify/write the command register to disable 58/* Interrupt handler. Read/modify/write the command register to disable
43 * the interrupt. */ 59 * the interrupt. */
44static irqreturn_t irqhandler(int irq, struct uio_info *info) 60static irqreturn_t irqhandler(int irq, struct uio_info *info)
@@ -78,6 +94,7 @@ static int probe(struct pci_dev *pdev,
78 94
79 gdev->info.name = "uio_pci_generic"; 95 gdev->info.name = "uio_pci_generic";
80 gdev->info.version = DRIVER_VERSION; 96 gdev->info.version = DRIVER_VERSION;
97 gdev->info.release = release;
81 gdev->pdev = pdev; 98 gdev->pdev = pdev;
82 if (pdev->irq) { 99 if (pdev->irq) {
83 gdev->info.irq = pdev->irq; 100 gdev->info.irq = pdev->irq;
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index ae7712c9687a..58a9590c9db6 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -536,7 +536,7 @@ config FB_IMSTT
536 bool "IMS Twin Turbo display support" 536 bool "IMS Twin Turbo display support"
537 depends on (FB = y) && PCI 537 depends on (FB = y) && PCI
538 select FB_CFB_IMAGEBLIT 538 select FB_CFB_IMAGEBLIT
539 select FB_MACMODES if PPC 539 select FB_MACMODES if PPC_PMAC
540 help 540 help
541 The IMS Twin Turbo is a PCI-based frame buffer card bundled with 541 The IMS Twin Turbo is a PCI-based frame buffer card bundled with
542 many Macintosh and compatible computers. 542 many Macintosh and compatible computers.
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 9cb0ef7ac29e..7af8db28bb80 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -411,35 +411,23 @@ static int __init init_control(struct fb_info_control *p)
411 full = p->total_vram == 0x400000; 411 full = p->total_vram == 0x400000;
412 412
413 /* Try to pick a video mode out of NVRAM if we have one. */ 413 /* Try to pick a video mode out of NVRAM if we have one. */
414#ifdef CONFIG_NVRAM 414 cmode = default_cmode;
415 if (default_cmode == CMODE_NVRAM) { 415 if (IS_REACHABLE(CONFIG_NVRAM) && cmode == CMODE_NVRAM)
416 cmode = nvram_read_byte(NV_CMODE); 416 cmode = nvram_read_byte(NV_CMODE);
417 if(cmode < CMODE_8 || cmode > CMODE_32) 417 if (cmode < CMODE_8 || cmode > CMODE_32)
418 cmode = CMODE_8; 418 cmode = CMODE_8;
419 } else 419
420#endif 420 vmode = default_vmode;
421 cmode=default_cmode; 421 if (IS_REACHABLE(CONFIG_NVRAM) && vmode == VMODE_NVRAM)
422#ifdef CONFIG_NVRAM
423 if (default_vmode == VMODE_NVRAM) {
424 vmode = nvram_read_byte(NV_VMODE); 422 vmode = nvram_read_byte(NV_VMODE);
425 if (vmode < 1 || vmode > VMODE_MAX || 423 if (vmode < 1 || vmode > VMODE_MAX ||
426 control_mac_modes[vmode - 1].m[full] < cmode) { 424 control_mac_modes[vmode - 1].m[full] < cmode) {
427 sense = read_control_sense(p); 425 sense = read_control_sense(p);
428 printk("Monitor sense value = 0x%x, ", sense); 426 printk(KERN_CONT "Monitor sense value = 0x%x, ", sense);
429 vmode = mac_map_monitor_sense(sense); 427 vmode = mac_map_monitor_sense(sense);
430 if (control_mac_modes[vmode - 1].m[full] < cmode) 428 if (control_mac_modes[vmode - 1].m[full] < 0)
431 vmode = VMODE_640_480_60; 429 vmode = VMODE_640_480_60;
432 } 430 cmode = min(cmode, control_mac_modes[vmode - 1].m[full]);
433 } else
434#endif
435 {
436 vmode=default_vmode;
437 if (control_mac_modes[vmode - 1].m[full] < cmode) {
438 if (cmode > CMODE_8)
439 cmode--;
440 else
441 vmode = VMODE_640_480_60;
442 }
443 } 431 }
444 432
445 /* Initialize info structure */ 433 /* Initialize info structure */
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 901ca4ed10e9..5d9670daf60e 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -30,9 +30,8 @@
30#include <asm/io.h> 30#include <asm/io.h>
31#include <linux/uaccess.h> 31#include <linux/uaccess.h>
32 32
33#if defined(CONFIG_PPC) 33#if defined(CONFIG_PPC_PMAC)
34#include <linux/nvram.h> 34#include <linux/nvram.h>
35#include <asm/prom.h>
36#include "macmodes.h" 35#include "macmodes.h"
37#endif 36#endif
38 37
@@ -327,14 +326,13 @@ enum {
327 TVP = 1 326 TVP = 1
328}; 327};
329 328
330#define USE_NV_MODES 1
331#define INIT_BPP 8 329#define INIT_BPP 8
332#define INIT_XRES 640 330#define INIT_XRES 640
333#define INIT_YRES 480 331#define INIT_YRES 480
334 332
335static int inverse = 0; 333static int inverse = 0;
336static char fontname[40] __initdata = { 0 }; 334static char fontname[40] __initdata = { 0 };
337#if defined(CONFIG_PPC) 335#if defined(CONFIG_PPC_PMAC)
338static signed char init_vmode = -1, init_cmode = -1; 336static signed char init_vmode = -1, init_cmode = -1;
339#endif 337#endif
340 338
@@ -1390,8 +1388,8 @@ static void init_imstt(struct fb_info *info)
1390 } 1388 }
1391 } 1389 }
1392 1390
1393#if USE_NV_MODES && defined(CONFIG_PPC32) 1391#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
1394 { 1392 if (IS_REACHABLE(CONFIG_NVRAM) && machine_is(powermac)) {
1395 int vmode = init_vmode, cmode = init_cmode; 1393 int vmode = init_vmode, cmode = init_cmode;
1396 1394
1397 if (vmode == -1) { 1395 if (vmode == -1) {
@@ -1409,12 +1407,13 @@ static void init_imstt(struct fb_info *info)
1409 info->var.yres = info->var.yres_virtual = INIT_YRES; 1407 info->var.yres = info->var.yres_virtual = INIT_YRES;
1410 info->var.bits_per_pixel = INIT_BPP; 1408 info->var.bits_per_pixel = INIT_BPP;
1411 } 1409 }
1412 } 1410 } else
1413#else
1414 info->var.xres = info->var.xres_virtual = INIT_XRES;
1415 info->var.yres = info->var.yres_virtual = INIT_YRES;
1416 info->var.bits_per_pixel = INIT_BPP;
1417#endif 1411#endif
1412 {
1413 info->var.xres = info->var.xres_virtual = INIT_XRES;
1414 info->var.yres = info->var.yres_virtual = INIT_YRES;
1415 info->var.bits_per_pixel = INIT_BPP;
1416 }
1418 1417
1419 if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len 1418 if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len
1420 || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) { 1419 || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
@@ -1565,7 +1564,7 @@ imsttfb_setup(char *options)
1565 inverse = 1; 1564 inverse = 1;
1566 fb_invert_cmaps(); 1565 fb_invert_cmaps();
1567 } 1566 }
1568#if defined(CONFIG_PPC) 1567#if defined(CONFIG_PPC_PMAC)
1569 else if (!strncmp(this_opt, "vmode:", 6)) { 1568 else if (!strncmp(this_opt, "vmode:", 6)) {
1570 int vmode = simple_strtoul(this_opt+6, NULL, 0); 1569 int vmode = simple_strtoul(this_opt+6, NULL, 0);
1571 if (vmode > 0 && vmode <= VMODE_MAX) 1570 if (vmode > 0 && vmode <= VMODE_MAX)
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 838869c6490c..d11b5e6210ed 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -111,12 +111,12 @@
111#include "matroxfb_g450.h" 111#include "matroxfb_g450.h"
112#include <linux/matroxfb.h> 112#include <linux/matroxfb.h>
113#include <linux/interrupt.h> 113#include <linux/interrupt.h>
114#include <linux/nvram.h>
114#include <linux/slab.h> 115#include <linux/slab.h>
115#include <linux/uaccess.h> 116#include <linux/uaccess.h>
116 117
117#ifdef CONFIG_PPC_PMAC 118#ifdef CONFIG_PPC_PMAC
118#include <asm/machdep.h> 119#include <asm/machdep.h>
119unsigned char nvram_read_byte(int);
120static int default_vmode = VMODE_NVRAM; 120static int default_vmode = VMODE_NVRAM;
121static int default_cmode = CMODE_NVRAM; 121static int default_cmode = CMODE_NVRAM;
122#endif 122#endif
@@ -1872,10 +1872,11 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b)
1872#ifndef MODULE 1872#ifndef MODULE
1873 if (machine_is(powermac)) { 1873 if (machine_is(powermac)) {
1874 struct fb_var_screeninfo var; 1874 struct fb_var_screeninfo var;
1875
1875 if (default_vmode <= 0 || default_vmode > VMODE_MAX) 1876 if (default_vmode <= 0 || default_vmode > VMODE_MAX)
1876 default_vmode = VMODE_640_480_60; 1877 default_vmode = VMODE_640_480_60;
1877#ifdef CONFIG_NVRAM 1878#if defined(CONFIG_PPC32)
1878 if (default_cmode == CMODE_NVRAM) 1879 if (IS_REACHABLE(CONFIG_NVRAM) && default_cmode == CMODE_NVRAM)
1879 default_cmode = nvram_read_byte(NV_CMODE); 1880 default_cmode = nvram_read_byte(NV_CMODE);
1880#endif 1881#endif
1881 if (default_cmode < CMODE_8 || default_cmode > CMODE_32) 1882 if (default_cmode < CMODE_8 || default_cmode > CMODE_32)
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index bf6b7fb83cf4..76f299375a00 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -345,23 +345,18 @@ static int platinum_init_fb(struct fb_info *info)
345 345
346 sense = read_platinum_sense(pinfo); 346 sense = read_platinum_sense(pinfo);
347 printk(KERN_INFO "platinumfb: Monitor sense value = 0x%x, ", sense); 347 printk(KERN_INFO "platinumfb: Monitor sense value = 0x%x, ", sense);
348 if (default_vmode == VMODE_NVRAM) { 348
349#ifdef CONFIG_NVRAM 349 if (IS_REACHABLE(CONFIG_NVRAM) && default_vmode == VMODE_NVRAM)
350 default_vmode = nvram_read_byte(NV_VMODE); 350 default_vmode = nvram_read_byte(NV_VMODE);
351 if (default_vmode <= 0 || default_vmode > VMODE_MAX || 351 if (default_vmode <= 0 || default_vmode > VMODE_MAX ||
352 !platinum_reg_init[default_vmode-1]) 352 !platinum_reg_init[default_vmode - 1]) {
353#endif
354 default_vmode = VMODE_CHOOSE;
355 }
356 if (default_vmode == VMODE_CHOOSE) {
357 default_vmode = mac_map_monitor_sense(sense); 353 default_vmode = mac_map_monitor_sense(sense);
354 if (!platinum_reg_init[default_vmode - 1])
355 default_vmode = VMODE_640_480_60;
358 } 356 }
359 if (default_vmode <= 0 || default_vmode > VMODE_MAX) 357
360 default_vmode = VMODE_640_480_60; 358 if (IS_REACHABLE(CONFIG_NVRAM) && default_cmode == CMODE_NVRAM)
361#ifdef CONFIG_NVRAM
362 if (default_cmode == CMODE_NVRAM)
363 default_cmode = nvram_read_byte(NV_CMODE); 359 default_cmode = nvram_read_byte(NV_CMODE);
364#endif
365 if (default_cmode < CMODE_8 || default_cmode > CMODE_32) 360 if (default_cmode < CMODE_8 || default_cmode > CMODE_32)
366 default_cmode = CMODE_8; 361 default_cmode = CMODE_8;
367 /* 362 /*
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index d51c3a8009cb..e04fde9c1fcd 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -63,15 +63,8 @@
63#include "macmodes.h" 63#include "macmodes.h"
64#include "valkyriefb.h" 64#include "valkyriefb.h"
65 65
66#ifdef CONFIG_MAC
67/* We don't yet have functions to read the PRAM... perhaps we can
68 adapt them from the PPC code? */
69static int default_vmode = VMODE_CHOOSE;
70static int default_cmode = CMODE_8;
71#else
72static int default_vmode = VMODE_NVRAM; 66static int default_vmode = VMODE_NVRAM;
73static int default_cmode = CMODE_NVRAM; 67static int default_cmode = CMODE_NVRAM;
74#endif
75 68
76struct fb_par_valkyrie { 69struct fb_par_valkyrie {
77 int vmode, cmode; 70 int vmode, cmode;
@@ -283,24 +276,21 @@ static void __init valkyrie_choose_mode(struct fb_info_valkyrie *p)
283 printk(KERN_INFO "Monitor sense value = 0x%x\n", p->sense); 276 printk(KERN_INFO "Monitor sense value = 0x%x\n", p->sense);
284 277
285 /* Try to pick a video mode out of NVRAM if we have one. */ 278 /* Try to pick a video mode out of NVRAM if we have one. */
286#if !defined(CONFIG_MAC) && defined(CONFIG_NVRAM) 279#ifdef CONFIG_PPC_PMAC
287 if (default_vmode == VMODE_NVRAM) { 280 if (IS_REACHABLE(CONFIG_NVRAM) && default_vmode == VMODE_NVRAM)
288 default_vmode = nvram_read_byte(NV_VMODE); 281 default_vmode = nvram_read_byte(NV_VMODE);
289 if (default_vmode <= 0
290 || default_vmode > VMODE_MAX
291 || !valkyrie_reg_init[default_vmode - 1])
292 default_vmode = VMODE_CHOOSE;
293 }
294#endif 282#endif
295 if (default_vmode == VMODE_CHOOSE) 283 if (default_vmode <= 0 || default_vmode > VMODE_MAX ||
284 !valkyrie_reg_init[default_vmode - 1]) {
296 default_vmode = mac_map_monitor_sense(p->sense); 285 default_vmode = mac_map_monitor_sense(p->sense);
297 if (!valkyrie_reg_init[default_vmode - 1]) 286 if (!valkyrie_reg_init[default_vmode - 1])
298 default_vmode = VMODE_640_480_67; 287 default_vmode = VMODE_640_480_67;
299#if !defined(CONFIG_MAC) && defined(CONFIG_NVRAM) 288 }
300 if (default_cmode == CMODE_NVRAM) 289
290#ifdef CONFIG_PPC_PMAC
291 if (IS_REACHABLE(CONFIG_NVRAM) && default_cmode == CMODE_NVRAM)
301 default_cmode = nvram_read_byte(NV_CMODE); 292 default_cmode = nvram_read_byte(NV_CMODE);
302#endif 293#endif
303
304 /* 294 /*
305 * Reduce the pixel size if we don't have enough VRAM or bandwidth. 295 * Reduce the pixel size if we don't have enough VRAM or bandwidth.
306 */ 296 */
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 1475ed5ffcde..df7d09409efe 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -1484,8 +1484,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
1484#ifdef CONFIG_COMPAT 1484#ifdef CONFIG_COMPAT
1485 case VBG_IOCTL_HGCM_CALL_32(0): 1485 case VBG_IOCTL_HGCM_CALL_32(0):
1486 f32bit = true; 1486 f32bit = true;
1487 /* Fall through */
1488#endif 1487#endif
1488 /* Fall through */
1489 case VBG_IOCTL_HGCM_CALL(0): 1489 case VBG_IOCTL_HGCM_CALL(0):
1490 return vbg_ioctl_hgcm_call(gdev, session, f32bit, data); 1490 return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
1491 case VBG_IOCTL_LOG(0): 1491 case VBG_IOCTL_LOG(0):
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 5b63b94ef6b5..a008f504a2d0 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -36,6 +36,17 @@ extern u64 ioread64(void __iomem *);
36extern u64 ioread64be(void __iomem *); 36extern u64 ioread64be(void __iomem *);
37#endif 37#endif
38 38
39#ifdef readq
40#define ioread64_lo_hi ioread64_lo_hi
41#define ioread64_hi_lo ioread64_hi_lo
42#define ioread64be_lo_hi ioread64be_lo_hi
43#define ioread64be_hi_lo ioread64be_hi_lo
44extern u64 ioread64_lo_hi(void __iomem *addr);
45extern u64 ioread64_hi_lo(void __iomem *addr);
46extern u64 ioread64be_lo_hi(void __iomem *addr);
47extern u64 ioread64be_hi_lo(void __iomem *addr);
48#endif
49
39extern void iowrite8(u8, void __iomem *); 50extern void iowrite8(u8, void __iomem *);
40extern void iowrite16(u16, void __iomem *); 51extern void iowrite16(u16, void __iomem *);
41extern void iowrite16be(u16, void __iomem *); 52extern void iowrite16be(u16, void __iomem *);
@@ -46,6 +57,17 @@ extern void iowrite64(u64, void __iomem *);
46extern void iowrite64be(u64, void __iomem *); 57extern void iowrite64be(u64, void __iomem *);
47#endif 58#endif
48 59
60#ifdef writeq
61#define iowrite64_lo_hi iowrite64_lo_hi
62#define iowrite64_hi_lo iowrite64_hi_lo
63#define iowrite64be_lo_hi iowrite64be_lo_hi
64#define iowrite64be_hi_lo iowrite64be_hi_lo
65extern void iowrite64_lo_hi(u64 val, void __iomem *addr);
66extern void iowrite64_hi_lo(u64 val, void __iomem *addr);
67extern void iowrite64be_lo_hi(u64 val, void __iomem *addr);
68extern void iowrite64be_hi_lo(u64 val, void __iomem *addr);
69#endif
70
49/* 71/*
50 * "string" versions of the above. Note that they 72 * "string" versions of the above. Note that they
51 * use native byte ordering for the accesses (on 73 * use native byte ordering for the accesses (on
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
index 4923b00328c1..93a386be38fa 100644
--- a/include/drm/drm_audio_component.h
+++ b/include/drm/drm_audio_component.h
@@ -5,6 +5,7 @@
5#define _DRM_AUDIO_COMPONENT_H_ 5#define _DRM_AUDIO_COMPONENT_H_
6 6
7struct drm_audio_component; 7struct drm_audio_component;
8struct device;
8 9
9/** 10/**
10 * struct drm_audio_component_ops - Ops implemented by DRM driver, called by hda driver 11 * struct drm_audio_component_ops - Ops implemented by DRM driver, called by hda driver
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index a6de09c5e47f..c21682f76cd3 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -250,4 +250,22 @@ struct hdcp2_dp_errata_stream_type {
250#define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2)) 250#define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2))
251#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3)) 251#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3))
252 252
253/*
254 * Helper functions to convert 24bit big endian hdcp sequence number to
255 * host format and back
256 */
257static inline
258u32 drm_hdcp2_seq_num_to_u32(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN])
259{
260 return (u32)(seq_num[2] | seq_num[1] << 8 | seq_num[0] << 16);
261}
262
263static inline
264void drm_hdcp2_u32_to_seq_num(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val)
265{
266 seq_num[0] = val >> 16;
267 seq_num[1] = val >> 8;
268 seq_num[2] = val;
269}
270
253#endif 271#endif
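
The new drm_hdcp.h helpers convert the 24-bit big-endian HDCP 2.2 sequence numbers to host u32 values and back. The standalone round trip below repeats the same shifts, assuming HDCP_2_2_SEQ_NUM_LEN is 3 (one byte per 8 bits of the 24-bit counter):

#include <stdint.h>
#include <stdio.h>

#define HDCP_2_2_SEQ_NUM_LEN 3	/* mirrors the kernel macro: 24-bit counter */

typedef uint8_t u8;
typedef uint32_t u32;

/* Same conversions as the helpers added above. */
static u32 seq_num_to_u32(const u8 seq_num[HDCP_2_2_SEQ_NUM_LEN])
{
	return (u32)(seq_num[2] | seq_num[1] << 8 | seq_num[0] << 16);
}

static void u32_to_seq_num(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val)
{
	seq_num[0] = val >> 16;
	seq_num[1] = val >> 8;
	seq_num[2] = val;
}

int main(void)
{
	u8 buf[HDCP_2_2_SEQ_NUM_LEN];

	u32_to_seq_num(buf, 0x012345);
	printf("bytes: %02x %02x %02x\n", buf[0], buf[1], buf[2]);	/* 01 23 45 */
	printf("round trip: 0x%06x\n", seq_num_to_u32(buf));		/* 0x012345 */
	return 0;
}
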
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index fca22d463e1b..dcb95bd9dee6 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -26,6 +26,11 @@
26 26
27#include "drm_audio_component.h" 27#include "drm_audio_component.h"
28 28
29enum i915_component_type {
30 I915_COMPONENT_AUDIO = 1,
31 I915_COMPONENT_HDCP,
32};
33
29/* MAX_PORT is the number of port 34/* MAX_PORT is the number of port
30 * It must be sync with I915_MAX_PORTS defined i915_drv.h 35 * It must be sync with I915_MAX_PORTS defined i915_drv.h
31 */ 36 */
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index c44703f471b3..7523e9a7b6e2 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -100,4 +100,19 @@ extern struct resource intel_graphics_stolen_res;
100#define INTEL_GEN11_BSM_DW1 0xc4 100#define INTEL_GEN11_BSM_DW1 0xc4
101#define INTEL_BSM_MASK (-(1u << 20)) 101#define INTEL_BSM_MASK (-(1u << 20))
102 102
103enum port {
104 PORT_NONE = -1,
105
106 PORT_A = 0,
107 PORT_B,
108 PORT_C,
109 PORT_D,
110 PORT_E,
111 PORT_F,
112
113 I915_MAX_PORTS
114};
115
116#define port_name(p) ((p) + 'A')
117
103#endif /* _I915_DRM_H_ */ 118#endif /* _I915_DRM_H_ */
diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h
new file mode 100644
index 000000000000..8c344255146a
--- /dev/null
+++ b/include/drm/i915_mei_hdcp_interface.h
@@ -0,0 +1,149 @@
1/* SPDX-License-Identifier: (GPL-2.0+) */
2/*
3 * Copyright © 2017-2018 Intel Corporation
4 *
5 * Authors:
6 * Ramalingam C <ramalingam.c@intel.com>
7 */
8
9#ifndef _I915_MEI_HDCP_INTERFACE_H_
10#define _I915_MEI_HDCP_INTERFACE_H_
11
12#include <linux/mutex.h>
13#include <linux/device.h>
14#include <drm/drm_hdcp.h>
15#include <drm/i915_drm.h>
16
17/**
18 * enum hdcp_port_type - HDCP port implementation type defined by ME FW
19 * @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type
20 * @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port
21 * @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON
22 * (HDMI 2.0) solution
23 * @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3)
24 * solution
25 */
26enum hdcp_port_type {
27 HDCP_PORT_TYPE_INVALID,
28 HDCP_PORT_TYPE_INTEGRATED,
29 HDCP_PORT_TYPE_LSPCON,
30 HDCP_PORT_TYPE_CPDP
31};
32
33/**
34 * enum hdcp_wired_protocol - HDCP adaptation used on the port
35 * @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol
36 * @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port
37 * @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port
38 */
39enum hdcp_wired_protocol {
40 HDCP_PROTOCOL_INVALID,
41 HDCP_PROTOCOL_HDMI,
42 HDCP_PROTOCOL_DP
43};
44
45/**
46 * struct hdcp_port_data - intel specific HDCP port data
47 * @port: port index as per I915
48 * @port_type: HDCP port type as per ME FW classification
49 * @protocol: HDCP adaptation as per ME FW
 50 * @k: Number of streams transmitted on a port. Only for DP MST is this != 1
51 * @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated.
52 * Initialized to 0 on AKE_INIT. Incremented after every successful
53 * transmission of RepeaterAuth_Stream_Manage message. When it rolls
54 * over re-Auth has to be triggered.
55 * @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the
56 * streams
57 */
58struct hdcp_port_data {
59 enum port port;
60 u8 port_type;
61 u8 protocol;
62 u16 k;
63 u32 seq_num_m;
64 struct hdcp2_streamid_type *streams;
65};
66
67/**
 68 * struct i915_hdcp_component_ops - ops for HDCP2.2 services.
69 * @owner: Module providing the ops
70 * @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session.
71 * And Prepare AKE_Init.
72 * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate
73 * AKE_Send_Cert and prepare
 74 *				     AKE_Stored_Km/AKE_No_Stored_Km
75 * @verify_hprime: Verify AKE_Send_H_prime
76 * @store_pairing_info: Store pairing info received
77 * @initiate_locality_check: Prepare LC_Init
78 * @verify_lprime: Verify lprime
79 * @get_session_key: Prepare SKE_Send_Eks
80 * @repeater_check_flow_prepare_ack: Validate the Downstream topology
81 * and prepare rep_ack
82 * @verify_mprime: Verify mprime
83 * @enable_hdcp_authentication: Mark a port as authenticated.
84 * @close_hdcp_session: Close the Wired HDCP Tx session per port.
85 * This also disables the authenticated state of the port.
86 */
87struct i915_hdcp_component_ops {
88 /**
89 * @owner: mei_hdcp module
90 */
91 struct module *owner;
92
93 int (*initiate_hdcp2_session)(struct device *dev,
94 struct hdcp_port_data *data,
95 struct hdcp2_ake_init *ake_data);
96 int (*verify_receiver_cert_prepare_km)(struct device *dev,
97 struct hdcp_port_data *data,
98 struct hdcp2_ake_send_cert
99 *rx_cert,
100 bool *km_stored,
101 struct hdcp2_ake_no_stored_km
102 *ek_pub_km,
103 size_t *msg_sz);
104 int (*verify_hprime)(struct device *dev,
105 struct hdcp_port_data *data,
106 struct hdcp2_ake_send_hprime *rx_hprime);
107 int (*store_pairing_info)(struct device *dev,
108 struct hdcp_port_data *data,
109 struct hdcp2_ake_send_pairing_info
110 *pairing_info);
111 int (*initiate_locality_check)(struct device *dev,
112 struct hdcp_port_data *data,
113 struct hdcp2_lc_init *lc_init_data);
114 int (*verify_lprime)(struct device *dev,
115 struct hdcp_port_data *data,
116 struct hdcp2_lc_send_lprime *rx_lprime);
117 int (*get_session_key)(struct device *dev,
118 struct hdcp_port_data *data,
119 struct hdcp2_ske_send_eks *ske_data);
120 int (*repeater_check_flow_prepare_ack)(struct device *dev,
121 struct hdcp_port_data *data,
122 struct hdcp2_rep_send_receiverid_list
123 *rep_topology,
124 struct hdcp2_rep_send_ack
125 *rep_send_ack);
126 int (*verify_mprime)(struct device *dev,
127 struct hdcp_port_data *data,
128 struct hdcp2_rep_stream_ready *stream_ready);
129 int (*enable_hdcp_authentication)(struct device *dev,
130 struct hdcp_port_data *data);
131 int (*close_hdcp_session)(struct device *dev,
132 struct hdcp_port_data *data);
133};
134
135/**
136 * struct i915_hdcp_comp_master - Used for communication between i915
137 * and mei_hdcp drivers for the HDCP2.2 services
138 * @mei_dev: device that provides the HDCP2.2 service from the MEI Bus.
139 * @ops: Ops implemented by the mei_hdcp driver, used by the i915 driver.
140 */
141struct i915_hdcp_comp_master {
142 struct device *mei_dev;
143 const struct i915_hdcp_component_ops *ops;
144
145 /* To protect the above members. */
146 struct mutex mutex;
147};
148
149#endif /* _I915_MEI_HDCP_INTERFACE_H_ */
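
As a hedged sketch (not taken from this series), an i915-side caller would typically take the shared mutex, check that the mei_hdcp component is still bound, and only then call through the ops table; the function and variable names here are illustrative:

static int start_hdcp2_session(struct i915_hdcp_comp_master *comp,
			       struct hdcp_port_data *data,
			       struct hdcp2_ake_init *ake_data)
{
	int ret = -EINVAL;

	mutex_lock(&comp->mutex);
	/* ops/mei_dev are only valid while the mei_hdcp component is bound */
	if (comp->ops && comp->mei_dev)
		ret = comp->ops->initiate_hdcp2_session(comp->mei_dev,
							data, ake_data);
	mutex_unlock(&comp->mutex);

	return ret;
}
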
diff --git a/include/dt-bindings/interconnect/qcom,sdm845.h b/include/dt-bindings/interconnect/qcom,sdm845.h
new file mode 100644
index 000000000000..7b2393be7361
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdm845.h
@@ -0,0 +1,143 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Qualcomm SDM845 interconnect IDs
4 *
5 * Copyright (c) 2018, Linaro Ltd.
6 * Author: Georgi Djakov <georgi.djakov@linaro.org>
7 */
8
9#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDM845_H
10#define __DT_BINDINGS_INTERCONNECT_QCOM_SDM845_H
11
12#define MASTER_A1NOC_CFG 0
13#define MASTER_BLSP_1 1
14#define MASTER_TSIF 2
15#define MASTER_SDCC_2 3
16#define MASTER_SDCC_4 4
17#define MASTER_UFS_CARD 5
18#define MASTER_UFS_MEM 6
19#define MASTER_PCIE_0 7
20#define MASTER_A2NOC_CFG 8
21#define MASTER_QDSS_BAM 9
22#define MASTER_BLSP_2 10
23#define MASTER_CNOC_A2NOC 11
24#define MASTER_CRYPTO 12
25#define MASTER_IPA 13
26#define MASTER_PCIE_1 14
27#define MASTER_QDSS_ETR 15
28#define MASTER_USB3_0 16
29#define MASTER_USB3_1 17
30#define MASTER_CAMNOC_HF0_UNCOMP 18
31#define MASTER_CAMNOC_HF1_UNCOMP 19
32#define MASTER_CAMNOC_SF_UNCOMP 20
33#define MASTER_SPDM 21
34#define MASTER_TIC 22
35#define MASTER_SNOC_CNOC 23
36#define MASTER_QDSS_DAP 24
37#define MASTER_CNOC_DC_NOC 25
38#define MASTER_APPSS_PROC 26
39#define MASTER_GNOC_CFG 27
40#define MASTER_LLCC 28
41#define MASTER_TCU_0 29
42#define MASTER_MEM_NOC_CFG 30
43#define MASTER_GNOC_MEM_NOC 31
44#define MASTER_MNOC_HF_MEM_NOC 32
45#define MASTER_MNOC_SF_MEM_NOC 33
46#define MASTER_SNOC_GC_MEM_NOC 34
47#define MASTER_SNOC_SF_MEM_NOC 35
48#define MASTER_GFX3D 36
49#define MASTER_CNOC_MNOC_CFG 37
50#define MASTER_CAMNOC_HF0 38
51#define MASTER_CAMNOC_HF1 39
52#define MASTER_CAMNOC_SF 40
53#define MASTER_MDP0 41
54#define MASTER_MDP1 42
55#define MASTER_ROTATOR 43
56#define MASTER_VIDEO_P0 44
57#define MASTER_VIDEO_P1 45
58#define MASTER_VIDEO_PROC 46
59#define MASTER_SNOC_CFG 47
60#define MASTER_A1NOC_SNOC 48
61#define MASTER_A2NOC_SNOC 49
62#define MASTER_GNOC_SNOC 50
63#define MASTER_MEM_NOC_SNOC 51
64#define MASTER_ANOC_PCIE_SNOC 52
65#define MASTER_PIMEM 53
66#define MASTER_GIC 54
67#define SLAVE_A1NOC_SNOC 55
68#define SLAVE_SERVICE_A1NOC 56
69#define SLAVE_ANOC_PCIE_A1NOC_SNOC 57
70#define SLAVE_A2NOC_SNOC 58
71#define SLAVE_ANOC_PCIE_SNOC 59
72#define SLAVE_SERVICE_A2NOC 60
73#define SLAVE_CAMNOC_UNCOMP 61
74#define SLAVE_A1NOC_CFG 62
75#define SLAVE_A2NOC_CFG 63
76#define SLAVE_AOP 64
77#define SLAVE_AOSS 65
78#define SLAVE_CAMERA_CFG 66
79#define SLAVE_CLK_CTL 67
80#define SLAVE_CDSP_CFG 68
81#define SLAVE_RBCPR_CX_CFG 69
82#define SLAVE_CRYPTO_0_CFG 70
83#define SLAVE_DCC_CFG 71
84#define SLAVE_CNOC_DDRSS 72
85#define SLAVE_DISPLAY_CFG 73
86#define SLAVE_GLM 74
87#define SLAVE_GFX3D_CFG 75
88#define SLAVE_IMEM_CFG 76
89#define SLAVE_IPA_CFG 77
90#define SLAVE_CNOC_MNOC_CFG 78
91#define SLAVE_PCIE_0_CFG 79
92#define SLAVE_PCIE_1_CFG 80
93#define SLAVE_PDM 81
94#define SLAVE_SOUTH_PHY_CFG 82
95#define SLAVE_PIMEM_CFG 83
96#define SLAVE_PRNG 84
97#define SLAVE_QDSS_CFG 85
98#define SLAVE_BLSP_2 86
99#define SLAVE_BLSP_1 87
100#define SLAVE_SDCC_2 88
101#define SLAVE_SDCC_4 89
102#define SLAVE_SNOC_CFG 90
103#define SLAVE_SPDM_WRAPPER 91
104#define SLAVE_SPSS_CFG 92
105#define SLAVE_TCSR 93
106#define SLAVE_TLMM_NORTH 94
107#define SLAVE_TLMM_SOUTH 95
108#define SLAVE_TSIF 96
109#define SLAVE_UFS_CARD_CFG 97
110#define SLAVE_UFS_MEM_CFG 98
111#define SLAVE_USB3_0 99
112#define SLAVE_USB3_1 100
113#define SLAVE_VENUS_CFG 101
114#define SLAVE_VSENSE_CTRL_CFG 102
115#define SLAVE_CNOC_A2NOC 103
116#define SLAVE_SERVICE_CNOC 104
117#define SLAVE_LLCC_CFG 105
118#define SLAVE_MEM_NOC_CFG 106
119#define SLAVE_GNOC_SNOC 107
120#define SLAVE_GNOC_MEM_NOC 108
121#define SLAVE_SERVICE_GNOC 109
122#define SLAVE_EBI1 110
123#define SLAVE_MSS_PROC_MS_MPU_CFG 111
124#define SLAVE_MEM_NOC_GNOC 112
125#define SLAVE_LLCC 113
126#define SLAVE_MEM_NOC_SNOC 114
127#define SLAVE_SERVICE_MEM_NOC 115
128#define SLAVE_MNOC_SF_MEM_NOC 116
129#define SLAVE_MNOC_HF_MEM_NOC 117
130#define SLAVE_SERVICE_MNOC 118
131#define SLAVE_APPSS 119
132#define SLAVE_SNOC_CNOC 120
133#define SLAVE_SNOC_MEM_NOC_GC 121
134#define SLAVE_SNOC_MEM_NOC_SF 122
135#define SLAVE_IMEM 123
136#define SLAVE_PCIE_0 124
137#define SLAVE_PCIE_1 125
138#define SLAVE_PIMEM 126
139#define SLAVE_SERVICE_SNOC 127
140#define SLAVE_QDSS_STM 128
141#define SLAVE_TCU 129
142
143#endif
diff --git a/include/linux/component.h b/include/linux/component.h
index e71fbbbc74e2..30bcc7e590eb 100644
--- a/include/linux/component.h
+++ b/include/linux/component.h
@@ -4,16 +4,38 @@
4 4
5#include <linux/stddef.h> 5#include <linux/stddef.h>
6 6
7
7struct device; 8struct device;
8 9
10/**
11 * struct component_ops - callbacks for component drivers
12 *
13 * Components are registered with component_add() and unregistered with
14 * component_del().
15 */
9struct component_ops { 16struct component_ops {
17 /**
18 * @bind:
19 *
20 * Called through component_bind_all() when the aggregate driver is
21 * ready to bind the overall driver.
22 */
10 int (*bind)(struct device *comp, struct device *master, 23 int (*bind)(struct device *comp, struct device *master,
11 void *master_data); 24 void *master_data);
25 /**
26 * @unbind:
27 *
28 * Called through component_unbind_all() when the aggregate driver is
 29 * ready to unbind the overall driver, or when component_bind_all() fails
30 * part-ways through and needs to unbind some already bound components.
31 */
12 void (*unbind)(struct device *comp, struct device *master, 32 void (*unbind)(struct device *comp, struct device *master,
13 void *master_data); 33 void *master_data);
14}; 34};
15 35
16int component_add(struct device *, const struct component_ops *); 36int component_add(struct device *, const struct component_ops *);
37int component_add_typed(struct device *dev, const struct component_ops *ops,
38 int subcomponent);
17void component_del(struct device *, const struct component_ops *); 39void component_del(struct device *, const struct component_ops *);
18 40
19int component_bind_all(struct device *master, void *master_data); 41int component_bind_all(struct device *master, void *master_data);
@@ -21,8 +43,42 @@ void component_unbind_all(struct device *master, void *master_data);
21 43
22struct master; 44struct master;
23 45
46/**
47 * struct component_master_ops - callback for the aggregate driver
48 *
49 * Aggregate drivers are registered with component_master_add_with_match() and
50 * unregistered with component_master_del().
51 */
24struct component_master_ops { 52struct component_master_ops {
53 /**
54 * @bind:
55 *
 56 * Called when all components of the aggregate driver, as specified in
57 * the match list passed to component_master_add_with_match(), are
58 * ready. Usually there are 3 steps to bind an aggregate driver:
59 *
60 * 1. Allocate a structure for the aggregate driver.
61 *
62 * 2. Bind all components to the aggregate driver by calling
63 * component_bind_all() with the aggregate driver structure as opaque
64 * pointer data.
65 *
66 * 3. Register the aggregate driver with the subsystem to publish its
67 * interfaces.
68 *
69 * Note that the lifetime of the aggregate driver does not align with
70 * any of the underlying &struct device instances. Therefore devm cannot
71 * be used and all resources acquired or allocated in this callback must
72 * be explicitly released in the @unbind callback.
73 */
25 int (*bind)(struct device *master); 74 int (*bind)(struct device *master);
75 /**
76 * @unbind:
77 *
78 * Called when either the aggregate driver, using
79 * component_master_del(), or one of its components, using
80 * component_del(), is unregistered.
81 */
26 void (*unbind)(struct device *master); 82 void (*unbind)(struct device *master);
27}; 83};
28 84
@@ -37,7 +93,27 @@ void component_match_add_release(struct device *master,
37 struct component_match **matchptr, 93 struct component_match **matchptr,
38 void (*release)(struct device *, void *), 94 void (*release)(struct device *, void *),
39 int (*compare)(struct device *, void *), void *compare_data); 95 int (*compare)(struct device *, void *), void *compare_data);
96void component_match_add_typed(struct device *master,
97 struct component_match **matchptr,
98 int (*compare_typed)(struct device *, int, void *), void *compare_data);
40 99
100/**
101 * component_match_add - add a component match
102 * @master: device with the aggregate driver
103 * @matchptr: pointer to the list of component matches
104 * @compare: compare function to match against all components
105 * @compare_data: opaque pointer passed to the @compare function
106 *
107 * Adds a new component match to the list stored in @matchptr, which the @master
108 * aggregate driver needs to function. The list of component matches pointed to
109 * by @matchptr must be initialized to NULL before adding the first match. This
110 * only matches against components added with component_add().
111 *
112 * The allocated match list in @matchptr is automatically released using devm
113 * actions.
114 *
115 * See also component_match_add_release() and component_match_add_typed().
116 */
41static inline void component_match_add(struct device *master, 117static inline void component_match_add(struct device *master,
42 struct component_match **matchptr, 118 struct component_match **matchptr,
43 int (*compare)(struct device *, void *), void *compare_data) 119 int (*compare)(struct device *, void *), void *compare_data)
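
A condensed, hypothetical aggregate driver following the three bind steps documented above; struct my_drv and the my_* names are placeholders and error paths are trimmed:

struct my_drv { struct device *dev; /* aggregate driver state */ };

static int my_aggregate_bind(struct device *dev)
{
	struct my_drv *drv;
	int ret;

	/* 1. Allocate the aggregate driver structure (no devm here, see above). */
	drv = kzalloc(sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	/* 2. Bind all matched components, passing drv as the opaque pointer. */
	ret = component_bind_all(dev, drv);
	if (ret) {
		kfree(drv);
		return ret;
	}

	/* 3. Register with the subsystem to publish the driver's interfaces. */
	dev_set_drvdata(dev, drv);
	return 0;
}

static void my_aggregate_unbind(struct device *dev)
{
	struct my_drv *drv = dev_get_drvdata(dev);

	component_unbind_all(dev, drv);
	kfree(drv);
}

static const struct component_master_ops my_master_ops = {
	.bind	= my_aggregate_bind,
	.unbind	= my_aggregate_unbind,
};

The match list for such a driver is built with component_match_add() (or component_match_add_typed() for subcomponent-aware matching) before calling component_master_add_with_match().
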
diff --git a/include/linux/gnss.h b/include/linux/gnss.h
index 43546977098c..36968a0f33e8 100644
--- a/include/linux/gnss.h
+++ b/include/linux/gnss.h
@@ -22,6 +22,7 @@ enum gnss_type {
22 GNSS_TYPE_NMEA = 0, 22 GNSS_TYPE_NMEA = 0,
23 GNSS_TYPE_SIRF, 23 GNSS_TYPE_SIRF,
24 GNSS_TYPE_UBX, 24 GNSS_TYPE_UBX,
25 GNSS_TYPE_MTK,
25 26
26 GNSS_TYPE_COUNT 27 GNSS_TYPE_COUNT
27}; 28};
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index dcb6977afce9..64698ec8f2ac 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -222,8 +222,8 @@ static inline u32 hv_get_avail_to_write_percent(
222 * struct contains the fundamental information about an offer. 222 * struct contains the fundamental information about an offer.
223 */ 223 */
224struct vmbus_channel_offer { 224struct vmbus_channel_offer {
225 uuid_le if_type; 225 guid_t if_type;
226 uuid_le if_instance; 226 guid_t if_instance;
227 227
228 /* 228 /*
229 * These two fields are not currently used. 229 * These two fields are not currently used.
@@ -614,8 +614,8 @@ struct vmbus_channel_initiate_contact {
614/* Hyper-V socket: guest's connect()-ing to host */ 614/* Hyper-V socket: guest's connect()-ing to host */
615struct vmbus_channel_tl_connect_request { 615struct vmbus_channel_tl_connect_request {
616 struct vmbus_channel_message_header header; 616 struct vmbus_channel_message_header header;
617 uuid_le guest_endpoint_id; 617 guid_t guest_endpoint_id;
618 uuid_le host_service_id; 618 guid_t host_service_id;
619} __packed; 619} __packed;
620 620
621struct vmbus_channel_version_response { 621struct vmbus_channel_version_response {
@@ -714,7 +714,7 @@ enum vmbus_device_type {
714 714
715struct vmbus_device { 715struct vmbus_device {
716 u16 dev_type; 716 u16 dev_type;
717 uuid_le guid; 717 guid_t guid;
718 bool perf_device; 718 bool perf_device;
719}; 719};
720 720
@@ -751,6 +751,19 @@ struct vmbus_channel {
751 u64 interrupts; /* Host to Guest interrupts */ 751 u64 interrupts; /* Host to Guest interrupts */
752 u64 sig_events; /* Guest to Host events */ 752 u64 sig_events; /* Guest to Host events */
753 753
754 /*
755 * Guest to host interrupts caused by the outbound ring buffer changing
756 * from empty to not empty.
757 */
758 u64 intr_out_empty;
759
760 /*
761 * Indicates that a full outbound ring buffer was encountered. The flag
762 * is set to true when a full outbound ring buffer is encountered and
763 * set to false when a write to the outbound ring buffer is completed.
764 */
765 bool out_full_flag;
766
754 /* Channel callback's invoked in softirq context */ 767 /* Channel callback's invoked in softirq context */
755 struct tasklet_struct callback_event; 768 struct tasklet_struct callback_event;
756 void (*onchannel_callback)(void *context); 769 void (*onchannel_callback)(void *context);
@@ -903,6 +916,24 @@ struct vmbus_channel {
903 * vmbus_connection.work_queue and hang: see vmbus_process_offer(). 916 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
904 */ 917 */
905 struct work_struct add_channel_work; 918 struct work_struct add_channel_work;
919
920 /*
921 * Guest to host interrupts caused by the inbound ring buffer changing
922 * from full to not full while a packet is waiting.
923 */
924 u64 intr_in_full;
925
926 /*
927 * The total number of write operations that encountered a full
928 * outbound ring buffer.
929 */
930 u64 out_full_total;
931
932 /*
933 * The number of write operations that were the first to encounter a
934 * full outbound ring buffer.
935 */
936 u64 out_full_first;
906}; 937};
907 938
908static inline bool is_hvsock_channel(const struct vmbus_channel *c) 939static inline bool is_hvsock_channel(const struct vmbus_channel *c)
@@ -936,6 +967,21 @@ static inline void *get_per_channel_state(struct vmbus_channel *c)
936static inline void set_channel_pending_send_size(struct vmbus_channel *c, 967static inline void set_channel_pending_send_size(struct vmbus_channel *c,
937 u32 size) 968 u32 size)
938{ 969{
970 unsigned long flags;
971
972 if (size) {
973 spin_lock_irqsave(&c->outbound.ring_lock, flags);
974 ++c->out_full_total;
975
976 if (!c->out_full_flag) {
977 ++c->out_full_first;
978 c->out_full_flag = true;
979 }
980 spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
981 } else {
982 c->out_full_flag = false;
983 }
984
939 c->outbound.ring_buffer->pending_send_sz = size; 985 c->outbound.ring_buffer->pending_send_sz = size;
940} 986}
941 987
@@ -1096,7 +1142,7 @@ struct hv_driver {
1096 bool hvsock; 1142 bool hvsock;
1097 1143
1098 /* the device type supported by this driver */ 1144 /* the device type supported by this driver */
1099 uuid_le dev_type; 1145 guid_t dev_type;
1100 const struct hv_vmbus_device_id *id_table; 1146 const struct hv_vmbus_device_id *id_table;
1101 1147
1102 struct device_driver driver; 1148 struct device_driver driver;
@@ -1116,10 +1162,10 @@ struct hv_driver {
1116/* Base device object */ 1162/* Base device object */
1117struct hv_device { 1163struct hv_device {
1118 /* the device type id of this device */ 1164 /* the device type id of this device */
1119 uuid_le dev_type; 1165 guid_t dev_type;
1120 1166
1121 /* the device instance id of this device */ 1167 /* the device instance id of this device */
1122 uuid_le dev_instance; 1168 guid_t dev_instance;
1123 u16 vendor_id; 1169 u16 vendor_id;
1124 u16 device_id; 1170 u16 device_id;
1125 1171
@@ -1188,102 +1234,102 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
1188 * {f8615163-df3e-46c5-913f-f2d2f965ed0e} 1234 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
1189 */ 1235 */
1190#define HV_NIC_GUID \ 1236#define HV_NIC_GUID \
1191 .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \ 1237 .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
1192 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) 1238 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
1193 1239
1194/* 1240/*
1195 * IDE GUID 1241 * IDE GUID
1196 * {32412632-86cb-44a2-9b5c-50d1417354f5} 1242 * {32412632-86cb-44a2-9b5c-50d1417354f5}
1197 */ 1243 */
1198#define HV_IDE_GUID \ 1244#define HV_IDE_GUID \
1199 .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \ 1245 .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
1200 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) 1246 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
1201 1247
1202/* 1248/*
1203 * SCSI GUID 1249 * SCSI GUID
1204 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} 1250 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
1205 */ 1251 */
1206#define HV_SCSI_GUID \ 1252#define HV_SCSI_GUID \
1207 .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \ 1253 .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
1208 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) 1254 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
1209 1255
1210/* 1256/*
1211 * Shutdown GUID 1257 * Shutdown GUID
1212 * {0e0b6031-5213-4934-818b-38d90ced39db} 1258 * {0e0b6031-5213-4934-818b-38d90ced39db}
1213 */ 1259 */
1214#define HV_SHUTDOWN_GUID \ 1260#define HV_SHUTDOWN_GUID \
1215 .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \ 1261 .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
1216 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb) 1262 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
1217 1263
1218/* 1264/*
1219 * Time Synch GUID 1265 * Time Synch GUID
1220 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF} 1266 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
1221 */ 1267 */
1222#define HV_TS_GUID \ 1268#define HV_TS_GUID \
1223 .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \ 1269 .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
1224 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) 1270 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
1225 1271
1226/* 1272/*
1227 * Heartbeat GUID 1273 * Heartbeat GUID
1228 * {57164f39-9115-4e78-ab55-382f3bd5422d} 1274 * {57164f39-9115-4e78-ab55-382f3bd5422d}
1229 */ 1275 */
1230#define HV_HEART_BEAT_GUID \ 1276#define HV_HEART_BEAT_GUID \
1231 .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \ 1277 .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
1232 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) 1278 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
1233 1279
1234/* 1280/*
1235 * KVP GUID 1281 * KVP GUID
1236 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6} 1282 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
1237 */ 1283 */
1238#define HV_KVP_GUID \ 1284#define HV_KVP_GUID \
1239 .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \ 1285 .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
1240 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6) 1286 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
1241 1287
1242/* 1288/*
1243 * Dynamic memory GUID 1289 * Dynamic memory GUID
1244 * {525074dc-8985-46e2-8057-a307dc18a502} 1290 * {525074dc-8985-46e2-8057-a307dc18a502}
1245 */ 1291 */
1246#define HV_DM_GUID \ 1292#define HV_DM_GUID \
1247 .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \ 1293 .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
1248 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) 1294 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
1249 1295
1250/* 1296/*
1251 * Mouse GUID 1297 * Mouse GUID
1252 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a} 1298 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
1253 */ 1299 */
1254#define HV_MOUSE_GUID \ 1300#define HV_MOUSE_GUID \
1255 .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \ 1301 .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
1256 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) 1302 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
1257 1303
1258/* 1304/*
1259 * Keyboard GUID 1305 * Keyboard GUID
1260 * {f912ad6d-2b17-48ea-bd65-f927a61c7684} 1306 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
1261 */ 1307 */
1262#define HV_KBD_GUID \ 1308#define HV_KBD_GUID \
1263 .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \ 1309 .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
1264 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84) 1310 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
1265 1311
1266/* 1312/*
1267 * VSS (Backup/Restore) GUID 1313 * VSS (Backup/Restore) GUID
1268 */ 1314 */
1269#define HV_VSS_GUID \ 1315#define HV_VSS_GUID \
1270 .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \ 1316 .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
1271 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40) 1317 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
1272/* 1318/*
1273 * Synthetic Video GUID 1319 * Synthetic Video GUID
1274 * {DA0A7802-E377-4aac-8E77-0558EB1073F8} 1320 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
1275 */ 1321 */
1276#define HV_SYNTHVID_GUID \ 1322#define HV_SYNTHVID_GUID \
1277 .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \ 1323 .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
1278 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) 1324 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
1279 1325
1280/* 1326/*
1281 * Synthetic FC GUID 1327 * Synthetic FC GUID
1282 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} 1328 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
1283 */ 1329 */
1284#define HV_SYNTHFC_GUID \ 1330#define HV_SYNTHFC_GUID \
1285 .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \ 1331 .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
1286 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda) 1332 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
1287 1333
1288/* 1334/*
1289 * Guest File Copy Service 1335 * Guest File Copy Service
@@ -1291,16 +1337,16 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
1291 */ 1337 */
1292 1338
1293#define HV_FCOPY_GUID \ 1339#define HV_FCOPY_GUID \
1294 .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \ 1340 .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
1295 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92) 1341 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
1296 1342
1297/* 1343/*
1298 * NetworkDirect. This is the guest RDMA service. 1344 * NetworkDirect. This is the guest RDMA service.
1299 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501} 1345 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
1300 */ 1346 */
1301#define HV_ND_GUID \ 1347#define HV_ND_GUID \
1302 .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \ 1348 .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
1303 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01) 1349 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
1304 1350
1305/* 1351/*
1306 * PCI Express Pass Through 1352 * PCI Express Pass Through
@@ -1308,8 +1354,8 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
1308 */ 1354 */
1309 1355
1310#define HV_PCIE_GUID \ 1356#define HV_PCIE_GUID \
1311 .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \ 1357 .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
1312 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f) 1358 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
1313 1359
1314/* 1360/*
1315 * Linux doesn't support the 3 devices: the first two are for 1361 * Linux doesn't support the 3 devices: the first two are for
@@ -1321,16 +1367,16 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
1321 */ 1367 */
1322 1368
1323#define HV_AVMA1_GUID \ 1369#define HV_AVMA1_GUID \
1324 .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \ 1370 .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
1325 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5) 1371 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
1326 1372
1327#define HV_AVMA2_GUID \ 1373#define HV_AVMA2_GUID \
1328 .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \ 1374 .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
1329 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b) 1375 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
1330 1376
1331#define HV_RDV_GUID \ 1377#define HV_RDV_GUID \
1332 .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \ 1378 .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
1333 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe) 1379 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
1334 1380
1335/* 1381/*
1336 * Common header for Hyper-V ICs 1382 * Common header for Hyper-V ICs
@@ -1432,7 +1478,7 @@ struct ictimesync_ref_data {
1432struct hyperv_service_callback { 1478struct hyperv_service_callback {
1433 u8 msg_type; 1479 u8 msg_type;
1434 char *log_msg; 1480 char *log_msg;
1435 uuid_le data; 1481 guid_t data;
1436 struct vmbus_channel *channel; 1482 struct vmbus_channel *channel;
1437 void (*callback)(void *context); 1483 void (*callback)(void *context);
1438}; 1484};
@@ -1452,8 +1498,8 @@ void vmbus_setevent(struct vmbus_channel *channel);
1452 1498
1453extern __u32 vmbus_proto_version; 1499extern __u32 vmbus_proto_version;
1454 1500
1455int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, 1501int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
1456 const uuid_le *shv_host_servie_id); 1502 const guid_t *shv_host_servie_id);
1457void vmbus_set_event(struct vmbus_channel *channel); 1503void vmbus_set_event(struct vmbus_channel *channel);
1458 1504
1459/* Get the start of the ring buffer. */ 1505/* Get the start of the ring buffer. */
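
With the uuid_le to guid_t conversion, VMBus drivers keep declaring their device tables through these macros unchanged; a hedged sketch (the driver name is illustrative):

static const struct hv_vmbus_device_id my_id_table[] = {
	{ HV_NIC_GUID, },
	{ },
};

static struct hv_driver my_hv_driver = {
	.name		= "my_hv_driver",
	.id_table	= my_id_table,
	/* .probe and .remove omitted in this sketch */
};
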
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
new file mode 100644
index 000000000000..63caccadc2db
--- /dev/null
+++ b/include/linux/interconnect-provider.h
@@ -0,0 +1,142 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2018, Linaro Ltd.
4 * Author: Georgi Djakov <georgi.djakov@linaro.org>
5 */
6
7#ifndef __LINUX_INTERCONNECT_PROVIDER_H
8#define __LINUX_INTERCONNECT_PROVIDER_H
9
10#include <linux/interconnect.h>
11
12#define icc_units_to_bps(bw) ((bw) * 1000ULL)
13
14struct icc_node;
15struct of_phandle_args;
16
17/**
18 * struct icc_onecell_data - driver data for onecell interconnect providers
19 *
20 * @num_nodes: number of nodes in this device
21 * @nodes: array of pointers to the nodes in this device
22 */
23struct icc_onecell_data {
24 unsigned int num_nodes;
25 struct icc_node *nodes[];
26};
27
28struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
29 void *data);
30
31/**
32 * struct icc_provider - interconnect provider (controller) entity that might
33 * provide multiple interconnect controls
34 *
35 * @provider_list: list of the registered interconnect providers
36 * @nodes: internal list of the interconnect provider nodes
37 * @set: pointer to device specific set operation function
38 * @aggregate: pointer to device specific aggregate operation function
39 * @xlate: provider-specific callback for mapping nodes from phandle arguments
40 * @dev: the device this interconnect provider belongs to
41 * @users: count of active users
42 * @data: pointer to private data
43 */
44struct icc_provider {
45 struct list_head provider_list;
46 struct list_head nodes;
47 int (*set)(struct icc_node *src, struct icc_node *dst);
48 int (*aggregate)(struct icc_node *node, u32 avg_bw, u32 peak_bw,
49 u32 *agg_avg, u32 *agg_peak);
50 struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data);
51 struct device *dev;
52 int users;
53 void *data;
54};
55
56/**
57 * struct icc_node - entity that is part of the interconnect topology
58 *
59 * @id: platform specific node id
60 * @name: node name used in debugfs
61 * @links: a list of targets pointing to where we can go next when traversing
62 * @num_links: number of links to other interconnect nodes
63 * @provider: points to the interconnect provider of this node
64 * @node_list: the list entry in the parent provider's "nodes" list
65 * @search_list: list used when walking the nodes graph
66 * @reverse: pointer to previous node when walking the nodes graph
67 * @is_traversed: flag that is used when walking the nodes graph
68 * @req_list: a list of QoS constraint requests associated with this node
69 * @avg_bw: aggregated value of average bandwidth requests from all consumers
70 * @peak_bw: aggregated value of peak bandwidth requests from all consumers
71 * @data: pointer to private data
72 */
73struct icc_node {
74 int id;
75 const char *name;
76 struct icc_node **links;
77 size_t num_links;
78
79 struct icc_provider *provider;
80 struct list_head node_list;
81 struct list_head search_list;
82 struct icc_node *reverse;
83 u8 is_traversed:1;
84 struct hlist_head req_list;
85 u32 avg_bw;
86 u32 peak_bw;
87 void *data;
88};
89
90#if IS_ENABLED(CONFIG_INTERCONNECT)
91
92struct icc_node *icc_node_create(int id);
93void icc_node_destroy(int id);
94int icc_link_create(struct icc_node *node, const int dst_id);
95int icc_link_destroy(struct icc_node *src, struct icc_node *dst);
96void icc_node_add(struct icc_node *node, struct icc_provider *provider);
97void icc_node_del(struct icc_node *node);
98int icc_provider_add(struct icc_provider *provider);
99int icc_provider_del(struct icc_provider *provider);
100
101#else
102
103static inline struct icc_node *icc_node_create(int id)
104{
105 return ERR_PTR(-ENOTSUPP);
106}
107
108static inline void icc_node_destroy(int id)
109{
110}
111
112static inline int icc_link_create(struct icc_node *node, const int dst_id)
113{
114 return -ENOTSUPP;
115}
116
117static inline int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
118{
119 return -ENOTSUPP;
120}
121
122static inline void icc_node_add(struct icc_node *node, struct icc_provider *provider)
123{
124}
125
126static inline void icc_node_del(struct icc_node *node)
127{
128}
129
130static inline int icc_provider_add(struct icc_provider *provider)
131{
132 return -ENOTSUPP;
133}
134
135static inline int icc_provider_del(struct icc_provider *provider)
136{
137 return -ENOTSUPP;
138}
139
140#endif /* CONFIG_INTERCONNECT */
141
142#endif /* __LINUX_INTERCONNECT_PROVIDER_H */
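
A minimal, hypothetical provider built on this API, assuming a single node with made-up IDs; a real provider would describe its full topology and program hardware in the set() callback:

#include <linux/interconnect-provider.h>
#include <linux/platform_device.h>

static int my_icc_set(struct icc_node *src, struct icc_node *dst)
{
	/* Program the hardware from src->avg_bw / src->peak_bw here. */
	return 0;
}

static int my_icc_aggregate(struct icc_node *node, u32 avg_bw, u32 peak_bw,
			    u32 *agg_avg, u32 *agg_peak)
{
	*agg_avg += avg_bw;
	*agg_peak = max(*agg_peak, peak_bw);
	return 0;
}

static int my_icc_probe(struct platform_device *pdev)
{
	struct icc_onecell_data *data;
	struct icc_provider *provider;
	struct icc_node *node;
	int ret;

	provider = devm_kzalloc(&pdev->dev, sizeof(*provider), GFP_KERNEL);
	data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, 1), GFP_KERNEL);
	if (!provider || !data)
		return -ENOMEM;

	provider->dev = &pdev->dev;
	provider->set = my_icc_set;
	provider->aggregate = my_icc_aggregate;
	provider->xlate = of_icc_xlate_onecell;
	provider->data = data;
	INIT_LIST_HEAD(&provider->nodes);

	ret = icc_provider_add(provider);
	if (ret)
		return ret;

	node = icc_node_create(0);		/* 0: hypothetical node id */
	if (IS_ERR(node)) {
		icc_provider_del(provider);
		return PTR_ERR(node);
	}
	node->name = "my_master";
	icc_node_add(node, provider);
	data->nodes[0] = node;
	data->num_nodes = 1;

	return icc_link_create(node, 1);	/* 1: hypothetical peer id */
}
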
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
new file mode 100644
index 000000000000..dc25864755ba
--- /dev/null
+++ b/include/linux/interconnect.h
@@ -0,0 +1,59 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2018-2019, Linaro Ltd.
4 * Author: Georgi Djakov <georgi.djakov@linaro.org>
5 */
6
7#ifndef __LINUX_INTERCONNECT_H
8#define __LINUX_INTERCONNECT_H
9
10#include <linux/mutex.h>
11#include <linux/types.h>
12
13/* macros for converting to icc units */
14#define Bps_to_icc(x) ((x) / 1000)
15#define kBps_to_icc(x) (x)
16#define MBps_to_icc(x) ((x) * 1000)
17#define GBps_to_icc(x) ((x) * 1000 * 1000)
18#define bps_to_icc(x) (1)
19#define kbps_to_icc(x) ((x) / 8 + ((x) % 8 ? 1 : 0))
20#define Mbps_to_icc(x) ((x) * 1000 / 8)
21#define Gbps_to_icc(x) ((x) * 1000 * 1000 / 8)
22
23struct icc_path;
24struct device;
25
26#if IS_ENABLED(CONFIG_INTERCONNECT)
27
28struct icc_path *icc_get(struct device *dev, const int src_id,
29 const int dst_id);
30struct icc_path *of_icc_get(struct device *dev, const char *name);
31void icc_put(struct icc_path *path);
32int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
33
34#else
35
36static inline struct icc_path *icc_get(struct device *dev, const int src_id,
37 const int dst_id)
38{
39 return NULL;
40}
41
42static inline struct icc_path *of_icc_get(struct device *dev,
43 const char *name)
44{
45 return NULL;
46}
47
48static inline void icc_put(struct icc_path *path)
49{
50}
51
52static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
53{
54 return 0;
55}
56
57#endif /* CONFIG_INTERCONNECT */
58
59#endif /* __LINUX_INTERCONNECT_H */
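
A hedged consumer-side sketch: the path name "memory" and the bandwidth values are illustrative, and dev is the consumer's struct device:

	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, "memory");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* request 100 MB/s average, 200 MB/s peak on this path */
	ret = icc_set_bw(path, MBps_to_icc(100), MBps_to_icc(200));
	if (ret)
		dev_err(dev, "failed to set interconnect bandwidth: %d\n", ret);

	/* ... */
	icc_put(path);
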
diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h
index 862d786a904f..ae21b72cce85 100644
--- a/include/linux/io-64-nonatomic-hi-lo.h
+++ b/include/linux/io-64-nonatomic-hi-lo.h
@@ -55,4 +55,68 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
55#define writeq_relaxed hi_lo_writeq_relaxed 55#define writeq_relaxed hi_lo_writeq_relaxed
56#endif 56#endif
57 57
58#ifndef ioread64_hi_lo
59#define ioread64_hi_lo ioread64_hi_lo
60static inline u64 ioread64_hi_lo(void __iomem *addr)
61{
62 u32 low, high;
63
64 high = ioread32(addr + sizeof(u32));
65 low = ioread32(addr);
66
67 return low + ((u64)high << 32);
68}
69#endif
70
71#ifndef iowrite64_hi_lo
72#define iowrite64_hi_lo iowrite64_hi_lo
73static inline void iowrite64_hi_lo(u64 val, void __iomem *addr)
74{
75 iowrite32(val >> 32, addr + sizeof(u32));
76 iowrite32(val, addr);
77}
78#endif
79
80#ifndef ioread64be_hi_lo
81#define ioread64be_hi_lo ioread64be_hi_lo
82static inline u64 ioread64be_hi_lo(void __iomem *addr)
83{
84 u32 low, high;
85
86 high = ioread32be(addr);
87 low = ioread32be(addr + sizeof(u32));
88
89 return low + ((u64)high << 32);
90}
91#endif
92
93#ifndef iowrite64be_hi_lo
94#define iowrite64be_hi_lo iowrite64be_hi_lo
95static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr)
96{
97 iowrite32be(val >> 32, addr);
98 iowrite32be(val, addr + sizeof(u32));
99}
100#endif
101
102#ifndef ioread64
103#define ioread64_is_nonatomic
104#define ioread64 ioread64_hi_lo
105#endif
106
107#ifndef iowrite64
108#define iowrite64_is_nonatomic
109#define iowrite64 iowrite64_hi_lo
110#endif
111
112#ifndef ioread64be
113#define ioread64be_is_nonatomic
114#define ioread64be ioread64be_hi_lo
115#endif
116
117#ifndef iowrite64be
118#define iowrite64be_is_nonatomic
119#define iowrite64be iowrite64be_hi_lo
120#endif
121
58#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */ 122#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */
diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h
index d042e7bb5adb..faaa842dbdb9 100644
--- a/include/linux/io-64-nonatomic-lo-hi.h
+++ b/include/linux/io-64-nonatomic-lo-hi.h
@@ -55,4 +55,68 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr)
55#define writeq_relaxed lo_hi_writeq_relaxed 55#define writeq_relaxed lo_hi_writeq_relaxed
56#endif 56#endif
57 57
58#ifndef ioread64_lo_hi
59#define ioread64_lo_hi ioread64_lo_hi
60static inline u64 ioread64_lo_hi(void __iomem *addr)
61{
62 u32 low, high;
63
64 low = ioread32(addr);
65 high = ioread32(addr + sizeof(u32));
66
67 return low + ((u64)high << 32);
68}
69#endif
70
71#ifndef iowrite64_lo_hi
72#define iowrite64_lo_hi iowrite64_lo_hi
73static inline void iowrite64_lo_hi(u64 val, void __iomem *addr)
74{
75 iowrite32(val, addr);
76 iowrite32(val >> 32, addr + sizeof(u32));
77}
78#endif
79
80#ifndef ioread64be_lo_hi
81#define ioread64be_lo_hi ioread64be_lo_hi
82static inline u64 ioread64be_lo_hi(void __iomem *addr)
83{
84 u32 low, high;
85
86 low = ioread32be(addr + sizeof(u32));
87 high = ioread32be(addr);
88
89 return low + ((u64)high << 32);
90}
91#endif
92
93#ifndef iowrite64be_lo_hi
94#define iowrite64be_lo_hi iowrite64be_lo_hi
95static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr)
96{
97 iowrite32be(val, addr + sizeof(u32));
98 iowrite32be(val >> 32, addr);
99}
100#endif
101
102#ifndef ioread64
103#define ioread64_is_nonatomic
104#define ioread64 ioread64_lo_hi
105#endif
106
107#ifndef iowrite64
108#define iowrite64_is_nonatomic
109#define iowrite64 iowrite64_lo_hi
110#endif
111
112#ifndef ioread64be
113#define ioread64be_is_nonatomic
114#define ioread64be ioread64be_lo_hi
115#endif
116
117#ifndef iowrite64be
118#define iowrite64be_is_nonatomic
119#define iowrite64be iowrite64be_lo_hi
120#endif
121
58#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */ 122#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */
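
Both headers give drivers ioread64()/iowrite64() on platforms that lack atomic 64-bit MMIO, splitting each access into two 32-bit accesses in the order the header name implies. A hypothetical user (register offset is arbitrary):

#include <linux/io-64-nonatomic-lo-hi.h>	/* or -hi-lo, per device ordering */

/* 'base' is an ioremap()ed region of the device */
static u64 read_counter(void __iomem *base)
{
	return ioread64(base + 0x10);	/* two 32-bit reads, low then high */
}

static void set_counter(void __iomem *base, u64 val)
{
	iowrite64(val, base + 0x10);
}
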
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 7fde40e17c8b..03b6ba2a63f8 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -55,6 +55,8 @@ struct mei_cl_device {
55 void *priv_data; 55 void *priv_data;
56}; 56};
57 57
58#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev)
59
58struct mei_cl_driver { 60struct mei_cl_driver {
59 struct device_driver driver; 61 struct device_driver driver;
60 const char *name; 62 const char *name;
diff --git a/include/linux/nvram.h b/include/linux/nvram.h
index 28bfb9ab94ca..d29d9c93a927 100644
--- a/include/linux/nvram.h
+++ b/include/linux/nvram.h
@@ -2,13 +2,132 @@
2#ifndef _LINUX_NVRAM_H 2#ifndef _LINUX_NVRAM_H
3#define _LINUX_NVRAM_H 3#define _LINUX_NVRAM_H
4 4
5#include <linux/errno.h>
5#include <uapi/linux/nvram.h> 6#include <uapi/linux/nvram.h>
6 7
7/* __foo is foo without grabbing the rtc_lock - get it yourself */ 8#ifdef CONFIG_PPC
8extern unsigned char __nvram_read_byte(int i); 9#include <asm/machdep.h>
9extern unsigned char nvram_read_byte(int i); 10#endif
10extern void __nvram_write_byte(unsigned char c, int i); 11
11extern void nvram_write_byte(unsigned char c, int i); 12/**
12extern int __nvram_check_checksum(void); 13 * struct nvram_ops - NVRAM functionality made available to drivers
13extern int nvram_check_checksum(void); 14 * @read: validate checksum (if any) then load a range of bytes from NVRAM
15 * @write: store a range of bytes to NVRAM then update checksum (if any)
16 * @read_byte: load a single byte from NVRAM
17 * @write_byte: store a single byte to NVRAM
18 * @get_size: return the fixed number of bytes in the NVRAM
19 *
20 * Architectures which provide an nvram ops struct need not implement all
21 * of these methods. If the NVRAM hardware can be accessed only one byte
22 * at a time then it may be sufficient to provide .read_byte and .write_byte.
23 * If the NVRAM has a checksum (and it is to be checked) the .read and
24 * .write methods can be used to implement that efficiently.
25 *
26 * Portable drivers may use the wrapper functions defined here.
27 * The nvram_read() and nvram_write() functions call the .read and .write
28 * methods when available and fall back on the .read_byte and .write_byte
29 * methods otherwise.
30 */
31
32struct nvram_ops {
33 ssize_t (*get_size)(void);
34 unsigned char (*read_byte)(int);
35 void (*write_byte)(unsigned char, int);
36 ssize_t (*read)(char *, size_t, loff_t *);
37 ssize_t (*write)(char *, size_t, loff_t *);
38#if defined(CONFIG_X86) || defined(CONFIG_M68K)
39 long (*initialize)(void);
40 long (*set_checksum)(void);
41#endif
42};
43
44extern const struct nvram_ops arch_nvram_ops;
45
46static inline ssize_t nvram_get_size(void)
47{
48#ifdef CONFIG_PPC
49 if (ppc_md.nvram_size)
50 return ppc_md.nvram_size();
51#else
52 if (arch_nvram_ops.get_size)
53 return arch_nvram_ops.get_size();
54#endif
55 return -ENODEV;
56}
57
58static inline unsigned char nvram_read_byte(int addr)
59{
60#ifdef CONFIG_PPC
61 if (ppc_md.nvram_read_val)
62 return ppc_md.nvram_read_val(addr);
63#else
64 if (arch_nvram_ops.read_byte)
65 return arch_nvram_ops.read_byte(addr);
66#endif
67 return 0xFF;
68}
69
70static inline void nvram_write_byte(unsigned char val, int addr)
71{
72#ifdef CONFIG_PPC
73 if (ppc_md.nvram_write_val)
74 ppc_md.nvram_write_val(addr, val);
75#else
76 if (arch_nvram_ops.write_byte)
77 arch_nvram_ops.write_byte(val, addr);
78#endif
79}
80
81static inline ssize_t nvram_read_bytes(char *buf, size_t count, loff_t *ppos)
82{
83 ssize_t nvram_size = nvram_get_size();
84 loff_t i;
85 char *p = buf;
86
87 if (nvram_size < 0)
88 return nvram_size;
89 for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count)
90 *p = nvram_read_byte(i);
91 *ppos = i;
92 return p - buf;
93}
94
95static inline ssize_t nvram_write_bytes(char *buf, size_t count, loff_t *ppos)
96{
97 ssize_t nvram_size = nvram_get_size();
98 loff_t i;
99 char *p = buf;
100
101 if (nvram_size < 0)
102 return nvram_size;
103 for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count)
104 nvram_write_byte(*p, i);
105 *ppos = i;
106 return p - buf;
107}
108
109static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos)
110{
111#ifdef CONFIG_PPC
112 if (ppc_md.nvram_read)
113 return ppc_md.nvram_read(buf, count, ppos);
114#else
115 if (arch_nvram_ops.read)
116 return arch_nvram_ops.read(buf, count, ppos);
117#endif
118 return nvram_read_bytes(buf, count, ppos);
119}
120
121static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos)
122{
123#ifdef CONFIG_PPC
124 if (ppc_md.nvram_write)
125 return ppc_md.nvram_write(buf, count, ppos);
126#else
127 if (arch_nvram_ops.write)
128 return arch_nvram_ops.write(buf, count, ppos);
129#endif
130 return nvram_write_bytes(buf, count, ppos);
131}
132
14#endif /* _LINUX_NVRAM_H */ 133#endif /* _LINUX_NVRAM_H */
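
A hedged example of the portable wrappers above; the offset and length are arbitrary:

	char buf[16];
	loff_t pos = 0;
	ssize_t ret;

	ret = nvram_read(buf, sizeof(buf), &pos);	/* falls back to byte access */
	if (ret < 0)
		pr_warn("NVRAM not available: %zd\n", ret);
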
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 397607a0c0eb..f41f1d041e2c 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -460,6 +460,7 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *,
460 void *, size_t, int); 460 void *, size_t, int);
461 461
462/* IEEE1284.3 functions */ 462/* IEEE1284.3 functions */
463#define daisy_dev_name "Device ID probe"
463extern int parport_daisy_init (struct parport *port); 464extern int parport_daisy_init (struct parport *port);
464extern void parport_daisy_fini (struct parport *port); 465extern void parport_daisy_fini (struct parport *port);
465extern struct pardevice *parport_open (int devnum, const char *name); 466extern struct pardevice *parport_open (int devnum, const char *name);
@@ -468,6 +469,18 @@ extern ssize_t parport_device_id (int devnum, char *buffer, size_t len);
468extern void parport_daisy_deselect_all (struct parport *port); 469extern void parport_daisy_deselect_all (struct parport *port);
469extern int parport_daisy_select (struct parport *port, int daisy, int mode); 470extern int parport_daisy_select (struct parport *port, int daisy, int mode);
470 471
472#ifdef CONFIG_PARPORT_1284
473extern int daisy_drv_init(void);
474extern void daisy_drv_exit(void);
475#else
476static inline int daisy_drv_init(void)
477{
478 return 0;
479}
480
481static inline void daisy_drv_exit(void) {}
482#endif
483
471/* Lowlevel drivers _can_ call this support function to handle irqs. */ 484/* Lowlevel drivers _can_ call this support function to handle irqs. */
472static inline void parport_generic_irq(struct parport *port) 485static inline void parport_generic_irq(struct parport *port)
473{ 486{
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index b724ef7005de..eaa1e762bf06 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -45,6 +45,7 @@
45#define VMCI_CAPS_GUESTCALL 0x2 45#define VMCI_CAPS_GUESTCALL 0x2
46#define VMCI_CAPS_DATAGRAM 0x4 46#define VMCI_CAPS_DATAGRAM 0x4
47#define VMCI_CAPS_NOTIFICATIONS 0x8 47#define VMCI_CAPS_NOTIFICATIONS 0x8
48#define VMCI_CAPS_PPN64 0x10
48 49
49/* Interrupt Cause register bits. */ 50/* Interrupt Cause register bits. */
50#define VMCI_ICR_DATAGRAM 0x1 51#define VMCI_ICR_DATAGRAM 0x1
@@ -569,8 +570,10 @@ struct vmci_resource_query_msg {
569 */ 570 */
570struct vmci_notify_bm_set_msg { 571struct vmci_notify_bm_set_msg {
571 struct vmci_datagram hdr; 572 struct vmci_datagram hdr;
572 u32 bitmap_ppn; 573 union {
573 u32 _pad; 574 u32 bitmap_ppn32;
575 u64 bitmap_ppn64;
576 };
574}; 577};
575 578
576/* 579/*
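
A sketch of how a guest driver might fill the widened field, gated on the new capability bit; caps and bitmap_ppn are placeholders, and the datagram header setup is omitted:

	struct vmci_notify_bm_set_msg bm_msg = { };

	if (caps & VMCI_CAPS_PPN64)
		bm_msg.bitmap_ppn64 = bitmap_ppn;
	else
		bm_msg.bitmap_ppn32 = (u32)bitmap_ppn;
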
diff --git a/include/sound/hda_component.h b/include/sound/hda_component.h
index 2ec31b358950..d4804c72d959 100644
--- a/include/sound/hda_component.h
+++ b/include/sound/hda_component.h
@@ -20,7 +20,7 @@ int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
20 bool *audio_enabled, char *buffer, int max_bytes); 20 bool *audio_enabled, char *buffer, int max_bytes);
21int snd_hdac_acomp_init(struct hdac_bus *bus, 21int snd_hdac_acomp_init(struct hdac_bus *bus,
22 const struct drm_audio_component_audio_ops *aops, 22 const struct drm_audio_component_audio_ops *aops,
23 int (*match_master)(struct device *, void *), 23 int (*match_master)(struct device *, int, void *),
24 size_t extra_size); 24 size_t extra_size);
25int snd_hdac_acomp_exit(struct hdac_bus *bus); 25int snd_hdac_acomp_exit(struct hdac_bus *bus);
26int snd_hdac_acomp_register_notifier(struct hdac_bus *bus, 26int snd_hdac_acomp_register_notifier(struct hdac_bus *bus,
@@ -47,7 +47,8 @@ static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t ni
47} 47}
48static inline int snd_hdac_acomp_init(struct hdac_bus *bus, 48static inline int snd_hdac_acomp_init(struct hdac_bus *bus,
49 const struct drm_audio_component_audio_ops *aops, 49 const struct drm_audio_component_audio_ops *aops,
50 int (*match_master)(struct device *, void *), 50 int (*match_master)(struct device *,
51 int, void *),
51 size_t extra_size) 52 size_t extra_size)
52{ 53{
53 return -ENODEV; 54 return -ENODEV;
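
The extra int in the match callback carries the subcomponent requested through the typed component API (e.g. I915_COMPONENT_AUDIO from the i915_component.h change above). A hypothetical callback; a real one would also verify that the device is the expected GPU:

static int my_audio_master_match(struct device *dev, int subcomponent,
				 void *data)
{
	return subcomponent == I915_COMPONENT_AUDIO;
}

This is the function a sound driver would pass as the match_master argument of snd_hdac_acomp_init().
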
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index b9ba520f7e4b..2832134e5397 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -41,6 +41,14 @@ enum {
41enum { 41enum {
42 FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, 42 FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
43 FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, 43 FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
44
45 /**
46 * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
47 *
48 * Only when set, causes senders to include their security
49 * context
50 */
51 FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
44}; 52};
45 53
46#ifdef BINDER_IPC_32BIT 54#ifdef BINDER_IPC_32BIT
@@ -218,6 +226,7 @@ struct binder_node_info_for_ref {
218#define BINDER_VERSION _IOWR('b', 9, struct binder_version) 226#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
219#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info) 227#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
220#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref) 228#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
229#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
221 230
222/* 231/*
223 * NOTE: Two special error codes you should check for when calling 232 * NOTE: Two special error codes you should check for when calling
@@ -276,6 +285,11 @@ struct binder_transaction_data {
276 } data; 285 } data;
277}; 286};
278 287
288struct binder_transaction_data_secctx {
289 struct binder_transaction_data transaction_data;
290 binder_uintptr_t secctx;
291};
292
279struct binder_transaction_data_sg { 293struct binder_transaction_data_sg {
280 struct binder_transaction_data transaction_data; 294 struct binder_transaction_data transaction_data;
281 binder_size_t buffers_size; 295 binder_size_t buffers_size;
@@ -311,6 +325,11 @@ enum binder_driver_return_protocol {
311 BR_OK = _IO('r', 1), 325 BR_OK = _IO('r', 1),
312 /* No parameters! */ 326 /* No parameters! */
313 327
328 BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
329 struct binder_transaction_data_secctx),
330 /*
331 * binder_transaction_data_secctx: the received command.
332 */
314 BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), 333 BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
315 BR_REPLY = _IOR('r', 3, struct binder_transaction_data), 334 BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
316 /* 335 /*
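
A hedged userspace fragment showing the new context-manager registration that opts into security contexts; error handling and the usual binder mmap setup are omitted, and the fallback shows what a manager could do on older kernels:

	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
	struct flat_binder_object obj = { };

	obj.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
	if (ioctl(fd, BINDER_SET_CONTEXT_MGR_EXT, &obj) < 0)
		ioctl(fd, BINDER_SET_CONTEXT_MGR, 0);

Once registered this way, incoming transactions arrive as BR_TRANSACTION_SEC_CTX, with the sender's security context reachable through the secctx pointer.
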
diff --git a/include/uapi/linux/pmu.h b/include/uapi/linux/pmu.h
index 97256f90e6df..f2fc1bd80017 100644
--- a/include/uapi/linux/pmu.h
+++ b/include/uapi/linux/pmu.h
@@ -19,7 +19,9 @@
19#define PMU_POWER_CTRL 0x11 /* control power of some devices */ 19#define PMU_POWER_CTRL 0x11 /* control power of some devices */
20#define PMU_ADB_CMD 0x20 /* send ADB packet */ 20#define PMU_ADB_CMD 0x20 /* send ADB packet */
21#define PMU_ADB_POLL_OFF 0x21 /* disable ADB auto-poll */ 21#define PMU_ADB_POLL_OFF 0x21 /* disable ADB auto-poll */
22#define PMU_WRITE_XPRAM 0x32 /* write eXtended Parameter RAM */
22#define PMU_WRITE_NVRAM 0x33 /* write non-volatile RAM */ 23#define PMU_WRITE_NVRAM 0x33 /* write non-volatile RAM */
24#define PMU_READ_XPRAM 0x3a /* read eXtended Parameter RAM */
23#define PMU_READ_NVRAM 0x3b /* read non-volatile RAM */ 25#define PMU_READ_NVRAM 0x3b /* read non-volatile RAM */
24#define PMU_SET_RTC 0x30 /* set real-time clock */ 26#define PMU_SET_RTC 0x30 /* set real-time clock */
25#define PMU_READ_RTC 0x38 /* read real-time clock */ 27#define PMU_READ_RTC 0x38 /* read real-time clock */
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
new file mode 100644
index 000000000000..6d701af9fc42
--- /dev/null
+++ b/include/uapi/misc/fastrpc.h
@@ -0,0 +1,41 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef __QCOM_FASTRPC_H__
4#define __QCOM_FASTRPC_H__
5
6#include <linux/types.h>
7
8#define FASTRPC_IOCTL_ALLOC_DMA_BUFF _IOWR('R', 1, struct fastrpc_alloc_dma_buf)
9#define FASTRPC_IOCTL_FREE_DMA_BUFF _IOWR('R', 2, __u32)
10#define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke)
11#define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4)
12#define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create)
13
14struct fastrpc_invoke_args {
15 __u64 ptr;
16 __u64 length;
17 __s32 fd;
18 __u32 reserved;
19};
20
21struct fastrpc_invoke {
22 __u32 handle;
23 __u32 sc;
24 __u64 args;
25};
26
27struct fastrpc_init_create {
28 __u32 filelen; /* elf file length */
29 __s32 filefd; /* fd for the file */
30 __u32 attrs;
31 __u32 siglen;
32 __u64 file; /* pointer to elf file */
33};
34
35struct fastrpc_alloc_dma_buf {
36 __s32 fd; /* fd */
37 __u32 flags; /* flags to map with */
38 __u64 size; /* size */
39};
40
41#endif /* __QCOM_FASTRPC_H__ */
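
A hedged userspace sketch: attach to the DSP's context, then allocate a DMA buffer. The device node name and use_buffer() are placeholders:

	int fd = open("/dev/fastrpc-adsp", O_RDWR | O_CLOEXEC);
	struct fastrpc_alloc_dma_buf req = { .size = 4096, .flags = 0 };

	if (ioctl(fd, FASTRPC_IOCTL_INIT_ATTACH) == 0 &&
	    ioctl(fd, FASTRPC_IOCTL_ALLOC_DMA_BUFF, &req) == 0)
		use_buffer(req.fd, req.size);	/* req.fd is a dma-buf fd */
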
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
new file mode 100644
index 000000000000..7fd6f633534c
--- /dev/null
+++ b/include/uapi/misc/habanalabs.h
@@ -0,0 +1,450 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef HABANALABS_H_
9#define HABANALABS_H_
10
11#include <linux/types.h>
12#include <linux/ioctl.h>
13
14/*
15 * Defines that are asic-specific but constitutes as ABI between kernel driver
16 * and userspace
17 */
18#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */
19
20/*
21 * Queue Numbering
22 *
23 * The external queues (DMA channels + CPU) MUST be before the internal queues
24 * and each group (DMA channels + CPU and internal) must be contiguous inside
25 * itself but there can be a gap between the two groups (although not
26 * recommended)
27 */
28
29enum goya_queue_id {
30 GOYA_QUEUE_ID_DMA_0 = 0,
31 GOYA_QUEUE_ID_DMA_1,
32 GOYA_QUEUE_ID_DMA_2,
33 GOYA_QUEUE_ID_DMA_3,
34 GOYA_QUEUE_ID_DMA_4,
35 GOYA_QUEUE_ID_CPU_PQ,
36 GOYA_QUEUE_ID_MME,
37 GOYA_QUEUE_ID_TPC0,
38 GOYA_QUEUE_ID_TPC1,
39 GOYA_QUEUE_ID_TPC2,
40 GOYA_QUEUE_ID_TPC3,
41 GOYA_QUEUE_ID_TPC4,
42 GOYA_QUEUE_ID_TPC5,
43 GOYA_QUEUE_ID_TPC6,
44 GOYA_QUEUE_ID_TPC7,
45 GOYA_QUEUE_ID_SIZE
46};
47
48/* Opcode for management ioctl */
49#define HL_INFO_HW_IP_INFO 0
50#define HL_INFO_HW_EVENTS 1
51#define HL_INFO_DRAM_USAGE 2
52#define HL_INFO_HW_IDLE 3
53
54#define HL_INFO_VERSION_MAX_LEN 128
55
56struct hl_info_hw_ip_info {
57 __u64 sram_base_address;
58 __u64 dram_base_address;
59 __u64 dram_size;
60 __u32 sram_size;
61 __u32 num_of_events;
62 __u32 device_id; /* PCI Device ID */
63 __u32 reserved[3];
64 __u32 armcp_cpld_version;
65 __u32 psoc_pci_pll_nr;
66 __u32 psoc_pci_pll_nf;
67 __u32 psoc_pci_pll_od;
68 __u32 psoc_pci_pll_div_factor;
69 __u8 tpc_enabled_mask;
70 __u8 dram_enabled;
71 __u8 pad[2];
72 __u8 armcp_version[HL_INFO_VERSION_MAX_LEN];
73};
74
75struct hl_info_dram_usage {
76 __u64 dram_free_mem;
77 __u64 ctx_dram_mem;
78};
79
80struct hl_info_hw_idle {
81 __u32 is_idle;
82 __u32 pad;
83};
84
85struct hl_info_args {
86 /* Location of relevant struct in userspace */
87 __u64 return_pointer;
88 /*
89 * The size of the return value. Just like "size" in "snprintf",
90 * it limits how many bytes the kernel can write
91 *
92 * For hw_events array, the size should be
93 * hl_info_hw_ip_info.num_of_events * sizeof(__u32)
94 */
95 __u32 return_size;
96
97 /* HL_INFO_* */
98 __u32 op;
99
100 /* Context ID - Currently not in use */
101 __u32 ctx_id;
102 __u32 pad;
103};
104
105/* Opcode to create a new command buffer */
106#define HL_CB_OP_CREATE 0
107/* Opcode to destroy previously created command buffer */
108#define HL_CB_OP_DESTROY 1
109
110struct hl_cb_in {
111 /* Handle of CB or 0 if we want to create one */
112 __u64 cb_handle;
113 /* HL_CB_OP_* */
114 __u32 op;
115 /* Size of CB. Maximum size is 2MB. The minimum size that will be
116 * allocated, regardless of this parameter's value, is PAGE_SIZE
117 */
118 __u32 cb_size;
119 /* Context ID - Currently not in use */
120 __u32 ctx_id;
121 __u32 pad;
122};
123
124struct hl_cb_out {
125 /* Handle of CB */
126 __u64 cb_handle;
127};
128
129union hl_cb_args {
130 struct hl_cb_in in;
131 struct hl_cb_out out;
132};
133
134/*
135 * This structure size must always be fixed to 64-bytes for backward
136 * compatibility
137 */
138struct hl_cs_chunk {
139 /*
140 * For external queue, this represents a Handle of CB on the Host
141 * For internal queue, this represents an SRAM or DRAM address of the
142 * internal CB
143 */
144 __u64 cb_handle;
145 /* Index of queue to put the CB on */
146 __u32 queue_index;
147 /*
148 * Size of command buffer with valid packets
149 * Can be smaller than the actual CB size
150 */
151 __u32 cb_size;
152 /* HL_CS_CHUNK_FLAGS_* */
153 __u32 cs_chunk_flags;
154 /* Align structure to 64 bytes */
155 __u32 pad[11];
156};
157
158#define HL_CS_FLAGS_FORCE_RESTORE 0x1
159
160#define HL_CS_STATUS_SUCCESS 0
161
162struct hl_cs_in {
163 /* this holds address of array of hl_cs_chunk for restore phase */
164 __u64 chunks_restore;
165 /* this holds address of array of hl_cs_chunk for execution phase */
166 __u64 chunks_execute;
167 /* this holds address of array of hl_cs_chunk for store phase -
168 * Currently not in use
169 */
170 __u64 chunks_store;
171 /* Number of chunks in restore phase array */
172 __u32 num_chunks_restore;
173 /* Number of chunks in execution array */
174 __u32 num_chunks_execute;
175 /* Number of chunks in store phase array - Currently not in use */
176 __u32 num_chunks_store;
177 /* HL_CS_FLAGS_* */
178 __u32 cs_flags;
179 /* Context ID - Currently not in use */
180 __u32 ctx_id;
181};
182
183struct hl_cs_out {
184 /* this holds the sequence number of the CS to pass to wait ioctl */
185 __u64 seq;
186 /* HL_CS_STATUS_* */
187 __u32 status;
188 __u32 pad;
189};
190
191union hl_cs_args {
192 struct hl_cs_in in;
193 struct hl_cs_out out;
194};
195
196struct hl_wait_cs_in {
197 /* Command submission sequence number */
198 __u64 seq;
199 /* Absolute timeout to wait in microseconds */
200 __u64 timeout_us;
201 /* Context ID - Currently not in use */
202 __u32 ctx_id;
203 __u32 pad;
204};
205
206#define HL_WAIT_CS_STATUS_COMPLETED 0
207#define HL_WAIT_CS_STATUS_BUSY 1
208#define HL_WAIT_CS_STATUS_TIMEDOUT 2
209#define HL_WAIT_CS_STATUS_ABORTED 3
210#define HL_WAIT_CS_STATUS_INTERRUPTED 4
211
212struct hl_wait_cs_out {
213 /* HL_WAIT_CS_STATUS_* */
214 __u32 status;
215 __u32 pad;
216};
217
218union hl_wait_cs_args {
219 struct hl_wait_cs_in in;
220 struct hl_wait_cs_out out;
221};
222
223/* Opcode to alloc device memory */
224#define HL_MEM_OP_ALLOC 0
225/* Opcode to free previously allocated device memory */
226#define HL_MEM_OP_FREE 1
227/* Opcode to map host memory */
228#define HL_MEM_OP_MAP 2
229/* Opcode to unmap previously mapped host memory */
230#define HL_MEM_OP_UNMAP 3
231
232/* Memory flags */
233#define HL_MEM_CONTIGUOUS 0x1
234#define HL_MEM_SHARED 0x2
235#define HL_MEM_USERPTR 0x4
236
237struct hl_mem_in {
238 union {
239 /* HL_MEM_OP_ALLOC - allocate device memory */
240 struct {
241 /* Size to alloc */
242 __u64 mem_size;
243 } alloc;
244
245 /* HL_MEM_OP_FREE - free device memory */
246 struct {
247 /* Handle returned from HL_MEM_OP_ALLOC */
248 __u64 handle;
249 } free;
250
251 /* HL_MEM_OP_MAP - map device memory */
252 struct {
253 /*
254 * Requested virtual address of mapped memory.
255 * KMD will try to map the requested region to this
256 * hint address, as long as the address is valid and
257 * not already mapped. The user should check the
258 * returned address of the IOCTL to verify that the
259 * hint address was actually used. Passing 0 here means that KMD
260 * will choose the address itself.
261 */
262 __u64 hint_addr;
263 /* Handle returned from HL_MEM_OP_ALLOC */
264 __u64 handle;
265 } map_device;
266
267 /* HL_MEM_OP_MAP - map host memory */
268 struct {
269 /* Address of allocated host memory */
270 __u64 host_virt_addr;
271 /*
272 * Requested virtual address of mapped memory.
273 * KMD will try to map the requested region to this
274 * hint address, as long as the address is valid and
275 * not already mapped. The user should check the
276 * returned address of the IOCTL to verify that the
277 * hint address was actually used. Passing 0 here means that KMD
278 * will choose the address itself.
279 */
280 __u64 hint_addr;
281 /* Size of allocated host memory */
282 __u64 mem_size;
283 } map_host;
284
285 /* HL_MEM_OP_UNMAP - unmap host memory */
286 struct {
287 /* Virtual address returned from HL_MEM_OP_MAP */
288 __u64 device_virt_addr;
289 } unmap;
290 };
291
292 /* HL_MEM_OP_* */
293 __u32 op;
294 /* HL_MEM_* flags */
295 __u32 flags;
296 /* Context ID - Currently not in use */
297 __u32 ctx_id;
298 __u32 pad;
299};
300
301struct hl_mem_out {
302 union {
303 /*
304 * Used for HL_MEM_OP_MAP as the virtual address that was
305 * assigned in the device VA space.
306 * A value of 0 means the requested operation failed.
307 */
308 __u64 device_virt_addr;
309
310 /*
311 * Used for HL_MEM_OP_ALLOC. This is the assigned
312 * handle for the allocated memory
313 */
314 __u64 handle;
315 };
316};
317
318union hl_mem_args {
319 struct hl_mem_in in;
320 struct hl_mem_out out;
321};
322
323/*
324 * Various information operations such as:
325 * - H/W IP information
326 * - Current dram usage
327 *
328 * The user calls this IOCTL with an opcode that describes the required
329 * information. The user should supply a pointer to a user-allocated memory
330 * chunk, which will be filled by the driver with the requested information.
331 *
332 * The user supplies the maximum size to copy into the user's memory,
333 * in order to prevent data corruption in case of differences between the
334 * definitions of structures in kernel and userspace, e.g. in case of old
335 * userspace and new kernel driver
336 */
337#define HL_IOCTL_INFO \
338 _IOWR('H', 0x01, struct hl_info_args)
339
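To make the flow above concrete, here is a minimal, illustrative user-space sketch of the INFO ioctl. It assumes the UAPI header is installed as <misc/habanalabs.h> and that the device is exposed as /dev/hl0; neither of those paths is defined by this file.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

int main(void)
{
	struct hl_info_hw_ip_info hw_ip;
	struct hl_info_args args;
	int fd = open("/dev/hl0", O_RDWR);	/* device node path is an assumption */

	if (fd < 0)
		return 1;

	memset(&hw_ip, 0, sizeof(hw_ip));
	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_HW_IP_INFO;
	args.return_pointer = (__u64) (uintptr_t) &hw_ip;
	args.return_size = sizeof(hw_ip);	/* driver writes at most this many bytes */

	if (ioctl(fd, HL_IOCTL_INFO, &args))
		return 1;

	printf("device 0x%x, dram size %llu, events %u\n", hw_ip.device_id,
	       (unsigned long long) hw_ip.dram_size, hw_ip.num_of_events);
	return 0;
}

For HL_INFO_HW_EVENTS the same pattern applies, except that return_size would be hl_info_hw_ip_info.num_of_events * sizeof(__u32), as noted above.
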
340/*
341 * Command Buffer
342 * - Request a Command Buffer
343 * - Destroy a Command Buffer
344 *
345 * The command buffers are memory blocks that reside in DMA-able address
346 * space and are physically contiguous so they can be accessed by the device
347 * directly. They are allocated using the coherent DMA API.
348 *
349 * When creating a new CB, the IOCTL returns a handle for it, and the user-space
350 * process needs to use that handle to mmap the buffer so it can access it.
351 *
352 */
353#define HL_IOCTL_CB \
354 _IOWR('H', 0x02, union hl_cb_args)
355
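Below is a hedged sketch of the create-and-mmap flow described above. Passing the returned handle as the mmap offset is an assumption made for illustration; it is not spelled out by this header. The fd is an already-open device file descriptor.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <misc/habanalabs.h>

/* Create a CB of 'size' bytes and map it into user space; NULL on failure. */
static void *create_and_map_cb(int fd, __u32 size, __u64 *handle)
{
	union hl_cb_args cb;
	void *p;

	memset(&cb, 0, sizeof(cb));
	cb.in.op = HL_CB_OP_CREATE;
	cb.in.cb_size = size;	/* at most 2MB; at least PAGE_SIZE is allocated */

	if (ioctl(fd, HL_IOCTL_CB, &cb))
		return NULL;

	*handle = cb.out.cb_handle;

	/* Assumption: the CB handle doubles as the mmap offset */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
		 (off_t) *handle);
	return p == MAP_FAILED ? NULL : p;
}

The buffer can later be released by issuing HL_CB_OP_DESTROY with the same handle.
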
356/*
357 * Command Submission
358 *
359 * To submit work to the device, the user needs to call this IOCTL with a set
360 * of JOBS. That set of JOBS constitutes a CS object.
361 * Each JOB will be enqueued on a specific queue, according to the user's input.
362 * There can be more than one JOB per queue.
363 *
364 * There are two types of queues - external and internal. External queues
365 * are DMA queues which transfer data from/to the Host. All other queues are
366 * internal. The driver will get completion notifications from the device only
367 * on JOBS which are enqueued in the external queues.
368 *
369 * For jobs on external queues, the user needs to create command buffers
370 * through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on
371 * internal queues, the user needs to prepare a "command buffer" with packets
372 * on either the SRAM or DRAM, and give the device address of that buffer to
373 * the CS ioctl.
374 *
375 * This IOCTL is asynchronous in regard to the actual execution of the CS. This
376 * means it returns immediately after ALL the JOBS were enqueued on their
377 * relevant queues. Therefore, the user mustn't assume the CS has been completed
378 * or has even started to execute.
379 *
380 * Upon successful enqueue, the IOCTL returns an opaque handle which the user
381 * can use with the "Wait for CS" IOCTL to check whether the handle's CS
382 * external JOBS have been completed. Note that if the CS has internal JOBS
383 * which can execute AFTER the external JOBS have finished, the driver might
384 * report that the CS has finished executing BEFORE the internal JOBS have
385 * actually finished executing.
386 *
387 * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
388 * a second set is for "execution" phase and a third set is for "store" phase.
389 * The JOBS of the "restore" phase are enqueued only after a context switch
390 * (or if it is the first CS for this context). The user can also order the
391 * driver to run the "restore" phase explicitly.
392 *
393 */
394#define HL_IOCTL_CS \
395 _IOWR('H', 0x03, union hl_cs_args)
396
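As an illustration of the description above, a single-JOB submission on one of the external DMA queues could look like the sketch below; the CB handle and size are assumed to come from the CB ioctl, and only the execution phase is populated.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

/* Submit one JOB on an external queue; fills *seq on success. */
static int submit_one_job(int fd, __u64 cb_handle, __u32 cb_size, __u64 *seq)
{
	struct hl_cs_chunk chunk;
	union hl_cs_args cs;

	memset(&chunk, 0, sizeof(chunk));
	memset(&cs, 0, sizeof(cs));

	chunk.cb_handle = cb_handle;		 /* external queue: CB handle on the Host */
	chunk.queue_index = GOYA_QUEUE_ID_DMA_0; /* any id from enum goya_queue_id */
	chunk.cb_size = cb_size;		 /* bytes of valid packets in the CB */

	cs.in.chunks_execute = (__u64) (uintptr_t) &chunk;
	cs.in.num_chunks_execute = 1;

	if (ioctl(fd, HL_IOCTL_CS, &cs))
		return -1;

	if (cs.out.status != HL_CS_STATUS_SUCCESS)
		return -1;

	*seq = cs.out.seq;	/* opaque handle for the wait-for-CS ioctl below */
	return 0;
}
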
397/*
398 * Wait for Command Submission
399 *
400 * The user can call this IOCTL with a handle it received from the CS IOCTL
401 * to wait until the handle's CS has finished executing. The user will wait
402 * inside the kernel until the CS has finished or until the user-requested
403 * timeout has expired.
404 *
405 * The return value of the IOCTL is a standard Linux error code. The possible
406 * values are:
407 *
408 * EINTR - Kernel waiting has been interrupted, e.g. due to OS signal
409 * that the user process received
410 * ETIMEDOUT - The CS has caused a timeout on the device
411 * EIO - The CS was aborted (usually because the device was reset)
412 * ENODEV - The device wants to do a hard-reset (so the user needs to close the FD)
413 *
414 * The driver also returns a custom define inside the IOCTL which can be:
415 *
416 * HL_WAIT_CS_STATUS_COMPLETED - The CS has been completed successfully (0)
417 * HL_WAIT_CS_STATUS_BUSY - The CS is still executing (0)
418 * HL_WAIT_CS_STATUS_TIMEDOUT - The CS has caused a timeout on the device
419 * (ETIMEDOUT)
420 * HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the
421 * device was reset (EIO)
422 * HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
423 *
424 */
425
426#define HL_IOCTL_WAIT_CS \
427 _IOWR('H', 0x04, union hl_wait_cs_args)
428
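A small wrapper around the wait flow described above might look like the sketch below; it surfaces both the ioctl return code and the HL_WAIT_CS_STATUS_* value, and leaves the retry policy (for example on HL_WAIT_CS_STATUS_BUSY) to the caller.

#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

/* Wait for a CS; *status receives one of the HL_WAIT_CS_STATUS_* values. */
static int wait_for_cs(int fd, __u64 seq, __u64 timeout_us, __u32 *status)
{
	union hl_wait_cs_args wait;
	int rc;

	memset(&wait, 0, sizeof(wait));
	wait.in.seq = seq;
	wait.in.timeout_us = timeout_us;

	rc = ioctl(fd, HL_IOCTL_WAIT_CS, &wait);
	/* On failure, errno is EINTR, ETIMEDOUT, EIO or ENODEV as listed above */

	*status = wait.out.status;
	return rc;
}
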
429/*
430 * Memory
431 * - Map host memory to device MMU
432 * - Unmap host memory from device MMU
433 *
434 * This IOCTL allows the user to map host memory to the device MMU
435 *
436 * For host memory, the IOCTL doesn't allocate memory. The user is supposed
437 * to allocate the memory in user-space (malloc/new). The driver pins the
438 * physical pages (up to the limit allowed by the OS), assigns a virtual
439 * address in the device VA space and initializes the device MMU.
440 *
441 * There is an option for the user to specify the requested virtual address.
442 *
443 */
444#define HL_IOCTL_MEMORY \
445 _IOWR('H', 0x05, union hl_mem_args)
446
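The sketch below maps an existing user buffer into the device VA space, following the description above. Selecting the map_host variant of the input union through HL_MEM_OP_MAP together with the HL_MEM_USERPTR flag is an assumption made for illustration; the same ioctl also serves HL_MEM_OP_ALLOC and HL_MEM_OP_FREE for device memory.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

/* Map a host buffer into the device MMU; returns the device VA, or 0 on failure. */
static __u64 map_host_buffer(int fd, void *buf, __u64 size)
{
	union hl_mem_args mem;

	memset(&mem, 0, sizeof(mem));
	mem.in.op = HL_MEM_OP_MAP;
	mem.in.flags = HL_MEM_USERPTR;	/* assumption: selects host (user) memory */
	mem.in.map_host.host_virt_addr = (__u64) (uintptr_t) buf;
	mem.in.map_host.hint_addr = 0;	/* 0: let the driver pick the device VA */
	mem.in.map_host.mem_size = size;

	if (ioctl(fd, HL_IOCTL_MEMORY, &mem))
		return 0;

	return mem.out.device_virt_addr;	/* 0 means the operation failed */
}

The mapping is torn down with HL_MEM_OP_UNMAP, passing the returned device_virt_addr in the unmap variant.
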
447#define HL_COMMAND_START 0x01
448#define HL_COMMAND_END 0x06
449
450#endif /* HABANALABS_H_ */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e6a7b01932e6..b19cc9c36475 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1682,7 +1682,6 @@ if RUNTIME_TESTING_MENU
 config LKDTM
 	tristate "Linux Kernel Dump Test Tool Module"
 	depends on DEBUG_FS
-	depends on BLOCK
 	help
 	  This module enables testing of the different dumping mechanisms by
 	  inducing system failures at predefined crash points.
diff --git a/lib/iomap.c b/lib/iomap.c
index 541d926da95e..e909ab71e995 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -65,8 +65,9 @@ static void bad_io_access(unsigned long port, const char *access)
 #endif
 
 #ifndef mmio_read16be
-#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
-#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
+#define mmio_read16be(addr) swab16(readw(addr))
+#define mmio_read32be(addr) swab32(readl(addr))
+#define mmio_read64be(addr) swab64(readq(addr))
 #endif
 
 unsigned int ioread8(void __iomem *addr)
@@ -100,14 +101,89 @@ EXPORT_SYMBOL(ioread16be);
 EXPORT_SYMBOL(ioread32);
 EXPORT_SYMBOL(ioread32be);
 
+#ifdef readq
+static u64 pio_read64_lo_hi(unsigned long port)
+{
+	u64 lo, hi;
+
+	lo = inl(port);
+	hi = inl(port + sizeof(u32));
+
+	return lo | (hi << 32);
+}
+
+static u64 pio_read64_hi_lo(unsigned long port)
+{
+	u64 lo, hi;
+
+	hi = inl(port + sizeof(u32));
+	lo = inl(port);
+
+	return lo | (hi << 32);
+}
+
+static u64 pio_read64be_lo_hi(unsigned long port)
+{
+	u64 lo, hi;
+
+	lo = pio_read32be(port + sizeof(u32));
+	hi = pio_read32be(port);
+
+	return lo | (hi << 32);
+}
+
+static u64 pio_read64be_hi_lo(unsigned long port)
+{
+	u64 lo, hi;
+
+	hi = pio_read32be(port);
+	lo = pio_read32be(port + sizeof(u32));
+
+	return lo | (hi << 32);
+}
+
+u64 ioread64_lo_hi(void __iomem *addr)
+{
+	IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr));
+	return 0xffffffffffffffffULL;
+}
+
+u64 ioread64_hi_lo(void __iomem *addr)
+{
+	IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr));
+	return 0xffffffffffffffffULL;
+}
+
+u64 ioread64be_lo_hi(void __iomem *addr)
+{
+	IO_COND(addr, return pio_read64be_lo_hi(port),
+		      return mmio_read64be(addr));
+	return 0xffffffffffffffffULL;
+}
+
+u64 ioread64be_hi_lo(void __iomem *addr)
+{
+	IO_COND(addr, return pio_read64be_hi_lo(port),
+		      return mmio_read64be(addr));
+	return 0xffffffffffffffffULL;
+}
+
+EXPORT_SYMBOL(ioread64_lo_hi);
+EXPORT_SYMBOL(ioread64_hi_lo);
+EXPORT_SYMBOL(ioread64be_lo_hi);
+EXPORT_SYMBOL(ioread64be_hi_lo);
+
+#endif /* readq */
+
 #ifndef pio_write16be
 #define pio_write16be(val,port) outw(swab16(val),port)
 #define pio_write32be(val,port) outl(swab32(val),port)
 #endif
 
 #ifndef mmio_write16be
-#define mmio_write16be(val,port) __raw_writew(be16_to_cpu(val),port)
-#define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port)
+#define mmio_write16be(val,port) writew(swab16(val),port)
+#define mmio_write32be(val,port) writel(swab32(val),port)
+#define mmio_write64be(val,port) writeq(swab64(val),port)
 #endif
 
 void iowrite8(u8 val, void __iomem *addr)
@@ -136,6 +212,62 @@ EXPORT_SYMBOL(iowrite16be);
 EXPORT_SYMBOL(iowrite32);
 EXPORT_SYMBOL(iowrite32be);
 
+#ifdef writeq
+static void pio_write64_lo_hi(u64 val, unsigned long port)
+{
+	outl(val, port);
+	outl(val >> 32, port + sizeof(u32));
+}
+
+static void pio_write64_hi_lo(u64 val, unsigned long port)
+{
+	outl(val >> 32, port + sizeof(u32));
+	outl(val, port);
+}
+
+static void pio_write64be_lo_hi(u64 val, unsigned long port)
+{
+	pio_write32be(val, port + sizeof(u32));
+	pio_write32be(val >> 32, port);
+}
+
+static void pio_write64be_hi_lo(u64 val, unsigned long port)
+{
+	pio_write32be(val >> 32, port);
+	pio_write32be(val, port + sizeof(u32));
+}
+
+void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+	IO_COND(addr, pio_write64_lo_hi(val, port),
+		      writeq(val, addr));
+}
+
+void iowrite64_hi_lo(u64 val, void __iomem *addr)
+{
+	IO_COND(addr, pio_write64_hi_lo(val, port),
+		      writeq(val, addr));
+}
+
+void iowrite64be_lo_hi(u64 val, void __iomem *addr)
+{
+	IO_COND(addr, pio_write64be_lo_hi(val, port),
+		      mmio_write64be(val, addr));
+}
+
+void iowrite64be_hi_lo(u64 val, void __iomem *addr)
+{
+	IO_COND(addr, pio_write64be_hi_lo(val, port),
+		      mmio_write64be(val, addr));
+}
+
+EXPORT_SYMBOL(iowrite64_lo_hi);
+EXPORT_SYMBOL(iowrite64_hi_lo);
+EXPORT_SYMBOL(iowrite64be_lo_hi);
+EXPORT_SYMBOL(iowrite64be_hi_lo);
+
+#endif /* readq */
+
 /*
  * These are the "repeat MMIO read/write" functions.
  * Note the "__raw" accesses, since we don't want to
diff --git a/scripts/ver_linux b/scripts/ver_linux
index a6c728db05ce..810e608baa24 100755
--- a/scripts/ver_linux
+++ b/scripts/ver_linux
@@ -13,6 +13,8 @@ BEGIN {
13 system("uname -a") 13 system("uname -a")
14 printf("\n") 14 printf("\n")
15 15
16 vernum = "[0-9]+([.]?[0-9]+)+"
17
16 printversion("GNU C", version("gcc -dumpversion")) 18 printversion("GNU C", version("gcc -dumpversion"))
17 printversion("GNU Make", version("make --version")) 19 printversion("GNU Make", version("make --version"))
18 printversion("Binutils", version("ld -v")) 20 printversion("Binutils", version("ld -v"))
@@ -34,7 +36,7 @@ BEGIN {
 	while (getline <"/proc/self/maps" > 0) {
 		if (/libc.*\.so$/) {
 			n = split($0, procmaps, "/")
-			if (match(procmaps[n], /[0-9]+([.]?[0-9]+)+/)) {
+			if (match(procmaps[n], vernum)) {
 				ver = substr(procmaps[n], RSTART, RLENGTH)
 				printversion("Linux C Library", ver)
 				break
@@ -70,7 +72,7 @@ BEGIN {
 function version(cmd, ver) {
 	cmd = cmd " 2>&1"
 	while (cmd | getline > 0) {
-		if (match($0, /[0-9]+([.]?[0-9]+)+/)) {
+		if (match($0, vernum)) {
 			ver = substr($0, RSTART, RLENGTH)
 			break
 		}
diff --git a/sound/hda/hdac_component.c b/sound/hda/hdac_component.c
index a6d37b9d6413..5c95933e739a 100644
--- a/sound/hda/hdac_component.c
+++ b/sound/hda/hdac_component.c
@@ -269,7 +269,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_acomp_register_notifier);
  */
 int snd_hdac_acomp_init(struct hdac_bus *bus,
 			const struct drm_audio_component_audio_ops *aops,
-			int (*match_master)(struct device *, void *),
+			int (*match_master)(struct device *, int, void *),
 			size_t extra_size)
 {
 	struct component_match *match = NULL;
@@ -288,7 +288,7 @@ int snd_hdac_acomp_init(struct hdac_bus *bus,
 	bus->audio_component = acomp;
 	devres_add(dev, acomp);
 
-	component_match_add(dev, &match, match_master, bus);
+	component_match_add_typed(dev, &match, match_master, bus);
 	ret = component_master_add_with_match(dev, &hdac_component_master_ops,
 					      match);
 	if (ret < 0)
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 27eb0270a711..575198bd3cd0 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -82,9 +82,11 @@ void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
 }
 EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
 
-static int i915_component_master_match(struct device *dev, void *data)
+static int i915_component_master_match(struct device *dev, int subcomponent,
+				       void *data)
 {
-	return !strcmp(dev->driver->name, "i915");
+	return !strcmp(dev->driver->name, "i915") &&
+	       subcomponent == I915_COMPONENT_AUDIO;
 }
 
 /* check whether intel graphics is present */