aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSean Paul <seanpaul@chromium.org>2017-06-27 11:36:28 -0400
committerSean Paul <seanpaul@chromium.org>2017-06-27 11:36:28 -0400
commitc048c984de38d906bb0df56ec2ae90eafc123d0a (patch)
tree4e670e712aeedcaf2e2193b64f9120e03320e01c
parent38cb266ad1a24e037220dc563ab46364595b17b6 (diff)
parent6d61e70ccc21606ffb8a0a03bd3aba24f659502b (diff)
Merge remote-tracking branch 'airlied/drm-next' into drm-misc-next-fixes
Backmerge drm-next with rc7
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi-ccu.txt7
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mvebu.txt6
-rw-r--r--Documentation/devicetree/bindings/mfd/stm32-timers.txt2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/b53.txt2
-rw-r--r--Documentation/devicetree/bindings/net/smsc911x.txt1
-rw-r--r--Makefile4
-rw-r--r--arch/arm64/kernel/vdso.c5
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S1
-rw-r--r--arch/mips/kvm/tlb.c6
-rw-r--r--arch/powerpc/include/asm/kprobes.h1
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S11
-rw-r--r--arch/powerpc/kernel/kprobes.c17
-rw-r--r--arch/powerpc/kernel/setup_64.c31
-rw-r--r--arch/powerpc/kernel/trace/ftrace_64_mprofile.S59
-rw-r--r--arch/powerpc/kvm/book3s_hv.c51
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S12
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S75
-rw-r--r--arch/powerpc/perf/perf_regs.c3
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c94
-rw-r--r--arch/s390/kvm/gaccess.c15
-rw-r--r--arch/x86/events/intel/core.c4
-rw-r--r--arch/x86/include/asm/kvm_emulate.h1
-rw-r--r--arch/x86/include/asm/mshyperv.h3
-rw-r--r--arch/x86/kvm/emulate.c1
-rw-r--r--arch/x86/kvm/x86.c62
-rw-r--r--block/blk-mq-sched.c58
-rw-r--r--block/blk-mq-sched.h9
-rw-r--r--block/blk-mq.c16
-rw-r--r--drivers/acpi/scan.c67
-rw-r--r--drivers/block/xen-blkback/blkback.c26
-rw-r--r--drivers/block/xen-blkback/common.h26
-rw-r--r--drivers/block/xen-blkback/xenbus.c15
-rw-r--r--drivers/bus/Kconfig1
-rw-r--r--drivers/char/random.c12
-rw-r--r--drivers/clk/meson/Kconfig1
-rw-r--r--drivers/clk/sunxi-ng/Kconfig1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c2
-rw-r--r--drivers/clocksource/arm_arch_timer.c4
-rw-r--r--drivers/clocksource/cadence_ttc_timer.c1
-rw-r--r--drivers/clocksource/timer-sun5i.c1
-rw-r--r--drivers/gpio/gpio-mvebu.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_crtc.c4
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c11
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c9
-rw-r--r--drivers/gpu/drm/drm_connector.c38
-rw-r--r--drivers/gpu/drm/i915/Makefile11
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c56
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c56
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c9
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c15
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h100
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c303
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c20
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c121
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h44
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c48
-rw-r--r--drivers/gpu/drm/i915/gvt/render.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c27
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c39
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/trace.h178
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c9
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c68
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c46
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h106
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c164
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.c19
-rw-r--r--drivers/gpu/drm/i915/i915_gem_clflush.c17
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c86
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h26
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c119
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c2746
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c16
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h22
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c20
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c28
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c24
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c6
-rw-r--r--drivers/gpu/drm/i915/i915_oa_bdw.c5376
-rw-r--r--drivers/gpu/drm/i915/i915_oa_bdw.h40
-rw-r--r--drivers/gpu/drm/i915/i915_oa_bxt.c2690
-rw-r--r--drivers/gpu/drm/i915/i915_oa_bxt.h40
-rw-r--r--drivers/gpu/drm/i915/i915_oa_chv.c2873
-rw-r--r--drivers/gpu/drm/i915/i915_oa_chv.h40
-rw-r--r--drivers/gpu/drm/i915/i915_oa_glk.c2602
-rw-r--r--drivers/gpu/drm/i915/i915_oa_glk.h40
-rw-r--r--drivers/gpu/drm/i915/i915_oa_hsw.c263
-rw-r--r--drivers/gpu/drm/i915/i915_oa_hsw.h4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_kblgt2.c2991
-rw-r--r--drivers/gpu/drm/i915/i915_oa_kblgt2.h40
-rw-r--r--drivers/gpu/drm/i915/i915_oa_kblgt3.c3040
-rw-r--r--drivers/gpu/drm/i915/i915_oa_kblgt3.h40
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt2.c3479
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt2.h40
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt3.c3039
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt3.h40
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt4.c3093
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt4.h40
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c87
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c1126
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h287
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h5
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c44
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c28
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h20
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c137
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c73
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c355
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c17
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c392
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c11
-rw-r--r--drivers/gpu/drm/i915/intel_display.c306
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c201
-rw-r--r--drivers/gpu/drm/i915/intel_dp_aux_backlight.c17
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c437
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.h4
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h59
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c167
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c22
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c86
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c2
-rw-r--r--drivers/gpu/drm/i915/intel_guc_loader.c2
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c43
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.h5
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c157
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c15
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c7
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c73
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.c2
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c1
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c265
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c112
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c316
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c540
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c7
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c126
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_gem_object.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c16
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_context.c12
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h267
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h22
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h66
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h1371
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c204
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h51
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c23
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h368
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h13
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c12
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h26
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h26
-rw-r--r--drivers/gpu/drm/msm/edp/edp.xml.h26
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h68
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c70
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c5
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h26
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h26
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c9
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c17
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c15
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h14
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c6
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c11
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c26
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_common.xml.h26
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c39
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h38
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c15
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c431
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h26
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c16
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c19
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c10
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c27
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h13
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h3
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c4
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-magicmouse.c15
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/i2c/busses/i2c-imx.c8
-rw-r--r--drivers/input/misc/soc_button_array.c20
-rw-r--r--drivers/input/rmi4/rmi_f54.c17
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/irqchip/irq-mips-gic.c6
-rw-r--r--drivers/md/dm-integrity.c12
-rw-r--r--drivers/md/dm-io.c4
-rw-r--r--drivers/md/dm-raid1.c21
-rw-r--r--drivers/mfd/arizona-core.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c10
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c14
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c35
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c17
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c9
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c2
-rw-r--r--drivers/ntb/ntb_transport.c58
-rw-r--r--drivers/ntb/test/ntb_perf.c4
-rw-r--r--drivers/pinctrl/pinctrl-amd.c91
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c2
-rw-r--r--drivers/scsi/qedi/qedi_fw.c1
-rw-r--r--drivers/scsi/qedi/qedi_main.c4
-rw-r--r--drivers/target/iscsi/iscsi_target.c22
-rw-r--r--drivers/target/target_core_internal.h2
-rw-r--r--drivers/target/target_core_tmr.c16
-rw-r--r--drivers/target/target_core_transport.c9
-rw-r--r--fs/autofs4/dev-ioctl.c2
-rw-r--r--fs/cifs/file.c2
-rw-r--r--fs/cifs/misc.c2
-rw-r--r--fs/cifs/smb1ops.c9
-rw-r--r--fs/cifs/smb2ops.c8
-rw-r--r--fs/cifs/xattr.c2
-rw-r--r--fs/dax.c1
-rw-r--r--fs/exec.c28
-rw-r--r--fs/ocfs2/dlmglue.c4
-rw-r--r--fs/ocfs2/xattr.c23
-rw-r--r--fs/ufs/balloc.c22
-rw-r--r--fs/ufs/inode.c27
-rw-r--r--fs/ufs/super.c9
-rw-r--r--fs/ufs/ufs_fs.h2
-rw-r--r--fs/xfs/xfs_aops.c7
-rw-r--r--include/acpi/acpi_bus.h3
-rw-r--r--include/drm/i915_pciids.h40
-rw-r--r--include/dt-bindings/clock/sun50i-a64-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun8i-h3-ccu.h2
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/slub_def.h1
-rw-r--r--include/linux/timekeeper_internal.h5
-rw-r--r--include/net/wext.h4
-rw-r--r--include/uapi/drm/i915_drm.h44
-rw-r--r--include/uapi/drm/msm_drm.h9
-rw-r--r--kernel/events/ring_buffer.c2
-rw-r--r--kernel/livepatch/patch.c8
-rw-r--r--kernel/livepatch/transition.c36
-rw-r--r--kernel/signal.c20
-rw-r--r--kernel/time/timekeeping.c71
-rw-r--r--lib/cmdline.c6
-rw-r--r--mm/khugepaged.c1
-rw-r--r--mm/mmap.c19
-rw-r--r--mm/slub.c40
-rw-r--r--mm/vmalloc.c15
-rw-r--r--net/8021q/vlan.c3
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/dev_ioctl.c19
-rw-r--r--net/core/fib_rules.c21
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/decnet/dn_route.c14
-rw-r--r--net/ipv4/igmp.c1
-rw-r--r--net/ipv4/ip_tunnel.c2
-rw-r--r--net/ipv6/addrconf.c6
-rw-r--r--net/ipv6/fib6_rules.c22
-rw-r--r--net/ipv6/ip6_fib.c3
-rw-r--r--net/ipv6/ip6_tunnel.c6
-rw-r--r--net/rxrpc/key.c64
-rw-r--r--net/sctp/endpointola.c1
-rw-r--r--net/sctp/sctp_diag.c5
-rw-r--r--net/sctp/socket.c5
-rw-r--r--net/wireless/wext-core.c22
-rw-r--r--scripts/Makefile.headersinst10
-rw-r--r--scripts/genksyms/genksyms.h2
-rw-r--r--scripts/kconfig/Makefile2
-rw-r--r--scripts/kconfig/nconf.c12
-rw-r--r--scripts/kconfig/nconf.gui.c4
-rwxr-xr-xscripts/tags.sh1
-rw-r--r--sound/core/pcm_lib.c4
-rw-r--r--sound/firewire/amdtp-stream.c8
-rw-r--r--sound/firewire/amdtp-stream.h2
-rw-r--r--sound/pci/hda/hda_intel.c11
-rw-r--r--tools/perf/util/probe-event.c2
-rwxr-xr-xtools/testing/selftests/ntb/ntb_test.sh2
313 files changed, 40877 insertions, 4720 deletions
diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
index e9c5a1d9834a..f465647a4dd2 100644
--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
@@ -22,7 +22,8 @@ Required properties :
22- #clock-cells : must contain 1 22- #clock-cells : must contain 1
23- #reset-cells : must contain 1 23- #reset-cells : must contain 1
24 24
25For the PRCM CCUs on H3/A64, one more clock is needed: 25For the PRCM CCUs on H3/A64, two more clocks are needed:
26- "pll-periph": the SoC's peripheral PLL from the main CCU
26- "iosc": the SoC's internal frequency oscillator 27- "iosc": the SoC's internal frequency oscillator
27 28
28Example for generic CCU: 29Example for generic CCU:
@@ -39,8 +40,8 @@ Example for PRCM CCU:
39r_ccu: clock@01f01400 { 40r_ccu: clock@01f01400 {
40 compatible = "allwinner,sun50i-a64-r-ccu"; 41 compatible = "allwinner,sun50i-a64-r-ccu";
41 reg = <0x01f01400 0x100>; 42 reg = <0x01f01400 0x100>;
42 clocks = <&osc24M>, <&osc32k>, <&iosc>; 43 clocks = <&osc24M>, <&osc32k>, <&iosc>, <&ccu CLK_PLL_PERIPH0>;
43 clock-names = "hosc", "losc", "iosc"; 44 clock-names = "hosc", "losc", "iosc", "pll-periph";
44 #clock-cells = <1>; 45 #clock-cells = <1>;
45 #reset-cells = <1>; 46 #reset-cells = <1>;
46}; 47};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 42c3bb2d53e8..01e331a5f3e7 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -41,9 +41,9 @@ Required properties:
41Optional properties: 41Optional properties:
42 42
43In order to use the GPIO lines in PWM mode, some additional optional 43In order to use the GPIO lines in PWM mode, some additional optional
44properties are required. Only Armada 370 and XP support these properties. 44properties are required.
45 45
46- compatible: Must contain "marvell,armada-370-xp-gpio" 46- compatible: Must contain "marvell,armada-370-gpio"
47 47
48- reg: an additional register set is needed, for the GPIO Blink 48- reg: an additional register set is needed, for the GPIO Blink
49 Counter on/off registers. 49 Counter on/off registers.
@@ -71,7 +71,7 @@ Example:
71 }; 71 };
72 72
73 gpio1: gpio@18140 { 73 gpio1: gpio@18140 {
74 compatible = "marvell,armada-370-xp-gpio"; 74 compatible = "marvell,armada-370-gpio";
75 reg = <0x18140 0x40>, <0x181c8 0x08>; 75 reg = <0x18140 0x40>, <0x181c8 0x08>;
76 reg-names = "gpio", "pwm"; 76 reg-names = "gpio", "pwm";
77 ngpios = <17>; 77 ngpios = <17>;
diff --git a/Documentation/devicetree/bindings/mfd/stm32-timers.txt b/Documentation/devicetree/bindings/mfd/stm32-timers.txt
index bbd083f5600a..1db6e0057a63 100644
--- a/Documentation/devicetree/bindings/mfd/stm32-timers.txt
+++ b/Documentation/devicetree/bindings/mfd/stm32-timers.txt
@@ -31,7 +31,7 @@ Example:
31 compatible = "st,stm32-timers"; 31 compatible = "st,stm32-timers";
32 reg = <0x40010000 0x400>; 32 reg = <0x40010000 0x400>;
33 clocks = <&rcc 0 160>; 33 clocks = <&rcc 0 160>;
34 clock-names = "clk_int"; 34 clock-names = "int";
35 35
36 pwm { 36 pwm {
37 compatible = "st,stm32-pwm"; 37 compatible = "st,stm32-pwm";
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
index d6c6e41648d4..8ec2ca21adeb 100644
--- a/Documentation/devicetree/bindings/net/dsa/b53.txt
+++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
@@ -34,7 +34,7 @@ Required properties:
34 "brcm,bcm6328-switch" 34 "brcm,bcm6328-switch"
35 "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch" 35 "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"
36 36
37See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional 37See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
38required and optional properties. 38required and optional properties.
39 39
40Examples: 40Examples:
diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt
index 16c3a9501f5d..acfafc8e143c 100644
--- a/Documentation/devicetree/bindings/net/smsc911x.txt
+++ b/Documentation/devicetree/bindings/net/smsc911x.txt
@@ -27,6 +27,7 @@ Optional properties:
27 of the device. On many systems this is wired high so the device goes 27 of the device. On many systems this is wired high so the device goes
28 out of reset at power-on, but if it is under program control, this 28 out of reset at power-on, but if it is under program control, this
29 optional GPIO can wake up in response to it. 29 optional GPIO can wake up in response to it.
30- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies
30 31
31Examples: 32Examples:
32 33
diff --git a/Makefile b/Makefile
index e40c471abe29..6d8a984ed9c9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 12 2PATCHLEVEL = 12
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc6 4EXTRAVERSION = -rc7
5NAME = Fearless Coyote 5NAME = Fearless Coyote
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -1437,7 +1437,7 @@ help:
1437 @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build' 1437 @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
1438 @echo ' make V=2 [targets] 2 => give reason for rebuild of target' 1438 @echo ' make V=2 [targets] 2 => give reason for rebuild of target'
1439 @echo ' make O=dir [targets] Locate all output files in "dir", including .config' 1439 @echo ' make O=dir [targets] Locate all output files in "dir", including .config'
1440 @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)' 1440 @echo ' make C=1 [targets] Check re-compiled c source with $$CHECK (sparse by default)'
1441 @echo ' make C=2 [targets] Force check of all c source with $$CHECK' 1441 @echo ' make C=2 [targets] Force check of all c source with $$CHECK'
1442 @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections' 1442 @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
1443 @echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where' 1443 @echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where'
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 41b6e31f8f55..d0cb007fa482 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -221,10 +221,11 @@ void update_vsyscall(struct timekeeper *tk)
221 /* tkr_mono.cycle_last == tkr_raw.cycle_last */ 221 /* tkr_mono.cycle_last == tkr_raw.cycle_last */
222 vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; 222 vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
223 vdso_data->raw_time_sec = tk->raw_time.tv_sec; 223 vdso_data->raw_time_sec = tk->raw_time.tv_sec;
224 vdso_data->raw_time_nsec = tk->raw_time.tv_nsec; 224 vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec <<
225 tk->tkr_raw.shift) +
226 tk->tkr_raw.xtime_nsec;
225 vdso_data->xtime_clock_sec = tk->xtime_sec; 227 vdso_data->xtime_clock_sec = tk->xtime_sec;
226 vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; 228 vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
227 /* tkr_raw.xtime_nsec == 0 */
228 vdso_data->cs_mono_mult = tk->tkr_mono.mult; 229 vdso_data->cs_mono_mult = tk->tkr_mono.mult;
229 vdso_data->cs_raw_mult = tk->tkr_raw.mult; 230 vdso_data->cs_raw_mult = tk->tkr_raw.mult;
230 /* tkr_mono.shift == tkr_raw.shift */ 231 /* tkr_mono.shift == tkr_raw.shift */
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index e00b4671bd7c..76320e920965 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -256,7 +256,6 @@ monotonic_raw:
256 seqcnt_check fail=monotonic_raw 256 seqcnt_check fail=monotonic_raw
257 257
258 /* All computations are done with left-shifted nsecs. */ 258 /* All computations are done with left-shifted nsecs. */
259 lsl x14, x14, x12
260 get_nsec_per_sec res=x9 259 get_nsec_per_sec res=x9
261 lsl x9, x9, x12 260 lsl x9, x9, x12
262 261
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 7c6336dd2638..7cd92166a0b9 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
166int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va, 166int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
167 bool user, bool kernel) 167 bool user, bool kernel)
168{ 168{
169 int idx_user, idx_kernel; 169 /*
170 * Initialize idx_user and idx_kernel to workaround bogus
171 * maybe-initialized warning when using GCC 6.
172 */
173 int idx_user = 0, idx_kernel = 0;
170 unsigned long flags, old_entryhi; 174 unsigned long flags, old_entryhi;
171 175
172 local_irq_save(flags); 176 local_irq_save(flags);
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index a83821f33ea3..8814a7249ceb 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
103extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); 103extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
104extern int kprobe_handler(struct pt_regs *regs); 104extern int kprobe_handler(struct pt_regs *regs);
105extern int kprobe_post_handler(struct pt_regs *regs); 105extern int kprobe_post_handler(struct pt_regs *regs);
106extern int is_current_kprobe_addr(unsigned long addr);
106#ifdef CONFIG_KPROBES_ON_FTRACE 107#ifdef CONFIG_KPROBES_ON_FTRACE
107extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, 108extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
108 struct kprobe_ctlblk *kcb); 109 struct kprobe_ctlblk *kcb);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ae418b85c17c..b886795060fd 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1411,10 +1411,8 @@ USE_TEXT_SECTION()
1411 .balign IFETCH_ALIGN_BYTES 1411 .balign IFETCH_ALIGN_BYTES
1412do_hash_page: 1412do_hash_page:
1413#ifdef CONFIG_PPC_STD_MMU_64 1413#ifdef CONFIG_PPC_STD_MMU_64
1414 andis. r0,r4,0xa410 /* weird error? */ 1414 andis. r0,r4,0xa450 /* weird error? */
1415 bne- handle_page_fault /* if not, try to insert a HPTE */ 1415 bne- handle_page_fault /* if not, try to insert a HPTE */
1416 andis. r0,r4,DSISR_DABRMATCH@h
1417 bne- handle_dabr_fault
1418 CURRENT_THREAD_INFO(r11, r1) 1416 CURRENT_THREAD_INFO(r11, r1)
1419 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ 1417 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
1420 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ 1418 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@ do_hash_page:
1438 1436
1439 /* Error */ 1437 /* Error */
1440 blt- 13f 1438 blt- 13f
1439
1440 /* Reload DSISR into r4 for the DABR check below */
1441 ld r4,_DSISR(r1)
1441#endif /* CONFIG_PPC_STD_MMU_64 */ 1442#endif /* CONFIG_PPC_STD_MMU_64 */
1442 1443
1443/* Here we have a page fault that hash_page can't handle. */ 1444/* Here we have a page fault that hash_page can't handle. */
1444handle_page_fault: 1445handle_page_fault:
144511: ld r4,_DAR(r1) 144611: andis. r0,r4,DSISR_DABRMATCH@h
1447 bne- handle_dabr_fault
1448 ld r4,_DAR(r1)
1446 ld r5,_DSISR(r1) 1449 ld r5,_DSISR(r1)
1447 addi r3,r1,STACK_FRAME_OVERHEAD 1450 addi r3,r1,STACK_FRAME_OVERHEAD
1448 bl do_page_fault 1451 bl do_page_fault
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index fc4343514bed..01addfb0ed0a 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
43 43
44struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; 44struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
45 45
46int is_current_kprobe_addr(unsigned long addr)
47{
48 struct kprobe *p = kprobe_running();
49 return (p && (unsigned long)p->addr == addr) ? 1 : 0;
50}
51
46bool arch_within_kprobe_blacklist(unsigned long addr) 52bool arch_within_kprobe_blacklist(unsigned long addr)
47{ 53{
48 return (addr >= (unsigned long)__kprobes_text_start && 54 return (addr >= (unsigned long)__kprobes_text_start &&
@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
617 regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc); 623 regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
618#endif 624#endif
619 625
626 /*
627 * jprobes use jprobe_return() which skips the normal return
628 * path of the function, and this messes up the accounting of the
629 * function graph tracer.
630 *
631 * Pause function graph tracing while performing the jprobe function.
632 */
633 pause_graph_tracing();
634
620 return 1; 635 return 1;
621} 636}
622NOKPROBE_SYMBOL(setjmp_pre_handler); 637NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
642 * saved regs... 657 * saved regs...
643 */ 658 */
644 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); 659 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
660 /* It's OK to start function graph tracing again */
661 unpause_graph_tracing();
645 preempt_enable_no_resched(); 662 preempt_enable_no_resched();
646 return 1; 663 return 1;
647} 664}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a8c1f99e9607..4640f6d64f8b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -616,6 +616,24 @@ void __init exc_lvl_early_init(void)
616#endif 616#endif
617 617
618/* 618/*
619 * Emergency stacks are used for a range of things, from asynchronous
620 * NMIs (system reset, machine check) to synchronous, process context.
621 * We set preempt_count to zero, even though that isn't necessarily correct. To
622 * get the right value we'd need to copy it from the previous thread_info, but
623 * doing that might fault causing more problems.
624 * TODO: what to do with accounting?
625 */
626static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
627{
628 ti->task = NULL;
629 ti->cpu = cpu;
630 ti->preempt_count = 0;
631 ti->local_flags = 0;
632 ti->flags = 0;
633 klp_init_thread_info(ti);
634}
635
636/*
619 * Stack space used when we detect a bad kernel stack pointer, and 637 * Stack space used when we detect a bad kernel stack pointer, and
620 * early in SMP boots before relocation is enabled. Exclusive emergency 638 * early in SMP boots before relocation is enabled. Exclusive emergency
621 * stack for machine checks. 639 * stack for machine checks.
@@ -633,24 +651,31 @@ void __init emergency_stack_init(void)
633 * Since we use these as temporary stacks during secondary CPU 651 * Since we use these as temporary stacks during secondary CPU
634 * bringup, we need to get at them in real mode. This means they 652 * bringup, we need to get at them in real mode. This means they
635 * must also be within the RMO region. 653 * must also be within the RMO region.
654 *
655 * The IRQ stacks allocated elsewhere in this file are zeroed and
656 * initialized in kernel/irq.c. These are initialized here in order
657 * to have emergency stacks available as early as possible.
636 */ 658 */
637 limit = min(safe_stack_limit(), ppc64_rma_size); 659 limit = min(safe_stack_limit(), ppc64_rma_size);
638 660
639 for_each_possible_cpu(i) { 661 for_each_possible_cpu(i) {
640 struct thread_info *ti; 662 struct thread_info *ti;
641 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); 663 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
642 klp_init_thread_info(ti); 664 memset(ti, 0, THREAD_SIZE);
665 emerg_stack_init_thread_info(ti, i);
643 paca[i].emergency_sp = (void *)ti + THREAD_SIZE; 666 paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
644 667
645#ifdef CONFIG_PPC_BOOK3S_64 668#ifdef CONFIG_PPC_BOOK3S_64
646 /* emergency stack for NMI exception handling. */ 669 /* emergency stack for NMI exception handling. */
647 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); 670 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
648 klp_init_thread_info(ti); 671 memset(ti, 0, THREAD_SIZE);
672 emerg_stack_init_thread_info(ti, i);
649 paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE; 673 paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
650 674
651 /* emergency stack for machine check exception handling. */ 675 /* emergency stack for machine check exception handling. */
652 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); 676 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
653 klp_init_thread_info(ti); 677 memset(ti, 0, THREAD_SIZE);
678 emerg_stack_init_thread_info(ti, i);
654 paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE; 679 paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
655#endif 680#endif
656 } 681 }
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index 7c933a99f5d5..c98e90b4ea7b 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
45 stdu r1,-SWITCH_FRAME_SIZE(r1) 45 stdu r1,-SWITCH_FRAME_SIZE(r1)
46 46
47 /* Save all gprs to pt_regs */ 47 /* Save all gprs to pt_regs */
48 SAVE_8GPRS(0,r1) 48 SAVE_GPR(0, r1)
49 SAVE_8GPRS(8,r1) 49 SAVE_10GPRS(2, r1)
50 SAVE_8GPRS(16,r1) 50 SAVE_10GPRS(12, r1)
51 SAVE_8GPRS(24,r1) 51 SAVE_10GPRS(22, r1)
52
53 /* Save previous stack pointer (r1) */
54 addi r8, r1, SWITCH_FRAME_SIZE
55 std r8, GPR1(r1)
52 56
53 /* Load special regs for save below */ 57 /* Load special regs for save below */
54 mfmsr r8 58 mfmsr r8
@@ -95,18 +99,44 @@ ftrace_call:
95 bl ftrace_stub 99 bl ftrace_stub
96 nop 100 nop
97 101
98 /* Load ctr with the possibly modified NIP */ 102 /* Load the possibly modified NIP */
99 ld r3, _NIP(r1) 103 ld r15, _NIP(r1)
100 mtctr r3 104
101#ifdef CONFIG_LIVEPATCH 105#ifdef CONFIG_LIVEPATCH
102 cmpd r14,r3 /* has NIP been altered? */ 106 cmpd r14, r15 /* has NIP been altered? */
107#endif
108
109#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
110 /* NIP has not been altered, skip over further checks */
111 beq 1f
112
113 /* Check if there is an active kprobe on us */
114 subi r3, r14, 4
115 bl is_current_kprobe_addr
116 nop
117
118 /*
119 * If r3 == 1, then this is a kprobe/jprobe.
120 * else, this is livepatched function.
121 *
122 * The conditional branch for livepatch_handler below will use the
123 * result of this comparison. For kprobe/jprobe, we just need to branch to
124 * the new NIP, not call livepatch_handler. The branch below is bne, so we
125 * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
126 * CR0[EQ] = (r3 == 1).
127 */
128 cmpdi r3, 1
1291:
103#endif 130#endif
104 131
132 /* Load CTR with the possibly modified NIP */
133 mtctr r15
134
105 /* Restore gprs */ 135 /* Restore gprs */
106 REST_8GPRS(0,r1) 136 REST_GPR(0,r1)
107 REST_8GPRS(8,r1) 137 REST_10GPRS(2,r1)
108 REST_8GPRS(16,r1) 138 REST_10GPRS(12,r1)
109 REST_8GPRS(24,r1) 139 REST_10GPRS(22,r1)
110 140
111 /* Restore possibly modified LR */ 141 /* Restore possibly modified LR */
112 ld r0, _LINK(r1) 142 ld r0, _LINK(r1)
@@ -119,7 +149,10 @@ ftrace_call:
119 addi r1, r1, SWITCH_FRAME_SIZE 149 addi r1, r1, SWITCH_FRAME_SIZE
120 150
121#ifdef CONFIG_LIVEPATCH 151#ifdef CONFIG_LIVEPATCH
122 /* Based on the cmpd above, if the NIP was altered handle livepatch */ 152 /*
153 * Based on the cmpd or cmpdi above, if the NIP was altered and we're
154 * not on a kprobe/jprobe, then handle livepatch.
155 */
123 bne- livepatch_handler 156 bne- livepatch_handler
124#endif 157#endif
125 158
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 42b7a4fd57d9..8d1a365b8edc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1486 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); 1486 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
1487 break; 1487 break;
1488 case KVM_REG_PPC_TB_OFFSET: 1488 case KVM_REG_PPC_TB_OFFSET:
1489 /*
1490 * POWER9 DD1 has an erratum where writing TBU40 causes
1491 * the timebase to lose ticks. So we don't let the
1492 * timebase offset be changed on P9 DD1. (It is
1493 * initialized to zero.)
1494 */
1495 if (cpu_has_feature(CPU_FTR_POWER9_DD1))
1496 break;
1489 /* round up to multiple of 2^24 */ 1497 /* round up to multiple of 2^24 */
1490 vcpu->arch.vcore->tb_offset = 1498 vcpu->arch.vcore->tb_offset =
1491 ALIGN(set_reg_val(id, *val), 1UL << 24); 1499 ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
2907{ 2915{
2908 int r; 2916 int r;
2909 int srcu_idx; 2917 int srcu_idx;
2918 unsigned long ebb_regs[3] = {}; /* shut up GCC */
2919 unsigned long user_tar = 0;
2920 unsigned int user_vrsave;
2910 2921
2911 if (!vcpu->arch.sane) { 2922 if (!vcpu->arch.sane) {
2912 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 2923 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2913 return -EINVAL; 2924 return -EINVAL;
2914 } 2925 }
2915 2926
2927 /*
2928 * Don't allow entry with a suspended transaction, because
2929 * the guest entry/exit code will lose it.
2930 * If the guest has TM enabled, save away their TM-related SPRs
2931 * (they will get restored by the TM unavailable interrupt).
2932 */
2933#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2934 if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
2935 (current->thread.regs->msr & MSR_TM)) {
2936 if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
2937 run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2938 run->fail_entry.hardware_entry_failure_reason = 0;
2939 return -EINVAL;
2940 }
2941 current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
2942 current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
2943 current->thread.tm_texasr = mfspr(SPRN_TEXASR);
2944 current->thread.regs->msr &= ~MSR_TM;
2945 }
2946#endif
2947
2916 kvmppc_core_prepare_to_enter(vcpu); 2948 kvmppc_core_prepare_to_enter(vcpu);
2917 2949
2918 /* No need to go into the guest when all we'll do is come back out */ 2950 /* No need to go into the guest when all we'll do is come back out */
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
2934 2966
2935 flush_all_to_thread(current); 2967 flush_all_to_thread(current);
2936 2968
2969 /* Save userspace EBB and other register values */
2970 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
2971 ebb_regs[0] = mfspr(SPRN_EBBHR);
2972 ebb_regs[1] = mfspr(SPRN_EBBRR);
2973 ebb_regs[2] = mfspr(SPRN_BESCR);
2974 user_tar = mfspr(SPRN_TAR);
2975 }
2976 user_vrsave = mfspr(SPRN_VRSAVE);
2977
2937 vcpu->arch.wqp = &vcpu->arch.vcore->wq; 2978 vcpu->arch.wqp = &vcpu->arch.vcore->wq;
2938 vcpu->arch.pgdir = current->mm->pgd; 2979 vcpu->arch.pgdir = current->mm->pgd;
2939 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; 2980 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
2960 } 3001 }
2961 } while (is_kvmppc_resume_guest(r)); 3002 } while (is_kvmppc_resume_guest(r));
2962 3003
3004 /* Restore userspace EBB and other register values */
3005 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
3006 mtspr(SPRN_EBBHR, ebb_regs[0]);
3007 mtspr(SPRN_EBBRR, ebb_regs[1]);
3008 mtspr(SPRN_BESCR, ebb_regs[2]);
3009 mtspr(SPRN_TAR, user_tar);
3010 mtspr(SPRN_FSCR, current->thread.fscr);
3011 }
3012 mtspr(SPRN_VRSAVE, user_vrsave);
3013
2963 out: 3014 out:
2964 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; 3015 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
2965 atomic_dec(&vcpu->kvm->arch.vcpus_running); 3016 atomic_dec(&vcpu->kvm->arch.vcpus_running);
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 0fdc4a28970b..404deb512844 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
121 * Put whatever is in the decrementer into the 121 * Put whatever is in the decrementer into the
122 * hypervisor decrementer. 122 * hypervisor decrementer.
123 */ 123 */
124BEGIN_FTR_SECTION
125 ld r5, HSTATE_KVM_VCORE(r13)
126 ld r6, VCORE_KVM(r5)
127 ld r9, KVM_HOST_LPCR(r6)
128 andis. r9, r9, LPCR_LD@h
129END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
124 mfspr r8,SPRN_DEC 130 mfspr r8,SPRN_DEC
125 mftb r7 131 mftb r7
126 mtspr SPRN_HDEC,r8 132BEGIN_FTR_SECTION
133 /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
134 bne 32f
135END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
127 extsw r8,r8 136 extsw r8,r8
13732: mtspr SPRN_HDEC,r8
128 add r8,r8,r7 138 add r8,r8,r7
129 std r8,HSTATE_DECEXP(r13) 139 std r8,HSTATE_DECEXP(r13)
130 140
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bdb3f76ceb6b..4888dd494604 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -32,12 +32,29 @@
32#include <asm/opal.h> 32#include <asm/opal.h>
33#include <asm/xive-regs.h> 33#include <asm/xive-regs.h>
34 34
35/* Sign-extend HDEC if not on POWER9 */
36#define EXTEND_HDEC(reg) \
37BEGIN_FTR_SECTION; \
38 extsw reg, reg; \
39END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
40
35#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) 41#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
36 42
37/* Values in HSTATE_NAPPING(r13) */ 43/* Values in HSTATE_NAPPING(r13) */
38#define NAPPING_CEDE 1 44#define NAPPING_CEDE 1
39#define NAPPING_NOVCPU 2 45#define NAPPING_NOVCPU 2
40 46
47/* Stack frame offsets for kvmppc_hv_entry */
48#define SFS 144
49#define STACK_SLOT_TRAP (SFS-4)
50#define STACK_SLOT_TID (SFS-16)
51#define STACK_SLOT_PSSCR (SFS-24)
52#define STACK_SLOT_PID (SFS-32)
53#define STACK_SLOT_IAMR (SFS-40)
54#define STACK_SLOT_CIABR (SFS-48)
55#define STACK_SLOT_DAWR (SFS-56)
56#define STACK_SLOT_DAWRX (SFS-64)
57
41/* 58/*
42 * Call kvmppc_hv_entry in real mode. 59 * Call kvmppc_hv_entry in real mode.
43 * Must be called with interrupts hard-disabled. 60 * Must be called with interrupts hard-disabled.
@@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
214kvmppc_primary_no_guest: 231kvmppc_primary_no_guest:
215 /* We handle this much like a ceded vcpu */ 232 /* We handle this much like a ceded vcpu */
216 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */ 233 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
234 /* HDEC may be larger than DEC for arch >= v3.00, but since the */
235 /* HDEC value came from DEC in the first place, it will fit */
217 mfspr r3, SPRN_HDEC 236 mfspr r3, SPRN_HDEC
218 mtspr SPRN_DEC, r3 237 mtspr SPRN_DEC, r3
219 /* 238 /*
@@ -295,8 +314,9 @@ kvm_novcpu_wakeup:
295 314
296 /* See if our timeslice has expired (HDEC is negative) */ 315 /* See if our timeslice has expired (HDEC is negative) */
297 mfspr r0, SPRN_HDEC 316 mfspr r0, SPRN_HDEC
317 EXTEND_HDEC(r0)
298 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER 318 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
299 cmpwi r0, 0 319 cmpdi r0, 0
300 blt kvm_novcpu_exit 320 blt kvm_novcpu_exit
301 321
302 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */ 322 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -319,10 +339,10 @@ kvm_novcpu_exit:
319 bl kvmhv_accumulate_time 339 bl kvmhv_accumulate_time
320#endif 340#endif
32113: mr r3, r12 34113: mr r3, r12
322 stw r12, 112-4(r1) 342 stw r12, STACK_SLOT_TRAP(r1)
323 bl kvmhv_commence_exit 343 bl kvmhv_commence_exit
324 nop 344 nop
325 lwz r12, 112-4(r1) 345 lwz r12, STACK_SLOT_TRAP(r1)
326 b kvmhv_switch_to_host 346 b kvmhv_switch_to_host
327 347
328/* 348/*
@@ -390,8 +410,8 @@ kvm_secondary_got_guest:
390 lbz r4, HSTATE_PTID(r13) 410 lbz r4, HSTATE_PTID(r13)
391 cmpwi r4, 0 411 cmpwi r4, 0
392 bne 63f 412 bne 63f
393 lis r6, 0x7fff 413 LOAD_REG_ADDR(r6, decrementer_max)
394 ori r6, r6, 0xffff 414 ld r6, 0(r6)
395 mtspr SPRN_HDEC, r6 415 mtspr SPRN_HDEC, r6
396 /* and set per-LPAR registers, if doing dynamic micro-threading */ 416 /* and set per-LPAR registers, if doing dynamic micro-threading */
397 ld r6, HSTATE_SPLIT_MODE(r13) 417 ld r6, HSTATE_SPLIT_MODE(r13)
@@ -545,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
545 * * 565 * *
546 *****************************************************************************/ 566 *****************************************************************************/
547 567
548/* Stack frame offsets */
549#define STACK_SLOT_TID (112-16)
550#define STACK_SLOT_PSSCR (112-24)
551#define STACK_SLOT_PID (112-32)
552
553.global kvmppc_hv_entry 568.global kvmppc_hv_entry
554kvmppc_hv_entry: 569kvmppc_hv_entry:
555 570
@@ -565,7 +580,7 @@ kvmppc_hv_entry:
565 */ 580 */
566 mflr r0 581 mflr r0
567 std r0, PPC_LR_STKOFF(r1) 582 std r0, PPC_LR_STKOFF(r1)
568 stdu r1, -112(r1) 583 stdu r1, -SFS(r1)
569 584
570 /* Save R1 in the PACA */ 585 /* Save R1 in the PACA */
571 std r1, HSTATE_HOST_R1(r13) 586 std r1, HSTATE_HOST_R1(r13)
@@ -749,10 +764,20 @@ BEGIN_FTR_SECTION
749 mfspr r5, SPRN_TIDR 764 mfspr r5, SPRN_TIDR
750 mfspr r6, SPRN_PSSCR 765 mfspr r6, SPRN_PSSCR
751 mfspr r7, SPRN_PID 766 mfspr r7, SPRN_PID
767 mfspr r8, SPRN_IAMR
752 std r5, STACK_SLOT_TID(r1) 768 std r5, STACK_SLOT_TID(r1)
753 std r6, STACK_SLOT_PSSCR(r1) 769 std r6, STACK_SLOT_PSSCR(r1)
754 std r7, STACK_SLOT_PID(r1) 770 std r7, STACK_SLOT_PID(r1)
771 std r8, STACK_SLOT_IAMR(r1)
755END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 772END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
773BEGIN_FTR_SECTION
774 mfspr r5, SPRN_CIABR
775 mfspr r6, SPRN_DAWR
776 mfspr r7, SPRN_DAWRX
777 std r5, STACK_SLOT_CIABR(r1)
778 std r6, STACK_SLOT_DAWR(r1)
779 std r7, STACK_SLOT_DAWRX(r1)
780END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
756 781
757BEGIN_FTR_SECTION 782BEGIN_FTR_SECTION
758 /* Set partition DABR */ 783 /* Set partition DABR */
@@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
968 993
969 /* Check if HDEC expires soon */ 994 /* Check if HDEC expires soon */
970 mfspr r3, SPRN_HDEC 995 mfspr r3, SPRN_HDEC
971 cmpwi r3, 512 /* 1 microsecond */ 996 EXTEND_HDEC(r3)
997 cmpdi r3, 512 /* 1 microsecond */
972 blt hdec_soon 998 blt hdec_soon
973 999
974#ifdef CONFIG_KVM_XICS 1000#ifdef CONFIG_KVM_XICS
@@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
1505 * set by the guest could disrupt the host. 1531 * set by the guest could disrupt the host.
1506 */ 1532 */
1507 li r0, 0 1533 li r0, 0
1508 mtspr SPRN_IAMR, r0 1534 mtspr SPRN_PSPB, r0
1509 mtspr SPRN_CIABR, r0
1510 mtspr SPRN_DAWRX, r0
1511 mtspr SPRN_WORT, r0 1535 mtspr SPRN_WORT, r0
1512BEGIN_FTR_SECTION 1536BEGIN_FTR_SECTION
1537 mtspr SPRN_IAMR, r0
1513 mtspr SPRN_TCSCR, r0 1538 mtspr SPRN_TCSCR, r0
1514 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ 1539 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
1515 li r0, 1 1540 li r0, 1
@@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
1525 std r6,VCPU_UAMOR(r9) 1550 std r6,VCPU_UAMOR(r9)
1526 li r6,0 1551 li r6,0
1527 mtspr SPRN_AMR,r6 1552 mtspr SPRN_AMR,r6
1553 mtspr SPRN_UAMOR, r6
1528 1554
1529 /* Switch DSCR back to host value */ 1555 /* Switch DSCR back to host value */
1530 mfspr r8, SPRN_DSCR 1556 mfspr r8, SPRN_DSCR
@@ -1670,12 +1696,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1670 1696
1671 /* Restore host values of some registers */ 1697 /* Restore host values of some registers */
1672BEGIN_FTR_SECTION 1698BEGIN_FTR_SECTION
1699 ld r5, STACK_SLOT_CIABR(r1)
1700 ld r6, STACK_SLOT_DAWR(r1)
1701 ld r7, STACK_SLOT_DAWRX(r1)
1702 mtspr SPRN_CIABR, r5
1703 mtspr SPRN_DAWR, r6
1704 mtspr SPRN_DAWRX, r7
1705END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1706BEGIN_FTR_SECTION
1673 ld r5, STACK_SLOT_TID(r1) 1707 ld r5, STACK_SLOT_TID(r1)
1674 ld r6, STACK_SLOT_PSSCR(r1) 1708 ld r6, STACK_SLOT_PSSCR(r1)
1675 ld r7, STACK_SLOT_PID(r1) 1709 ld r7, STACK_SLOT_PID(r1)
1710 ld r8, STACK_SLOT_IAMR(r1)
1676 mtspr SPRN_TIDR, r5 1711 mtspr SPRN_TIDR, r5
1677 mtspr SPRN_PSSCR, r6 1712 mtspr SPRN_PSSCR, r6
1678 mtspr SPRN_PID, r7 1713 mtspr SPRN_PID, r7
1714 mtspr SPRN_IAMR, r8
1679END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1715END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1680BEGIN_FTR_SECTION 1716BEGIN_FTR_SECTION
1681 PPC_INVALIDATE_ERAT 1717 PPC_INVALIDATE_ERAT
@@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
1819 li r0, KVM_GUEST_MODE_NONE 1855 li r0, KVM_GUEST_MODE_NONE
1820 stb r0, HSTATE_IN_GUEST(r13) 1856 stb r0, HSTATE_IN_GUEST(r13)
1821 1857
1822 ld r0, 112+PPC_LR_STKOFF(r1) 1858 ld r0, SFS+PPC_LR_STKOFF(r1)
1823 addi r1, r1, 112 1859 addi r1, r1, SFS
1824 mtlr r0 1860 mtlr r0
1825 blr 1861 blr
1826 1862
@@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
2366 mfspr r3, SPRN_DEC 2402 mfspr r3, SPRN_DEC
2367 mfspr r4, SPRN_HDEC 2403 mfspr r4, SPRN_HDEC
2368 mftb r5 2404 mftb r5
2369 cmpw r3, r4 2405 extsw r3, r3
2406 EXTEND_HDEC(r4)
2407 cmpd r3, r4
2370 ble 67f 2408 ble 67f
2371 mtspr SPRN_DEC, r4 2409 mtspr SPRN_DEC, r4
237267: 241067:
2373 /* save expiry time of guest decrementer */ 2411 /* save expiry time of guest decrementer */
2374 extsw r3, r3
2375 add r3, r3, r5 2412 add r3, r3, r5
2376 ld r4, HSTATE_KVM_VCPU(r13) 2413 ld r4, HSTATE_KVM_VCPU(r13)
2377 ld r5, HSTATE_KVM_VCORE(r13) 2414 ld r5, HSTATE_KVM_VCORE(r13)
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index cbd82fde5770..09ceea6175ba 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
101 struct pt_regs *regs_user_copy) 101 struct pt_regs *regs_user_copy)
102{ 102{
103 regs_user->regs = task_pt_regs(current); 103 regs_user->regs = task_pt_regs(current);
104 regs_user->abi = perf_reg_abi(current); 104 regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
105 PERF_SAMPLE_REGS_ABI_NONE;
105} 106}
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index e6f444b46207..b5d960d6db3d 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -449,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
449 return mmio_atsd_reg; 449 return mmio_atsd_reg;
450} 450}
451 451
452static int mmio_invalidate_pid(struct npu *npu, unsigned long pid) 452static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
453{ 453{
454 unsigned long launch; 454 unsigned long launch;
455 455
@@ -465,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
465 /* PID */ 465 /* PID */
466 launch |= pid << PPC_BITLSHIFT(38); 466 launch |= pid << PPC_BITLSHIFT(38);
467 467
468 /* No flush */
469 launch |= !flush << PPC_BITLSHIFT(39);
470
468 /* Invalidating the entire process doesn't use a va */ 471 /* Invalidating the entire process doesn't use a va */
469 return mmio_launch_invalidate(npu, launch, 0); 472 return mmio_launch_invalidate(npu, launch, 0);
470} 473}
471 474
472static int mmio_invalidate_va(struct npu *npu, unsigned long va, 475static int mmio_invalidate_va(struct npu *npu, unsigned long va,
473 unsigned long pid) 476 unsigned long pid, bool flush)
474{ 477{
475 unsigned long launch; 478 unsigned long launch;
476 479
@@ -486,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
486 /* PID */ 489 /* PID */
487 launch |= pid << PPC_BITLSHIFT(38); 490 launch |= pid << PPC_BITLSHIFT(38);
488 491
492 /* No flush */
493 launch |= !flush << PPC_BITLSHIFT(39);
494
489 return mmio_launch_invalidate(npu, launch, va); 495 return mmio_launch_invalidate(npu, launch, va);
490} 496}
491 497
492#define mn_to_npu_context(x) container_of(x, struct npu_context, mn) 498#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
493 499
500struct mmio_atsd_reg {
501 struct npu *npu;
502 int reg;
503};
504
505static void mmio_invalidate_wait(
506 struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
507{
508 struct npu *npu;
509 int i, reg;
510
511 /* Wait for all invalidations to complete */
512 for (i = 0; i <= max_npu2_index; i++) {
513 if (mmio_atsd_reg[i].reg < 0)
514 continue;
515
516 /* Wait for completion */
517 npu = mmio_atsd_reg[i].npu;
518 reg = mmio_atsd_reg[i].reg;
519 while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
520 cpu_relax();
521
522 put_mmio_atsd_reg(npu, reg);
523
524 /*
525 * The GPU requires two flush ATSDs to ensure all entries have
526 * been flushed. We use PID 0 as it will never be used for a
527 * process on the GPU.
528 */
529 if (flush)
530 mmio_invalidate_pid(npu, 0, true);
531 }
532}
533
494/* 534/*
495 * Invalidate either a single address or an entire PID depending on 535 * Invalidate either a single address or an entire PID depending on
496 * the value of va. 536 * the value of va.
497 */ 537 */
498static void mmio_invalidate(struct npu_context *npu_context, int va, 538static void mmio_invalidate(struct npu_context *npu_context, int va,
499 unsigned long address) 539 unsigned long address, bool flush)
500{ 540{
501 int i, j, reg; 541 int i, j;
502 struct npu *npu; 542 struct npu *npu;
503 struct pnv_phb *nphb; 543 struct pnv_phb *nphb;
504 struct pci_dev *npdev; 544 struct pci_dev *npdev;
505 struct { 545 struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
506 struct npu *npu;
507 int reg;
508 } mmio_atsd_reg[NV_MAX_NPUS];
509 unsigned long pid = npu_context->mm->context.id; 546 unsigned long pid = npu_context->mm->context.id;
510 547
511 /* 548 /*
@@ -525,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
525 562
526 if (va) 563 if (va)
527 mmio_atsd_reg[i].reg = 564 mmio_atsd_reg[i].reg =
528 mmio_invalidate_va(npu, address, pid); 565 mmio_invalidate_va(npu, address, pid,
566 flush);
529 else 567 else
530 mmio_atsd_reg[i].reg = 568 mmio_atsd_reg[i].reg =
531 mmio_invalidate_pid(npu, pid); 569 mmio_invalidate_pid(npu, pid, flush);
532 570
533 /* 571 /*
534 * The NPU hardware forwards the shootdown to all GPUs 572 * The NPU hardware forwards the shootdown to all GPUs
@@ -544,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
544 */ 582 */
545 flush_tlb_mm(npu_context->mm); 583 flush_tlb_mm(npu_context->mm);
546 584
547 /* Wait for all invalidations to complete */ 585 mmio_invalidate_wait(mmio_atsd_reg, flush);
548 for (i = 0; i <= max_npu2_index; i++) { 586 if (flush)
549 if (mmio_atsd_reg[i].reg < 0) 587 /* Wait for the flush to complete */
550 continue; 588 mmio_invalidate_wait(mmio_atsd_reg, false);
551
552 /* Wait for completion */
553 npu = mmio_atsd_reg[i].npu;
554 reg = mmio_atsd_reg[i].reg;
555 while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
556 cpu_relax();
557 put_mmio_atsd_reg(npu, reg);
558 }
559} 589}
560 590
561static void pnv_npu2_mn_release(struct mmu_notifier *mn, 591static void pnv_npu2_mn_release(struct mmu_notifier *mn,
@@ -571,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
571 * There should be no more translation requests for this PID, but we 601 * There should be no more translation requests for this PID, but we
572 * need to ensure any entries for it are removed from the TLB. 602 * need to ensure any entries for it are removed from the TLB.
573 */ 603 */
574 mmio_invalidate(npu_context, 0, 0); 604 mmio_invalidate(npu_context, 0, 0, true);
575} 605}
576 606
577static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn, 607static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -581,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
581{ 611{
582 struct npu_context *npu_context = mn_to_npu_context(mn); 612 struct npu_context *npu_context = mn_to_npu_context(mn);
583 613
584 mmio_invalidate(npu_context, 1, address); 614 mmio_invalidate(npu_context, 1, address, true);
585} 615}
586 616
587static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn, 617static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
@@ -590,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
590{ 620{
591 struct npu_context *npu_context = mn_to_npu_context(mn); 621 struct npu_context *npu_context = mn_to_npu_context(mn);
592 622
593 mmio_invalidate(npu_context, 1, address); 623 mmio_invalidate(npu_context, 1, address, true);
594} 624}
595 625
596static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, 626static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -600,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
600 struct npu_context *npu_context = mn_to_npu_context(mn); 630 struct npu_context *npu_context = mn_to_npu_context(mn);
601 unsigned long address; 631 unsigned long address;
602 632
603 for (address = start; address <= end; address += PAGE_SIZE) 633 for (address = start; address < end; address += PAGE_SIZE)
604 mmio_invalidate(npu_context, 1, address); 634 mmio_invalidate(npu_context, 1, address, false);
635
636 /* Do the flush only on the final addess == end */
637 mmio_invalidate(npu_context, 1, address, true);
605} 638}
606 639
607static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { 640static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -651,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
651 /* No nvlink associated with this GPU device */ 684 /* No nvlink associated with this GPU device */
652 return ERR_PTR(-ENODEV); 685 return ERR_PTR(-ENODEV);
653 686
654 if (!mm) { 687 if (!mm || mm->context.id == 0) {
655 /* kernel thread contexts are not supported */ 688 /*
689 * Kernel thread contexts are not supported and context id 0 is
690 * reserved on the GPU.
691 */
656 return ERR_PTR(-EINVAL); 692 return ERR_PTR(-EINVAL);
657 } 693 }
658 694
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 9da243d94cc3..3b297fa3aa67 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
977 ptr = asce.origin * 4096; 977 ptr = asce.origin * 4096;
978 if (asce.r) { 978 if (asce.r) {
979 *fake = 1; 979 *fake = 1;
980 ptr = 0;
980 asce.dt = ASCE_TYPE_REGION1; 981 asce.dt = ASCE_TYPE_REGION1;
981 } 982 }
982 switch (asce.dt) { 983 switch (asce.dt) {
983 case ASCE_TYPE_REGION1: 984 case ASCE_TYPE_REGION1:
984 if (vaddr.rfx01 > asce.tl && !asce.r) 985 if (vaddr.rfx01 > asce.tl && !*fake)
985 return PGM_REGION_FIRST_TRANS; 986 return PGM_REGION_FIRST_TRANS;
986 break; 987 break;
987 case ASCE_TYPE_REGION2: 988 case ASCE_TYPE_REGION2:
@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
1009 union region1_table_entry rfte; 1010 union region1_table_entry rfte;
1010 1011
1011 if (*fake) { 1012 if (*fake) {
1012 /* offset in 16EB guest memory block */ 1013 ptr += (unsigned long) vaddr.rfx << 53;
1013 ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
1014 rfte.val = ptr; 1014 rfte.val = ptr;
1015 goto shadow_r2t; 1015 goto shadow_r2t;
1016 } 1016 }
@@ -1036,8 +1036,7 @@ shadow_r2t:
1036 union region2_table_entry rste; 1036 union region2_table_entry rste;
1037 1037
1038 if (*fake) { 1038 if (*fake) {
1039 /* offset in 8PB guest memory block */ 1039 ptr += (unsigned long) vaddr.rsx << 42;
1040 ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
1041 rste.val = ptr; 1040 rste.val = ptr;
1042 goto shadow_r3t; 1041 goto shadow_r3t;
1043 } 1042 }
@@ -1064,8 +1063,7 @@ shadow_r3t:
1064 union region3_table_entry rtte; 1063 union region3_table_entry rtte;
1065 1064
1066 if (*fake) { 1065 if (*fake) {
1067 /* offset in 4TB guest memory block */ 1066 ptr += (unsigned long) vaddr.rtx << 31;
1068 ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
1069 rtte.val = ptr; 1067 rtte.val = ptr;
1070 goto shadow_sgt; 1068 goto shadow_sgt;
1071 } 1069 }
@@ -1101,8 +1099,7 @@ shadow_sgt:
1101 union segment_table_entry ste; 1099 union segment_table_entry ste;
1102 1100
1103 if (*fake) { 1101 if (*fake) {
1104 /* offset in 2G guest memory block */ 1102 ptr += (unsigned long) vaddr.sx << 20;
1105 ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
1106 ste.val = ptr; 1103 ste.val = ptr;
1107 goto shadow_pgt; 1104 goto shadow_pgt;
1108 } 1105 }
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a6d91d4e37a1..110ce8238466 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids
431 [ C(DTLB) ] = { 431 [ C(DTLB) ] = {
432 [ C(OP_READ) ] = { 432 [ C(OP_READ) ] = {
433 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ 433 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
434 [ C(RESULT_MISS) ] = 0x608, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ 434 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
435 }, 435 },
436 [ C(OP_WRITE) ] = { 436 [ C(OP_WRITE) ] = {
437 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ 437 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
438 [ C(RESULT_MISS) ] = 0x649, /* DTLB_STORE_MISSES.WALK_COMPLETED */ 438 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
439 }, 439 },
440 [ C(OP_PREFETCH) ] = { 440 [ C(OP_PREFETCH) ] = {
441 [ C(RESULT_ACCESS) ] = 0x0, 441 [ C(RESULT_ACCESS) ] = 0x0,
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 055962615779..722d0e568863 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
296 296
297 bool perm_ok; /* do not check permissions if true */ 297 bool perm_ok; /* do not check permissions if true */
298 bool ud; /* inject an #UD if host doesn't support insn */ 298 bool ud; /* inject an #UD if host doesn't support insn */
299 bool tf; /* TF value before instruction (after for syscall/sysret) */
299 300
300 bool have_exception; 301 bool have_exception;
301 struct x86_exception exception; 302 struct x86_exception exception;
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index fba100713924..d5acc27ed1cc 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -2,8 +2,7 @@
2#define _ASM_X86_MSHYPER_H 2#define _ASM_X86_MSHYPER_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/interrupt.h> 5#include <linux/atomic.h>
6#include <linux/clocksource.h>
7#include <asm/hyperv.h> 6#include <asm/hyperv.h>
8 7
9/* 8/*
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 0816ab2e8adc..80890dee66ce 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
2742 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); 2742 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2743 } 2743 }
2744 2744
2745 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2745 return X86EMUL_CONTINUE; 2746 return X86EMUL_CONTINUE;
2746} 2747}
2747 2748
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 87d3cb901935..0e846f0cb83b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
5313 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 5313 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
5314 5314
5315 ctxt->eflags = kvm_get_rflags(vcpu); 5315 ctxt->eflags = kvm_get_rflags(vcpu);
5316 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
5317
5316 ctxt->eip = kvm_rip_read(vcpu); 5318 ctxt->eip = kvm_rip_read(vcpu);
5317 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : 5319 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
5318 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : 5320 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
@@ -5528,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
5528 return dr6; 5530 return dr6;
5529} 5531}
5530 5532
5531static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r) 5533static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
5532{ 5534{
5533 struct kvm_run *kvm_run = vcpu->run; 5535 struct kvm_run *kvm_run = vcpu->run;
5534 5536
5535 /* 5537 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
5536 * rflags is the old, "raw" value of the flags. The new value has 5538 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
5537 * not been saved yet. 5539 kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
5538 * 5540 kvm_run->debug.arch.exception = DB_VECTOR;
5539 * This is correct even for TF set by the guest, because "the 5541 kvm_run->exit_reason = KVM_EXIT_DEBUG;
5540 * processor will not generate this exception after the instruction 5542 *r = EMULATE_USER_EXIT;
5541 * that sets the TF flag". 5543 } else {
5542 */ 5544 /*
5543 if (unlikely(rflags & X86_EFLAGS_TF)) { 5545 * "Certain debug exceptions may clear bit 0-3. The
5544 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 5546 * remaining contents of the DR6 register are never
5545 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | 5547 * cleared by the processor".
5546 DR6_RTM; 5548 */
5547 kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; 5549 vcpu->arch.dr6 &= ~15;
5548 kvm_run->debug.arch.exception = DB_VECTOR; 5550 vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
5549 kvm_run->exit_reason = KVM_EXIT_DEBUG; 5551 kvm_queue_exception(vcpu, DB_VECTOR);
5550 *r = EMULATE_USER_EXIT;
5551 } else {
5552 /*
5553 * "Certain debug exceptions may clear bit 0-3. The
5554 * remaining contents of the DR6 register are never
5555 * cleared by the processor".
5556 */
5557 vcpu->arch.dr6 &= ~15;
5558 vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
5559 kvm_queue_exception(vcpu, DB_VECTOR);
5560 }
5561 } 5552 }
5562} 5553}
5563 5554
@@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
5567 int r = EMULATE_DONE; 5558 int r = EMULATE_DONE;
5568 5559
5569 kvm_x86_ops->skip_emulated_instruction(vcpu); 5560 kvm_x86_ops->skip_emulated_instruction(vcpu);
5570 kvm_vcpu_check_singlestep(vcpu, rflags, &r); 5561
5562 /*
5563 * rflags is the old, "raw" value of the flags. The new value has
5564 * not been saved yet.
5565 *
5566 * This is correct even for TF set by the guest, because "the
5567 * processor will not generate this exception after the instruction
5568 * that sets the TF flag".
5569 */
5570 if (unlikely(rflags & X86_EFLAGS_TF))
5571 kvm_vcpu_do_singlestep(vcpu, &r);
5571 return r == EMULATE_DONE; 5572 return r == EMULATE_DONE;
5572} 5573}
5573EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); 5574EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
@@ -5726,8 +5727,9 @@ restart:
5726 toggle_interruptibility(vcpu, ctxt->interruptibility); 5727 toggle_interruptibility(vcpu, ctxt->interruptibility);
5727 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 5728 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
5728 kvm_rip_write(vcpu, ctxt->eip); 5729 kvm_rip_write(vcpu, ctxt->eip);
5729 if (r == EMULATE_DONE) 5730 if (r == EMULATE_DONE &&
5730 kvm_vcpu_check_singlestep(vcpu, rflags, &r); 5731 (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
5732 kvm_vcpu_do_singlestep(vcpu, &r);
5731 if (!ctxt->have_exception || 5733 if (!ctxt->have_exception ||
5732 exception_type(ctxt->exception.vector) == EXCPT_TRAP) 5734 exception_type(ctxt->exception.vector) == EXCPT_TRAP)
5733 __kvm_set_rflags(vcpu, ctxt->eflags); 5735 __kvm_set_rflags(vcpu, ctxt->eflags);
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 1f5b692526ae..0ded5e846335 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
68 __blk_mq_sched_assign_ioc(q, rq, bio, ioc); 68 __blk_mq_sched_assign_ioc(q, rq, bio, ioc);
69} 69}
70 70
71/*
72 * Mark a hardware queue as needing a restart. For shared queues, maintain
73 * a count of how many hardware queues are marked for restart.
74 */
75static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
76{
77 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
78 return;
79
80 if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
81 struct request_queue *q = hctx->queue;
82
83 if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
84 atomic_inc(&q->shared_hctx_restart);
85 } else
86 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
87}
88
89static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
90{
91 if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
92 return false;
93
94 if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
95 struct request_queue *q = hctx->queue;
96
97 if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
98 atomic_dec(&q->shared_hctx_restart);
99 } else
100 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
101
102 if (blk_mq_hctx_has_pending(hctx)) {
103 blk_mq_run_hw_queue(hctx, true);
104 return true;
105 }
106
107 return false;
108}
109
71struct request *blk_mq_sched_get_request(struct request_queue *q, 110struct request *blk_mq_sched_get_request(struct request_queue *q,
72 struct bio *bio, 111 struct bio *bio,
73 unsigned int op, 112 unsigned int op,
@@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
266 return true; 305 return true;
267} 306}
268 307
269static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
270{
271 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
272 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
273 if (blk_mq_hctx_has_pending(hctx)) {
274 blk_mq_run_hw_queue(hctx, true);
275 return true;
276 }
277 }
278 return false;
279}
280
281/** 308/**
282 * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list 309 * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
283 * @pos: loop cursor. 310 * @pos: loop cursor.
@@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
309 unsigned int i, j; 336 unsigned int i, j;
310 337
311 if (set->flags & BLK_MQ_F_TAG_SHARED) { 338 if (set->flags & BLK_MQ_F_TAG_SHARED) {
339 /*
340 * If this is 0, then we know that no hardware queues
341 * have RESTART marked. We're done.
342 */
343 if (!atomic_read(&queue->shared_hctx_restart))
344 return;
345
312 rcu_read_lock(); 346 rcu_read_lock();
313 list_for_each_entry_rcu_rr(q, queue, &set->tag_list, 347 list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
314 tag_set_list) { 348 tag_set_list) {
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index edafb5383b7b..5007edece51a 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
115 return false; 115 return false;
116} 116}
117 117
118/*
119 * Mark a hardware queue as needing a restart.
120 */
121static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
122{
123 if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
124 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
125}
126
127static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) 118static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
128{ 119{
129 return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); 120 return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index bb66c96850b1..958cedaff8b8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2103,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q,
2103 } 2103 }
2104} 2104}
2105 2105
2106/*
2107 * Caller needs to ensure that we're either frozen/quiesced, or that
2108 * the queue isn't live yet.
2109 */
2106static void queue_set_hctx_shared(struct request_queue *q, bool shared) 2110static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2107{ 2111{
2108 struct blk_mq_hw_ctx *hctx; 2112 struct blk_mq_hw_ctx *hctx;
2109 int i; 2113 int i;
2110 2114
2111 queue_for_each_hw_ctx(q, hctx, i) { 2115 queue_for_each_hw_ctx(q, hctx, i) {
2112 if (shared) 2116 if (shared) {
2117 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2118 atomic_inc(&q->shared_hctx_restart);
2113 hctx->flags |= BLK_MQ_F_TAG_SHARED; 2119 hctx->flags |= BLK_MQ_F_TAG_SHARED;
2114 else 2120 } else {
2121 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2122 atomic_dec(&q->shared_hctx_restart);
2115 hctx->flags &= ~BLK_MQ_F_TAG_SHARED; 2123 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2124 }
2116 } 2125 }
2117} 2126}
2118 2127
2119static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared) 2128static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2129 bool shared)
2120{ 2130{
2121 struct request_queue *q; 2131 struct request_queue *q;
2122 2132
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 3a10d7573477..d53162997f32 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev)
1428 adev->flags.coherent_dma = cca; 1428 adev->flags.coherent_dma = cca;
1429} 1429}
1430 1430
1431static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
1432{
1433 bool *is_spi_i2c_slave_p = data;
1434
1435 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
1436 return 1;
1437
1438 /*
1439 * devices that are connected to UART still need to be enumerated to
1440 * platform bus
1441 */
1442 if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
1443 *is_spi_i2c_slave_p = true;
1444
1445 /* no need to do more checking */
1446 return -1;
1447}
1448
1449static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
1450{
1451 struct list_head resource_list;
1452 bool is_spi_i2c_slave = false;
1453
1454 INIT_LIST_HEAD(&resource_list);
1455 acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
1456 &is_spi_i2c_slave);
1457 acpi_dev_free_resource_list(&resource_list);
1458
1459 return is_spi_i2c_slave;
1460}
1461
1431void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, 1462void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
1432 int type, unsigned long long sta) 1463 int type, unsigned long long sta)
1433{ 1464{
@@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
1443 acpi_bus_get_flags(device); 1474 acpi_bus_get_flags(device);
1444 device->flags.match_driver = false; 1475 device->flags.match_driver = false;
1445 device->flags.initialized = true; 1476 device->flags.initialized = true;
1477 device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
1446 acpi_device_clear_enumerated(device); 1478 acpi_device_clear_enumerated(device);
1447 device_initialize(&device->dev); 1479 device_initialize(&device->dev);
1448 dev_set_uevent_suppress(&device->dev, true); 1480 dev_set_uevent_suppress(&device->dev, true);
@@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
1727 return AE_OK; 1759 return AE_OK;
1728} 1760}
1729 1761
1730static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
1731{
1732 bool *is_spi_i2c_slave_p = data;
1733
1734 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
1735 return 1;
1736
1737 /*
1738 * devices that are connected to UART still need to be enumerated to
1739 * platform bus
1740 */
1741 if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
1742 *is_spi_i2c_slave_p = true;
1743
1744 /* no need to do more checking */
1745 return -1;
1746}
1747
1748static void acpi_default_enumeration(struct acpi_device *device) 1762static void acpi_default_enumeration(struct acpi_device *device)
1749{ 1763{
1750 struct list_head resource_list;
1751 bool is_spi_i2c_slave = false;
1752
1753 /* 1764 /*
1754 * Do not enumerate SPI/I2C slaves as they will be enumerated by their 1765 * Do not enumerate SPI/I2C slaves as they will be enumerated by their
1755 * respective parents. 1766 * respective parents.
1756 */ 1767 */
1757 INIT_LIST_HEAD(&resource_list); 1768 if (!device->flags.spi_i2c_slave) {
1758 acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
1759 &is_spi_i2c_slave);
1760 acpi_dev_free_resource_list(&resource_list);
1761 if (!is_spi_i2c_slave) {
1762 acpi_create_platform_device(device, NULL); 1769 acpi_create_platform_device(device, NULL);
1763 acpi_device_set_enumerated(device); 1770 acpi_device_set_enumerated(device);
1764 } else { 1771 } else {
@@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device)
1854 return; 1861 return;
1855 1862
1856 device->flags.match_driver = true; 1863 device->flags.match_driver = true;
1857 if (ret > 0) { 1864 if (ret > 0 && !device->flags.spi_i2c_slave) {
1858 acpi_device_set_enumerated(device); 1865 acpi_device_set_enumerated(device);
1859 goto ok; 1866 goto ok;
1860 } 1867 }
@@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device)
1863 if (ret < 0) 1870 if (ret < 0)
1864 return; 1871 return;
1865 1872
1866 if (device->pnp.type.platform_id) 1873 if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
1867 acpi_default_enumeration(device);
1868 else
1869 acpi_device_set_enumerated(device); 1874 acpi_device_set_enumerated(device);
1875 else
1876 acpi_default_enumeration(device);
1870 1877
1871 ok: 1878 ok:
1872 list_for_each_entry(child, &device->children, node) 1879 list_for_each_entry(child, &device->children, node)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 726c32e35db9..0e824091a12f 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
609 unsigned long timeout; 609 unsigned long timeout;
610 int ret; 610 int ret;
611 611
612 xen_blkif_get(blkif);
613
614 set_freezable(); 612 set_freezable();
615 while (!kthread_should_stop()) { 613 while (!kthread_should_stop()) {
616 if (try_to_freeze()) 614 if (try_to_freeze())
@@ -665,7 +663,6 @@ purge_gnt_list:
665 print_stats(ring); 663 print_stats(ring);
666 664
667 ring->xenblkd = NULL; 665 ring->xenblkd = NULL;
668 xen_blkif_put(blkif);
669 666
670 return 0; 667 return 0;
671} 668}
@@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1436static void make_response(struct xen_blkif_ring *ring, u64 id, 1433static void make_response(struct xen_blkif_ring *ring, u64 id,
1437 unsigned short op, int st) 1434 unsigned short op, int st)
1438{ 1435{
1439 struct blkif_response resp; 1436 struct blkif_response *resp;
1440 unsigned long flags; 1437 unsigned long flags;
1441 union blkif_back_rings *blk_rings; 1438 union blkif_back_rings *blk_rings;
1442 int notify; 1439 int notify;
1443 1440
1444 resp.id = id;
1445 resp.operation = op;
1446 resp.status = st;
1447
1448 spin_lock_irqsave(&ring->blk_ring_lock, flags); 1441 spin_lock_irqsave(&ring->blk_ring_lock, flags);
1449 blk_rings = &ring->blk_rings; 1442 blk_rings = &ring->blk_rings;
1450 /* Place on the response ring for the relevant domain. */ 1443 /* Place on the response ring for the relevant domain. */
1451 switch (ring->blkif->blk_protocol) { 1444 switch (ring->blkif->blk_protocol) {
1452 case BLKIF_PROTOCOL_NATIVE: 1445 case BLKIF_PROTOCOL_NATIVE:
1453 memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), 1446 resp = RING_GET_RESPONSE(&blk_rings->native,
1454 &resp, sizeof(resp)); 1447 blk_rings->native.rsp_prod_pvt);
1455 break; 1448 break;
1456 case BLKIF_PROTOCOL_X86_32: 1449 case BLKIF_PROTOCOL_X86_32:
1457 memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), 1450 resp = RING_GET_RESPONSE(&blk_rings->x86_32,
1458 &resp, sizeof(resp)); 1451 blk_rings->x86_32.rsp_prod_pvt);
1459 break; 1452 break;
1460 case BLKIF_PROTOCOL_X86_64: 1453 case BLKIF_PROTOCOL_X86_64:
1461 memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), 1454 resp = RING_GET_RESPONSE(&blk_rings->x86_64,
1462 &resp, sizeof(resp)); 1455 blk_rings->x86_64.rsp_prod_pvt);
1463 break; 1456 break;
1464 default: 1457 default:
1465 BUG(); 1458 BUG();
1466 } 1459 }
1460
1461 resp->id = id;
1462 resp->operation = op;
1463 resp->status = st;
1464
1467 blk_rings->common.rsp_prod_pvt++; 1465 blk_rings->common.rsp_prod_pvt++;
1468 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); 1466 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
1469 spin_unlock_irqrestore(&ring->blk_ring_lock, flags); 1467 spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dea61f6ab8cb..ecb35fe8ca8d 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
75struct blkif_common_request { 75struct blkif_common_request {
76 char dummy; 76 char dummy;
77}; 77};
78struct blkif_common_response { 78
79 char dummy; 79/* i386 protocol version */
80};
81 80
82struct blkif_x86_32_request_rw { 81struct blkif_x86_32_request_rw {
83 uint8_t nr_segments; /* number of segments */ 82 uint8_t nr_segments; /* number of segments */
@@ -129,14 +128,6 @@ struct blkif_x86_32_request {
129 } u; 128 } u;
130} __attribute__((__packed__)); 129} __attribute__((__packed__));
131 130
132/* i386 protocol version */
133#pragma pack(push, 4)
134struct blkif_x86_32_response {
135 uint64_t id; /* copied from request */
136 uint8_t operation; /* copied from request */
137 int16_t status; /* BLKIF_RSP_??? */
138};
139#pragma pack(pop)
140/* x86_64 protocol version */ 131/* x86_64 protocol version */
141 132
142struct blkif_x86_64_request_rw { 133struct blkif_x86_64_request_rw {
@@ -193,18 +184,12 @@ struct blkif_x86_64_request {
193 } u; 184 } u;
194} __attribute__((__packed__)); 185} __attribute__((__packed__));
195 186
196struct blkif_x86_64_response {
197 uint64_t __attribute__((__aligned__(8))) id;
198 uint8_t operation; /* copied from request */
199 int16_t status; /* BLKIF_RSP_??? */
200};
201
202DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, 187DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
203 struct blkif_common_response); 188 struct blkif_response);
204DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, 189DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
205 struct blkif_x86_32_response); 190 struct blkif_response __packed);
206DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, 191DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
207 struct blkif_x86_64_response); 192 struct blkif_response);
208 193
209union blkif_back_rings { 194union blkif_back_rings {
210 struct blkif_back_ring native; 195 struct blkif_back_ring native;
@@ -281,6 +266,7 @@ struct xen_blkif_ring {
281 266
282 wait_queue_head_t wq; 267 wait_queue_head_t wq;
283 atomic_t inflight; 268 atomic_t inflight;
269 bool active;
284 /* One thread per blkif ring. */ 270 /* One thread per blkif ring. */
285 struct task_struct *xenblkd; 271 struct task_struct *xenblkd;
286 unsigned int waiting_reqs; 272 unsigned int waiting_reqs;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 1f3dfaa54d87..792da683e70d 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
159 init_waitqueue_head(&ring->shutdown_wq); 159 init_waitqueue_head(&ring->shutdown_wq);
160 ring->blkif = blkif; 160 ring->blkif = blkif;
161 ring->st_print = jiffies; 161 ring->st_print = jiffies;
162 xen_blkif_get(blkif); 162 ring->active = true;
163 } 163 }
164 164
165 return 0; 165 return 0;
@@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
249 struct xen_blkif_ring *ring = &blkif->rings[r]; 249 struct xen_blkif_ring *ring = &blkif->rings[r];
250 unsigned int i = 0; 250 unsigned int i = 0;
251 251
252 if (!ring->active)
253 continue;
254
252 if (ring->xenblkd) { 255 if (ring->xenblkd) {
253 kthread_stop(ring->xenblkd); 256 kthread_stop(ring->xenblkd);
254 wake_up(&ring->shutdown_wq); 257 wake_up(&ring->shutdown_wq);
255 ring->xenblkd = NULL;
256 } 258 }
257 259
258 /* The above kthread_stop() guarantees that at this point we 260 /* The above kthread_stop() guarantees that at this point we
@@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
296 BUG_ON(ring->free_pages_num != 0); 298 BUG_ON(ring->free_pages_num != 0);
297 BUG_ON(ring->persistent_gnt_c != 0); 299 BUG_ON(ring->persistent_gnt_c != 0);
298 WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); 300 WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
299 xen_blkif_put(blkif); 301 ring->active = false;
300 } 302 }
301 blkif->nr_ring_pages = 0; 303 blkif->nr_ring_pages = 0;
302 /* 304 /*
@@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
312 314
313static void xen_blkif_free(struct xen_blkif *blkif) 315static void xen_blkif_free(struct xen_blkif *blkif)
314{ 316{
315 317 WARN_ON(xen_blkif_disconnect(blkif));
316 xen_blkif_disconnect(blkif);
317 xen_vbd_free(&blkif->vbd); 318 xen_vbd_free(&blkif->vbd);
319 kfree(blkif->be->mode);
320 kfree(blkif->be);
318 321
319 /* Make sure everything is drained before shutting down */ 322 /* Make sure everything is drained before shutting down */
320 kmem_cache_free(xen_blkif_cachep, blkif); 323 kmem_cache_free(xen_blkif_cachep, blkif);
@@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
511 xen_blkif_put(be->blkif); 514 xen_blkif_put(be->blkif);
512 } 515 }
513 516
514 kfree(be->mode);
515 kfree(be);
516 return 0; 517 return 0;
517} 518}
518 519
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 0a52da439abf..b83c5351376c 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -121,7 +121,6 @@ config QCOM_EBI2
121config SIMPLE_PM_BUS 121config SIMPLE_PM_BUS
122 bool "Simple Power-Managed Bus Driver" 122 bool "Simple Power-Managed Bus Driver"
123 depends on OF && PM 123 depends on OF && PM
124 depends on ARCH_RENESAS || COMPILE_TEST
125 help 124 help
126 Driver for transparent busses that don't need a real driver, but 125 Driver for transparent busses that don't need a real driver, but
127 where the bus controller is part of a PM domain, or under the control 126 where the bus controller is part of a PM domain, or under the control
diff --git a/drivers/char/random.c b/drivers/char/random.c
index e870f329db88..01a260f67437 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
803 p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp; 803 p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
804 cp++; crng_init_cnt++; len--; 804 cp++; crng_init_cnt++; len--;
805 } 805 }
806 spin_unlock_irqrestore(&primary_crng.lock, flags);
806 if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { 807 if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
807 invalidate_batched_entropy(); 808 invalidate_batched_entropy();
808 crng_init = 1; 809 crng_init = 1;
809 wake_up_interruptible(&crng_init_wait); 810 wake_up_interruptible(&crng_init_wait);
810 pr_notice("random: fast init done\n"); 811 pr_notice("random: fast init done\n");
811 } 812 }
812 spin_unlock_irqrestore(&primary_crng.lock, flags);
813 return 1; 813 return 1;
814} 814}
815 815
@@ -841,6 +841,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
841 } 841 }
842 memzero_explicit(&buf, sizeof(buf)); 842 memzero_explicit(&buf, sizeof(buf));
843 crng->init_time = jiffies; 843 crng->init_time = jiffies;
844 spin_unlock_irqrestore(&primary_crng.lock, flags);
844 if (crng == &primary_crng && crng_init < 2) { 845 if (crng == &primary_crng && crng_init < 2) {
845 invalidate_batched_entropy(); 846 invalidate_batched_entropy();
846 crng_init = 2; 847 crng_init = 2;
@@ -848,7 +849,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
848 wake_up_interruptible(&crng_init_wait); 849 wake_up_interruptible(&crng_init_wait);
849 pr_notice("random: crng init done\n"); 850 pr_notice("random: crng init done\n");
850 } 851 }
851 spin_unlock_irqrestore(&primary_crng.lock, flags);
852} 852}
853 853
854static inline void crng_wait_ready(void) 854static inline void crng_wait_ready(void)
@@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
2041u64 get_random_u64(void) 2041u64 get_random_u64(void)
2042{ 2042{
2043 u64 ret; 2043 u64 ret;
2044 bool use_lock = crng_init < 2; 2044 bool use_lock = READ_ONCE(crng_init) < 2;
2045 unsigned long flags; 2045 unsigned long flags = 0;
2046 struct batched_entropy *batch; 2046 struct batched_entropy *batch;
2047 2047
2048#if BITS_PER_LONG == 64 2048#if BITS_PER_LONG == 64
@@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
2073u32 get_random_u32(void) 2073u32 get_random_u32(void)
2074{ 2074{
2075 u32 ret; 2075 u32 ret;
2076 bool use_lock = crng_init < 2; 2076 bool use_lock = READ_ONCE(crng_init) < 2;
2077 unsigned long flags; 2077 unsigned long flags = 0;
2078 struct batched_entropy *batch; 2078 struct batched_entropy *batch;
2079 2079
2080 if (arch_get_random_int(&ret)) 2080 if (arch_get_random_int(&ret))
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 19480bcc7046..2f29ee1a4d00 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -14,6 +14,7 @@ config COMMON_CLK_MESON8B
14config COMMON_CLK_GXBB 14config COMMON_CLK_GXBB
15 bool 15 bool
16 depends on COMMON_CLK_AMLOGIC 16 depends on COMMON_CLK_AMLOGIC
17 select RESET_CONTROLLER
17 help 18 help
18 Support for the clock controller on AmLogic S905 devices, aka gxbb. 19 Support for the clock controller on AmLogic S905 devices, aka gxbb.
19 Say Y if you want peripherals and CPU frequency scaling to work. 20 Say Y if you want peripherals and CPU frequency scaling to work.
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index b0d551a8efe4..eb89c7801f00 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -156,6 +156,7 @@ config SUN8I_R_CCU
156 bool "Support for Allwinner SoCs' PRCM CCUs" 156 bool "Support for Allwinner SoCs' PRCM CCUs"
157 select SUNXI_CCU_DIV 157 select SUNXI_CCU_DIV
158 select SUNXI_CCU_GATE 158 select SUNXI_CCU_GATE
159 select SUNXI_CCU_MP
159 default MACH_SUN8I || (ARCH_SUNXI && ARM64) 160 default MACH_SUN8I || (ARCH_SUNXI && ARM64)
160 161
161endif 162endif
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
index 9b3cd24b78d2..061b6fbb4f95 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -31,7 +31,9 @@
31#define CLK_PLL_VIDEO0_2X 8 31#define CLK_PLL_VIDEO0_2X 8
32#define CLK_PLL_VE 9 32#define CLK_PLL_VE 9
33#define CLK_PLL_DDR0 10 33#define CLK_PLL_DDR0 10
34#define CLK_PLL_PERIPH0 11 34
35/* PLL_PERIPH0 exported for PRCM */
36
35#define CLK_PLL_PERIPH0_2X 12 37#define CLK_PLL_PERIPH0_2X 12
36#define CLK_PLL_PERIPH1 13 38#define CLK_PLL_PERIPH1 13
37#define CLK_PLL_PERIPH1_2X 14 39#define CLK_PLL_PERIPH1_2X 14
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index 5c476f966a72..5372bf8be5e6 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -243,7 +243,7 @@ static SUNXI_CCU_GATE(ahb_ss_clk, "ahb-ss", "ahb",
243static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb", 243static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb",
244 0x060, BIT(6), 0); 244 0x060, BIT(6), 0);
245static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb", 245static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb",
246 0x060, BIT(6), 0); 246 0x060, BIT(7), 0);
247static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb", 247static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb",
248 0x060, BIT(8), 0); 248 0x060, BIT(8), 0);
249static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb", 249static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb",
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 89e68d29bf45..df97e25aec76 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -556,7 +556,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents,
556 0x12c, 0, 4, 24, 3, BIT(31), 556 0x12c, 0, 4, 24, 3, BIT(31),
557 CLK_SET_RATE_PARENT); 557 CLK_SET_RATE_PARENT);
558static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents, 558static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents,
559 0x12c, 0, 4, 24, 3, BIT(31), 559 0x130, 0, 4, 24, 3, BIT(31),
560 CLK_SET_RATE_PARENT); 560 CLK_SET_RATE_PARENT);
561 561
562static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1", 562static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
index 85973d1e8165..1b4baea37d81 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
@@ -29,7 +29,9 @@
29#define CLK_PLL_VIDEO 6 29#define CLK_PLL_VIDEO 6
30#define CLK_PLL_VE 7 30#define CLK_PLL_VE 7
31#define CLK_PLL_DDR 8 31#define CLK_PLL_DDR 8
32#define CLK_PLL_PERIPH0 9 32
33/* PLL_PERIPH0 exported for PRCM */
34
33#define CLK_PLL_PERIPH0_2X 10 35#define CLK_PLL_PERIPH0_2X 10
34#define CLK_PLL_GPU 11 36#define CLK_PLL_GPU 11
35#define CLK_PLL_PERIPH1 12 37#define CLK_PLL_PERIPH1 12
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index e58706b40ae9..6297add857b5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -537,7 +537,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
537 [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, 537 [RST_BUS_EMAC] = { 0x2c0, BIT(17) },
538 [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, 538 [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) },
539 [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, 539 [RST_BUS_SPI0] = { 0x2c0, BIT(20) },
540 [RST_BUS_OTG] = { 0x2c0, BIT(23) }, 540 [RST_BUS_OTG] = { 0x2c0, BIT(24) },
541 [RST_BUS_EHCI0] = { 0x2c0, BIT(26) }, 541 [RST_BUS_EHCI0] = { 0x2c0, BIT(26) },
542 [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, 542 [RST_BUS_OHCI0] = { 0x2c0, BIT(29) },
543 543
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 4bed671e490e..8b5c30062d99 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1209,9 +1209,9 @@ arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
1209 return 0; 1209 return 0;
1210 } 1210 }
1211 1211
1212 rate = readl_relaxed(frame + CNTFRQ); 1212 rate = readl_relaxed(base + CNTFRQ);
1213 1213
1214 iounmap(frame); 1214 iounmap(base);
1215 1215
1216 return rate; 1216 return rate;
1217} 1217}
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 44e5e951583b..8e64b8460f11 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -18,6 +18,7 @@
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/clockchips.h> 20#include <linux/clockchips.h>
21#include <linux/clocksource.h>
21#include <linux/of_address.h> 22#include <linux/of_address.h>
22#include <linux/of_irq.h> 23#include <linux/of_irq.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 2e9c830ae1cd..c4656c4d44a6 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/clk.h> 13#include <linux/clk.h>
14#include <linux/clockchips.h> 14#include <linux/clockchips.h>
15#include <linux/clocksource.h>
15#include <linux/delay.h> 16#include <linux/delay.h>
16#include <linux/interrupt.h> 17#include <linux/interrupt.h>
17#include <linux/irq.h> 18#include <linux/irq.h>
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 5104b6398139..c83ea68be792 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
721 u32 set; 721 u32 set;
722 722
723 if (!of_device_is_compatible(mvchip->chip.of_node, 723 if (!of_device_is_compatible(mvchip->chip.of_node,
724 "marvell,armada-370-xp-gpio")) 724 "marvell,armada-370-gpio"))
725 return 0; 725 return 0;
726 726
727 if (IS_ERR(mvchip->clk)) 727 if (IS_ERR(mvchip->clk))
@@ -852,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
852 .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP, 852 .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
853 }, 853 },
854 { 854 {
855 .compatible = "marvell,armada-370-xp-gpio", 855 .compatible = "marvell,armada-370-gpio",
856 .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION, 856 .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
857 }, 857 },
858 { 858 {
@@ -1128,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
1128 mvchip); 1128 mvchip);
1129 } 1129 }
1130 1130
1131 /* Armada 370/XP has simple PWM support for GPIO lines */ 1131 /* Some MVEBU SoCs have simple PWM support for GPIO lines */
1132 if (IS_ENABLED(CONFIG_PWM)) 1132 if (IS_ENABLED(CONFIG_PWM))
1133 return mvebu_pwm_probe(pdev, mvchip, id); 1133 return mvebu_pwm_probe(pdev, mvchip, id);
1134 1134
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 1cf78f4dd339..1e8e1123ddf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
693 DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", 693 DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
694 adev->clock.default_dispclk / 100); 694 adev->clock.default_dispclk / 100);
695 adev->clock.default_dispclk = 60000; 695 adev->clock.default_dispclk = 60000;
696 } else if (adev->clock.default_dispclk <= 60000) {
697 DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
698 adev->clock.default_dispclk / 100);
699 adev->clock.default_dispclk = 62500;
696 } 700 }
697 adev->clock.dp_extclk = 701 adev->clock.dp_extclk =
698 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); 702 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 4c7c2628ace4..3e5d550c5bd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -475,6 +475,7 @@ static const struct pci_device_id pciidlist[] = {
475 {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 475 {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
476 {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 476 {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
477 {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 477 {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
478 {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
478 {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 479 {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
479 /* Vega 10 */ 480 /* Vega 10 */
480 {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, 481 {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 8c9bc75a9c2d..8a0818b23ea4 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
165 struct drm_device *dev = crtc->dev; 165 struct drm_device *dev = crtc->dev;
166 struct amdgpu_device *adev = dev->dev_private; 166 struct amdgpu_device *adev = dev->dev_private;
167 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); 167 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
168 ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; 168 ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
169 169
170 memset(&args, 0, sizeof(args)); 170 memset(&args, 0, sizeof(args));
171 171
@@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
178void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) 178void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
179{ 179{
180 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); 180 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
181 ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; 181 ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
182 182
183 memset(&args, 0, sizeof(args)); 183 memset(&args, 0, sizeof(args));
184 184
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 1a3359c0f6cd..d67b6f15e8b8 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -261,21 +261,14 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
261{ 261{
262 struct drm_framebuffer *fb = plane->state->fb; 262 struct drm_framebuffer *fb = plane->state->fb;
263 struct hdlcd_drm_private *hdlcd; 263 struct hdlcd_drm_private *hdlcd;
264 struct drm_gem_cma_object *gem; 264 u32 dest_h;
265 u32 src_x, src_y, dest_h;
266 dma_addr_t scanout_start; 265 dma_addr_t scanout_start;
267 266
268 if (!fb) 267 if (!fb)
269 return; 268 return;
270 269
271 src_x = plane->state->src.x1 >> 16;
272 src_y = plane->state->src.y1 >> 16;
273 dest_h = drm_rect_height(&plane->state->dst); 270 dest_h = drm_rect_height(&plane->state->dst);
274 gem = drm_fb_cma_get_gem_obj(fb, 0); 271 scanout_start = drm_fb_cma_get_gem_addr(fb, plane->state, 0);
275
276 scanout_start = gem->paddr + fb->offsets[0] +
277 src_y * fb->pitches[0] +
278 src_x * fb->format->cpp[0];
279 272
280 hdlcd = plane->dev->dev_private; 273 hdlcd = plane->dev->dev_private;
281 hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]); 274 hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 345c8357b273..d3da87fbd85a 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -297,6 +297,9 @@ static int hdlcd_drm_bind(struct device *dev)
297 if (ret) 297 if (ret)
298 goto err_free; 298 goto err_free;
299 299
300 /* Set the CRTC's port so that the encoder component can find it */
301 hdlcd->crtc.port = of_graph_get_port_by_id(dev->of_node, 0);
302
300 ret = component_bind_all(dev, drm); 303 ret = component_bind_all(dev, drm);
301 if (ret) { 304 if (ret) {
302 DRM_ERROR("Failed to bind all components\n"); 305 DRM_ERROR("Failed to bind all components\n");
@@ -340,11 +343,14 @@ err_register:
340 } 343 }
341err_fbdev: 344err_fbdev:
342 drm_kms_helper_poll_fini(drm); 345 drm_kms_helper_poll_fini(drm);
346 drm_vblank_cleanup(drm);
343err_vblank: 347err_vblank:
344 pm_runtime_disable(drm->dev); 348 pm_runtime_disable(drm->dev);
345err_pm_active: 349err_pm_active:
346 component_unbind_all(dev, drm); 350 component_unbind_all(dev, drm);
347err_unload: 351err_unload:
352 of_node_put(hdlcd->crtc.port);
353 hdlcd->crtc.port = NULL;
348 drm_irq_uninstall(drm); 354 drm_irq_uninstall(drm);
349 of_reserved_mem_device_release(drm->dev); 355 of_reserved_mem_device_release(drm->dev);
350err_free: 356err_free:
@@ -367,6 +373,9 @@ static void hdlcd_drm_unbind(struct device *dev)
367 } 373 }
368 drm_kms_helper_poll_fini(drm); 374 drm_kms_helper_poll_fini(drm);
369 component_unbind_all(dev, drm); 375 component_unbind_all(dev, drm);
376 of_node_put(hdlcd->crtc.port);
377 hdlcd->crtc.port = NULL;
378 drm_vblank_cleanup(drm);
370 pm_runtime_get_sync(drm->dev); 379 pm_runtime_get_sync(drm->dev);
371 drm_irq_uninstall(drm); 380 drm_irq_uninstall(drm);
372 pm_runtime_put_sync(drm->dev); 381 pm_runtime_put_sync(drm->dev);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 5cd61aff7857..8072e6e4c62c 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1293,21 +1293,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1293 if (!connector) 1293 if (!connector)
1294 return -ENOENT; 1294 return -ENOENT;
1295 1295
1296 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1297 encoder = drm_connector_get_encoder(connector);
1298 if (encoder)
1299 out_resp->encoder_id = encoder->base.id;
1300 else
1301 out_resp->encoder_id = 0;
1302
1303 ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
1304 (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
1305 (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
1306 &out_resp->count_props);
1307 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1308 if (ret)
1309 goto out_unref;
1310
1311 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) 1296 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
1312 if (connector->encoder_ids[i] != 0) 1297 if (connector->encoder_ids[i] != 0)
1313 encoders_count++; 1298 encoders_count++;
@@ -1320,7 +1305,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1320 if (put_user(connector->encoder_ids[i], 1305 if (put_user(connector->encoder_ids[i],
1321 encoder_ptr + copied)) { 1306 encoder_ptr + copied)) {
1322 ret = -EFAULT; 1307 ret = -EFAULT;
1323 goto out_unref; 1308 goto out;
1324 } 1309 }
1325 copied++; 1310 copied++;
1326 } 1311 }
@@ -1364,15 +1349,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1364 if (copy_to_user(mode_ptr + copied, 1349 if (copy_to_user(mode_ptr + copied,
1365 &u_mode, sizeof(u_mode))) { 1350 &u_mode, sizeof(u_mode))) {
1366 ret = -EFAULT; 1351 ret = -EFAULT;
1352 mutex_unlock(&dev->mode_config.mutex);
1353
1367 goto out; 1354 goto out;
1368 } 1355 }
1369 copied++; 1356 copied++;
1370 } 1357 }
1371 } 1358 }
1372 out_resp->count_modes = mode_count; 1359 out_resp->count_modes = mode_count;
1373out:
1374 mutex_unlock(&dev->mode_config.mutex); 1360 mutex_unlock(&dev->mode_config.mutex);
1375out_unref: 1361
1362 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1363 encoder = drm_connector_get_encoder(connector);
1364 if (encoder)
1365 out_resp->encoder_id = encoder->base.id;
1366 else
1367 out_resp->encoder_id = 0;
1368
1369 /* Only grab properties after probing, to make sure EDID and other
1370 * properties reflect the latest status. */
1371 ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
1372 (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
1373 (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
1374 &out_resp->count_props);
1375 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1376
1377out:
1376 drm_connector_put(connector); 1378 drm_connector_put(connector);
1377 1379
1378 return ret; 1380 return ret;
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 16dccf550412..f8227318dcaf 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -129,7 +129,16 @@ i915-y += i915_vgpu.o
129 129
130# perf code 130# perf code
131i915-y += i915_perf.o \ 131i915-y += i915_perf.o \
132 i915_oa_hsw.o 132 i915_oa_hsw.o \
133 i915_oa_bdw.o \
134 i915_oa_chv.o \
135 i915_oa_sklgt2.o \
136 i915_oa_sklgt3.o \
137 i915_oa_sklgt4.o \
138 i915_oa_bxt.o \
139 i915_oa_kblgt2.o \
140 i915_oa_kblgt3.o \
141 i915_oa_glk.o
133 142
134ifeq ($(CONFIG_DRM_I915_GVT),y) 143ifeq ($(CONFIG_DRM_I915_GVT),y)
135i915-y += intel_gvt.o 144i915-y += intel_gvt.o
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 44b3159f2fe8..7aeeffd2428b 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -217,9 +217,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
217 217
218 name = ch7xxx_get_id(vendor); 218 name = ch7xxx_get_id(vendor);
219 if (!name) { 219 if (!name) {
220 DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s " 220 DRM_DEBUG_KMS("ch7xxx not detected; got VID 0x%02x from %s slave %d.\n",
221 "slave %d.\n", 221 vendor, adapter->name, dvo->slave_addr);
222 vendor, adapter->name, dvo->slave_addr);
223 goto out; 222 goto out;
224 } 223 }
225 224
@@ -229,9 +228,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
229 228
230 devid = ch7xxx_get_did(device); 229 devid = ch7xxx_get_did(device);
231 if (!devid) { 230 if (!devid) {
232 DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s " 231 DRM_DEBUG_KMS("ch7xxx not detected; got DID 0x%02x from %s slave %d.\n",
233 "slave %d.\n", 232 device, adapter->name, dvo->slave_addr);
234 vendor, adapter->name, dvo->slave_addr);
235 goto out; 233 goto out;
236 } 234 }
237 235
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index b123c20e2097..f5486cb94818 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -3,6 +3,6 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
3 interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \ 3 interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
4 execlist.o scheduler.o sched_policy.o render.o cmd_parser.o 4 execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
5 5
6ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall 6ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
7i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) 7i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
8obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o 8obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 41b2c3aaa04a..51241de5e7a7 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2414,53 +2414,13 @@ static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
2414 hash_add(gvt->cmd_table, &e->hlist, e->info->opcode); 2414 hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
2415} 2415}
2416 2416
2417#define GVT_MAX_CMD_LENGTH 20 /* In Dword */
2418
2419static void trace_cs_command(struct parser_exec_state *s,
2420 cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
2421{
2422 /* This buffer is used by ftrace to store all commands copied from
2423 * guest gma space. Sometimes commands can cross pages, this should
2424 * not be handled in ftrace logic. So this is just used as a
2425 * 'bounce buffer'
2426 */
2427 u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
2428 int i;
2429 u32 cmd_len = cmd_length(s);
2430 /* The chosen value of GVT_MAX_CMD_LENGTH are just based on
2431 * following two considerations:
2432 * 1) From observation, most common ring commands is not that long.
2433 * But there are execeptions. So it indeed makes sence to observe
2434 * longer commands.
2435 * 2) From the performance and debugging point of view, dumping all
2436 * contents of very commands is not necessary.
2437 * We mgith shrink GVT_MAX_CMD_LENGTH or remove this trace event in
2438 * future for performance considerations.
2439 */
2440 if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
2441 gvt_dbg_cmd("cmd length exceed tracing limitation!\n");
2442 cmd_len = GVT_MAX_CMD_LENGTH;
2443 }
2444
2445 for (i = 0; i < cmd_len; i++)
2446 cmd_trace_buf[i] = cmd_val(s, i);
2447
2448 trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
2449 cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
2450 cost_pre_cmd_handler, cost_cmd_handler);
2451}
2452
2453/* call the cmd handler, and advance ip */ 2417/* call the cmd handler, and advance ip */
2454static int cmd_parser_exec(struct parser_exec_state *s) 2418static int cmd_parser_exec(struct parser_exec_state *s)
2455{ 2419{
2420 struct intel_vgpu *vgpu = s->vgpu;
2456 struct cmd_info *info; 2421 struct cmd_info *info;
2457 u32 cmd; 2422 u32 cmd;
2458 int ret = 0; 2423 int ret = 0;
2459 cycles_t t0, t1, t2;
2460 struct parser_exec_state s_before_advance_custom;
2461 struct intel_vgpu *vgpu = s->vgpu;
2462
2463 t0 = get_cycles();
2464 2424
2465 cmd = cmd_val(s, 0); 2425 cmd = cmd_val(s, 0);
2466 2426
@@ -2471,13 +2431,10 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2471 return -EINVAL; 2431 return -EINVAL;
2472 } 2432 }
2473 2433
2474 gvt_dbg_cmd("%s\n", info->name);
2475
2476 s->info = info; 2434 s->info = info;
2477 2435
2478 t1 = get_cycles(); 2436 trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
2479 2437 cmd_length(s), s->buf_type);
2480 s_before_advance_custom = *s;
2481 2438
2482 if (info->handler) { 2439 if (info->handler) {
2483 ret = info->handler(s); 2440 ret = info->handler(s);
@@ -2486,9 +2443,6 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2486 return ret; 2443 return ret;
2487 } 2444 }
2488 } 2445 }
2489 t2 = get_cycles();
2490
2491 trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);
2492 2446
2493 if (!(info->flag & F_IP_ADVANCE_CUSTOM)) { 2447 if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
2494 ret = cmd_advance_default(s); 2448 ret = cmd_advance_default(s);
@@ -2522,8 +2476,6 @@ static int command_scan(struct parser_exec_state *s,
2522 gma_tail = rb_start + rb_tail; 2476 gma_tail = rb_start + rb_tail;
2523 gma_bottom = rb_start + rb_len; 2477 gma_bottom = rb_start + rb_len;
2524 2478
2525 gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);
2526
2527 while (s->ip_gma != gma_tail) { 2479 while (s->ip_gma != gma_tail) {
2528 if (s->buf_type == RING_BUFFER_INSTRUCTION) { 2480 if (s->buf_type == RING_BUFFER_INSTRUCTION) {
2529 if (!(s->ip_gma >= rb_start) || 2481 if (!(s->ip_gma >= rb_start) ||
@@ -2552,8 +2504,6 @@ static int command_scan(struct parser_exec_state *s,
2552 } 2504 }
2553 } 2505 }
2554 2506
2555 gvt_dbg_cmd("scan_end\n");
2556
2557 return ret; 2507 return ret;
2558} 2508}
2559 2509
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 24fe04d6307b..700050556242 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -708,53 +708,43 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
708int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) 708int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
709{ 709{
710 struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id]; 710 struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
711 struct execlist_ctx_descriptor_format *desc[2], valid_desc[2]; 711 struct execlist_ctx_descriptor_format desc[2];
712 unsigned long valid_desc_bitmap = 0; 712 int i, ret;
713 bool emulate_schedule_in = true;
714 int ret;
715 int i;
716 713
717 memset(valid_desc, 0, sizeof(valid_desc)); 714 desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
715 desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
718 716
719 desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1); 717 if (!desc[0].valid) {
720 desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0); 718 gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n");
719 goto inv_desc;
720 }
721 721
722 for (i = 0; i < 2; i++) { 722 for (i = 0; i < ARRAY_SIZE(desc); i++) {
723 if (!desc[i]->valid) 723 if (!desc[i].valid)
724 continue; 724 continue;
725 725 if (!desc[i].privilege_access) {
726 if (!desc[i]->privilege_access) {
727 gvt_vgpu_err("unexpected GGTT elsp submission\n"); 726 gvt_vgpu_err("unexpected GGTT elsp submission\n");
728 return -EINVAL; 727 goto inv_desc;
729 } 728 }
730
731 /* TODO: add another guest context checks here. */
732 set_bit(i, &valid_desc_bitmap);
733 valid_desc[i] = *desc[i];
734 }
735
736 if (!valid_desc_bitmap) {
737 gvt_vgpu_err("no valid desc in a elsp submission\n");
738 return -EINVAL;
739 }
740
741 if (!test_bit(0, (void *)&valid_desc_bitmap) &&
742 test_bit(1, (void *)&valid_desc_bitmap)) {
743 gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
744 return -EINVAL;
745 } 729 }
746 730
747 /* submit workload */ 731 /* submit workload */
748 for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) { 732 for (i = 0; i < ARRAY_SIZE(desc); i++) {
749 ret = submit_context(vgpu, ring_id, &valid_desc[i], 733 if (!desc[i].valid)
750 emulate_schedule_in); 734 continue;
735 ret = submit_context(vgpu, ring_id, &desc[i], i == 0);
751 if (ret) { 736 if (ret) {
752 gvt_vgpu_err("fail to schedule workload\n"); 737 gvt_vgpu_err("failed to submit desc %d\n", i);
753 return ret; 738 return ret;
754 } 739 }
755 emulate_schedule_in = false;
756 } 740 }
741
757 return 0; 742 return 0;
743
744inv_desc:
745 gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n",
746 desc[0].udw, desc[0].ldw, desc[1].udw, desc[1].ldw);
747 return -EINVAL;
758} 748}
759 749
760static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) 750static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index dce8d15f706f..5dad9298b2d5 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -102,13 +102,8 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
102 102
103 p = firmware + h->mmio_offset; 103 p = firmware + h->mmio_offset;
104 104
105 hash_for_each(gvt->mmio.mmio_info_table, i, e, node) { 105 hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
106 int j; 106 *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
107
108 for (j = 0; j < e->length; j += 4)
109 *(u32 *)(p + e->offset + j) =
110 I915_READ_NOTRACE(_MMIO(e->offset + j));
111 }
112 107
113 memcpy(gvt->firmware.mmio, p, info->mmio_size); 108 memcpy(gvt->firmware.mmio, p, info->mmio_size);
114 109
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index c6f0077f590d..66374dba3b1a 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -244,15 +244,19 @@ static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
244 return readq(addr); 244 return readq(addr);
245} 245}
246 246
247static void gtt_invalidate(struct drm_i915_private *dev_priv)
248{
249 mmio_hw_access_pre(dev_priv);
250 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
251 mmio_hw_access_post(dev_priv);
252}
253
247static void write_pte64(struct drm_i915_private *dev_priv, 254static void write_pte64(struct drm_i915_private *dev_priv,
248 unsigned long index, u64 pte) 255 unsigned long index, u64 pte)
249{ 256{
250 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 257 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
251 258
252 writeq(pte, addr); 259 writeq(pte, addr);
253
254 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
255 POSTING_READ(GFX_FLSH_CNTL_GEN6);
256} 260}
257 261
258static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt, 262static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
@@ -1849,6 +1853,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1849 } 1853 }
1850 1854
1851 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index); 1855 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
1856 gtt_invalidate(gvt->dev_priv);
1852 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); 1857 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
1853 return 0; 1858 return 0;
1854} 1859}
@@ -2301,8 +2306,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
2301 u32 num_entries; 2306 u32 num_entries;
2302 struct intel_gvt_gtt_entry e; 2307 struct intel_gvt_gtt_entry e;
2303 2308
2304 intel_runtime_pm_get(dev_priv);
2305
2306 memset(&e, 0, sizeof(struct intel_gvt_gtt_entry)); 2309 memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
2307 e.type = GTT_TYPE_GGTT_PTE; 2310 e.type = GTT_TYPE_GGTT_PTE;
2308 ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn); 2311 ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
@@ -2318,7 +2321,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
2318 for (offset = 0; offset < num_entries; offset++) 2321 for (offset = 0; offset < num_entries; offset++)
2319 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); 2322 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
2320 2323
2321 intel_runtime_pm_put(dev_priv); 2324 gtt_invalidate(dev_priv);
2322} 2325}
2323 2326
2324/** 2327/**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 7dea5e5d5567..c27c6838eaca 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -147,7 +147,9 @@ static int gvt_service_thread(void *data)
147 mutex_unlock(&gvt->lock); 147 mutex_unlock(&gvt->lock);
148 } 148 }
149 149
150 if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED, 150 if (test_bit(INTEL_GVT_REQUEST_SCHED,
151 (void *)&gvt->service_request) ||
152 test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
151 (void *)&gvt->service_request)) { 153 (void *)&gvt->service_request)) {
152 intel_gvt_schedule(gvt); 154 intel_gvt_schedule(gvt);
153 } 155 }
@@ -244,7 +246,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
244 gvt_dbg_core("init gvt device\n"); 246 gvt_dbg_core("init gvt device\n");
245 247
246 idr_init(&gvt->vgpu_idr); 248 idr_init(&gvt->vgpu_idr);
247 249 spin_lock_init(&gvt->scheduler.mmio_context_lock);
248 mutex_init(&gvt->lock); 250 mutex_init(&gvt->lock);
249 gvt->dev_priv = dev_priv; 251 gvt->dev_priv = dev_priv;
250 252
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 930732e5c780..3a74e79eac2f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -165,7 +165,6 @@ struct intel_vgpu {
165 struct list_head workload_q_head[I915_NUM_ENGINES]; 165 struct list_head workload_q_head[I915_NUM_ENGINES];
166 struct kmem_cache *workloads; 166 struct kmem_cache *workloads;
167 atomic_t running_workload_num; 167 atomic_t running_workload_num;
168 ktime_t last_ctx_submit_time;
169 DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); 168 DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
170 struct i915_gem_context *shadow_ctx; 169 struct i915_gem_context *shadow_ctx;
171 170
@@ -196,11 +195,27 @@ struct intel_gvt_fence {
196 unsigned long vgpu_allocated_fence_num; 195 unsigned long vgpu_allocated_fence_num;
197}; 196};
198 197
199#define INTEL_GVT_MMIO_HASH_BITS 9 198#define INTEL_GVT_MMIO_HASH_BITS 11
200 199
201struct intel_gvt_mmio { 200struct intel_gvt_mmio {
202 u32 *mmio_attribute; 201 u8 *mmio_attribute;
202/* Register contains RO bits */
203#define F_RO (1 << 0)
204/* Register contains graphics address */
205#define F_GMADR (1 << 1)
206/* Mode mask registers with high 16 bits as the mask bits */
207#define F_MODE_MASK (1 << 2)
208/* This reg can be accessed by GPU commands */
209#define F_CMD_ACCESS (1 << 3)
210/* This reg has been accessed by a VM */
211#define F_ACCESSED (1 << 4)
212/* This reg has been accessed through GPU commands */
213#define F_CMD_ACCESSED (1 << 5)
214/* This reg could be accessed by unaligned address */
215#define F_UNALIGN (1 << 6)
216
203 DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); 217 DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
218 unsigned int num_tracked_mmio;
204}; 219};
205 220
206struct intel_gvt_firmware { 221struct intel_gvt_firmware {
@@ -257,7 +272,12 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
257 272
258enum { 273enum {
259 INTEL_GVT_REQUEST_EMULATE_VBLANK = 0, 274 INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
275
276 /* Scheduling trigger by timer */
260 INTEL_GVT_REQUEST_SCHED = 1, 277 INTEL_GVT_REQUEST_SCHED = 1,
278
279 /* Scheduling trigger by event */
280 INTEL_GVT_REQUEST_EVENT_SCHED = 2,
261}; 281};
262 282
263static inline void intel_gvt_request_service(struct intel_gvt *gvt, 283static inline void intel_gvt_request_service(struct intel_gvt *gvt,
@@ -473,6 +493,80 @@ enum {
473 GVT_FAILSAFE_INSUFFICIENT_RESOURCE, 493 GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
474}; 494};
475 495
496static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
497{
498 intel_runtime_pm_get(dev_priv);
499}
500
501static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
502{
503 intel_runtime_pm_put(dev_priv);
504}
505
506/**
507 * intel_gvt_mmio_set_accessed - mark a MMIO has been accessed
508 * @gvt: a GVT device
509 * @offset: register offset
510 *
511 */
512static inline void intel_gvt_mmio_set_accessed(
513 struct intel_gvt *gvt, unsigned int offset)
514{
515 gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
516}
517
518/**
519 * intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
520 * @gvt: a GVT device
521 * @offset: register offset
522 *
523 */
524static inline bool intel_gvt_mmio_is_cmd_access(
525 struct intel_gvt *gvt, unsigned int offset)
526{
527 return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
528}
529
530/**
531 * intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned
532 * @gvt: a GVT device
533 * @offset: register offset
534 *
535 */
536static inline bool intel_gvt_mmio_is_unalign(
537 struct intel_gvt *gvt, unsigned int offset)
538{
539 return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
540}
541
542/**
543 * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
544 * @gvt: a GVT device
545 * @offset: register offset
546 *
547 */
548static inline void intel_gvt_mmio_set_cmd_accessed(
549 struct intel_gvt *gvt, unsigned int offset)
550{
551 gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
552}
553
554/**
555 * intel_gvt_mmio_has_mode_mask - if a MMIO has a mode mask
556 * @gvt: a GVT device
557 * @offset: register offset
558 *
559 * Returns:
560 * True if a MMIO has a mode mask in its higher 16 bits, false if it isn't.
561 *
562 */
563static inline bool intel_gvt_mmio_has_mode_mask(
564 struct intel_gvt *gvt, unsigned int offset)
565{
566 return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
567}
568
569#include "trace.h"
476#include "mpt.h" 570#include "mpt.h"
477 571
478#endif 572#endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 0ffd69654592..1414d7e6148d 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -47,21 +47,6 @@
47#define PCH_PP_OFF_DELAYS _MMIO(0xc720c) 47#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
48#define PCH_PP_DIVISOR _MMIO(0xc7210) 48#define PCH_PP_DIVISOR _MMIO(0xc7210)
49 49
50/* Register contains RO bits */
51#define F_RO (1 << 0)
52/* Register contains graphics address */
53#define F_GMADR (1 << 1)
54/* Mode mask registers with high 16 bits as the mask bits */
55#define F_MODE_MASK (1 << 2)
56/* This reg can be accessed by GPU commands */
57#define F_CMD_ACCESS (1 << 3)
58/* This reg has been accessed by a VM */
59#define F_ACCESSED (1 << 4)
60/* This reg has been accessed through GPU commands */
61#define F_CMD_ACCESSED (1 << 5)
62/* This reg could be accessed by unaligned address */
63#define F_UNALIGN (1 << 6)
64
65unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) 50unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
66{ 51{
67 if (IS_BROADWELL(gvt->dev_priv)) 52 if (IS_BROADWELL(gvt->dev_priv))
@@ -92,11 +77,22 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
92 memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes); 77 memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
93} 78}
94 79
80static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
81 unsigned int offset)
82{
83 struct intel_gvt_mmio_info *e;
84
85 hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
86 if (e->offset == offset)
87 return e;
88 }
89 return NULL;
90}
91
95static int new_mmio_info(struct intel_gvt *gvt, 92static int new_mmio_info(struct intel_gvt *gvt,
96 u32 offset, u32 flags, u32 size, 93 u32 offset, u8 flags, u32 size,
97 u32 addr_mask, u32 ro_mask, u32 device, 94 u32 addr_mask, u32 ro_mask, u32 device,
98 int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int), 95 gvt_mmio_func read, gvt_mmio_func write)
99 int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
100{ 96{
101 struct intel_gvt_mmio_info *info, *p; 97 struct intel_gvt_mmio_info *info, *p;
102 u32 start, end, i; 98 u32 start, end, i;
@@ -116,13 +112,11 @@ static int new_mmio_info(struct intel_gvt *gvt,
116 return -ENOMEM; 112 return -ENOMEM;
117 113
118 info->offset = i; 114 info->offset = i;
119 p = intel_gvt_find_mmio_info(gvt, info->offset); 115 p = find_mmio_info(gvt, info->offset);
120 if (p) 116 if (p)
121 gvt_err("dup mmio definition offset %x\n", 117 gvt_err("dup mmio definition offset %x\n",
122 info->offset); 118 info->offset);
123 info->size = size; 119
124 info->length = (i + 4) < end ? 4 : (end - i);
125 info->addr_mask = addr_mask;
126 info->ro_mask = ro_mask; 120 info->ro_mask = ro_mask;
127 info->device = device; 121 info->device = device;
128 info->read = read ? read : intel_vgpu_default_mmio_read; 122 info->read = read ? read : intel_vgpu_default_mmio_read;
@@ -130,6 +124,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
130 gvt->mmio.mmio_attribute[info->offset / 4] = flags; 124 gvt->mmio.mmio_attribute[info->offset / 4] = flags;
131 INIT_HLIST_NODE(&info->node); 125 INIT_HLIST_NODE(&info->node);
132 hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset); 126 hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
127 gvt->mmio.num_tracked_mmio++;
133 } 128 }
134 return 0; 129 return 0;
135} 130}
@@ -209,6 +204,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
209static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off, 204static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
210 void *p_data, unsigned int bytes) 205 void *p_data, unsigned int bytes)
211{ 206{
207 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
212 unsigned int fence_num = offset_to_fence_num(off); 208 unsigned int fence_num = offset_to_fence_num(off);
213 int ret; 209 int ret;
214 210
@@ -217,8 +213,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
217 return ret; 213 return ret;
218 write_vreg(vgpu, off, p_data, bytes); 214 write_vreg(vgpu, off, p_data, bytes);
219 215
216 mmio_hw_access_pre(dev_priv);
220 intel_vgpu_write_fence(vgpu, fence_num, 217 intel_vgpu_write_fence(vgpu, fence_num,
221 vgpu_vreg64(vgpu, fence_num_to_offset(fence_num))); 218 vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
219 mmio_hw_access_post(dev_priv);
222 return 0; 220 return 0;
223} 221}
224 222
@@ -300,6 +298,9 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
300 298
301 intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); 299 intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
302 300
301 /* sw will wait for the device to ack the reset request */
302 vgpu_vreg(vgpu, offset) = 0;
303
303 return 0; 304 return 0;
304} 305}
305 306
@@ -1265,7 +1266,10 @@ static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
1265 } 1266 }
1266 write_vreg(vgpu, offset, p_data, bytes); 1267 write_vreg(vgpu, offset, p_data, bytes);
1267 /* TRTTE is not per-context */ 1268 /* TRTTE is not per-context */
1269
1270 mmio_hw_access_pre(dev_priv);
1268 I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); 1271 I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
1272 mmio_hw_access_post(dev_priv);
1269 1273
1270 return 0; 1274 return 0;
1271} 1275}
@@ -1278,7 +1282,9 @@ static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
1278 1282
1279 if (val & 1) { 1283 if (val & 1) {
1280 /* unblock hw logic */ 1284 /* unblock hw logic */
1285 mmio_hw_access_pre(dev_priv);
1281 I915_WRITE(_MMIO(offset), val); 1286 I915_WRITE(_MMIO(offset), val);
1287 mmio_hw_access_post(dev_priv);
1282 } 1288 }
1283 write_vreg(vgpu, offset, p_data, bytes); 1289 write_vreg(vgpu, offset, p_data, bytes);
1284 return 0; 1290 return 0;
@@ -1415,7 +1421,20 @@ static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
1415{ 1421{
1416 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 1422 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1417 1423
1424 mmio_hw_access_pre(dev_priv);
1418 vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset)); 1425 vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
1426 mmio_hw_access_post(dev_priv);
1427 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1428}
1429
1430static int instdone_mmio_read(struct intel_vgpu *vgpu,
1431 unsigned int offset, void *p_data, unsigned int bytes)
1432{
1433 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1434
1435 mmio_hw_access_pre(dev_priv);
1436 vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
1437 mmio_hw_access_post(dev_priv);
1419 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); 1438 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1420} 1439}
1421 1440
@@ -1434,7 +1453,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1434 1453
1435 execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data; 1454 execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
1436 if (execlist->elsp_dwords.index == 3) { 1455 if (execlist->elsp_dwords.index == 3) {
1437 vgpu->last_ctx_submit_time = ktime_get();
1438 ret = intel_vgpu_submit_execlist(vgpu, ring_id); 1456 ret = intel_vgpu_submit_execlist(vgpu, ring_id);
1439 if(ret) 1457 if(ret)
1440 gvt_vgpu_err("fail submit workload on ring %d\n", 1458 gvt_vgpu_err("fail submit workload on ring %d\n",
@@ -1603,6 +1621,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
1603 MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL); 1621 MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
1604#undef RING_REG 1622#undef RING_REG
1605 1623
1624#define RING_REG(base) (base + 0x6c)
1625 MMIO_RING_DFH(RING_REG, D_ALL, 0, instdone_mmio_read, NULL);
1626 MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_ALL, instdone_mmio_read, NULL);
1627#undef RING_REG
1628 MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, instdone_mmio_read, NULL);
1629
1606 MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL); 1630 MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
1607 MMIO_GM_RDR(CCID, D_ALL, NULL, NULL); 1631 MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
1608 MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL); 1632 MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
@@ -1779,10 +1803,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
1779 MMIO_D(SPRSCALE(PIPE_C), D_ALL); 1803 MMIO_D(SPRSCALE(PIPE_C), D_ALL);
1780 MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL); 1804 MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
1781 1805
1782 MMIO_F(LGC_PALETTE(PIPE_A, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
1783 MMIO_F(LGC_PALETTE(PIPE_B, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
1784 MMIO_F(LGC_PALETTE(PIPE_C, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
1785
1786 MMIO_D(HTOTAL(TRANSCODER_A), D_ALL); 1806 MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
1787 MMIO_D(HBLANK(TRANSCODER_A), D_ALL); 1807 MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
1788 MMIO_D(HSYNC(TRANSCODER_A), D_ALL); 1808 MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
@@ -2187,7 +2207,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
2187 MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL); 2207 MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2188 MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL); 2208 MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
2189 MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write); 2209 MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
2190 MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL); 2210 MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
2191 MMIO_D(ECOBUS, D_ALL); 2211 MMIO_D(ECOBUS, D_ALL);
2192 MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL); 2212 MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
2193 MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL); 2213 MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
@@ -2219,22 +2239,19 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
2219 MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL); 2239 MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
2220 MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL); 2240 MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
2221 MMIO_D(GEN6_PMINTRMSK, D_ALL); 2241 MMIO_D(GEN6_PMINTRMSK, D_ALL);
2222 MMIO_DH(HSW_PWR_WELL_BIOS, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write); 2242 MMIO_DH(HSW_PWR_WELL_BIOS, D_BDW, NULL, power_well_ctl_mmio_write);
2223 MMIO_DH(HSW_PWR_WELL_DRIVER, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write); 2243 MMIO_DH(HSW_PWR_WELL_DRIVER, D_BDW, NULL, power_well_ctl_mmio_write);
2224 MMIO_DH(HSW_PWR_WELL_KVMR, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write); 2244 MMIO_DH(HSW_PWR_WELL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
2225 MMIO_DH(HSW_PWR_WELL_DEBUG, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write); 2245 MMIO_DH(HSW_PWR_WELL_DEBUG, D_BDW, NULL, power_well_ctl_mmio_write);
2226 MMIO_DH(HSW_PWR_WELL_CTL5, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write); 2246 MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
2227 MMIO_DH(HSW_PWR_WELL_CTL6, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write); 2247 MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
2228 2248
2229 MMIO_D(RSTDBYCTL, D_ALL); 2249 MMIO_D(RSTDBYCTL, D_ALL);
2230 2250
2231 MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write); 2251 MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
2232 MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write); 2252 MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
2233 MMIO_F(VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_UNALIGN, 0, 0, D_ALL, pvinfo_mmio_read, pvinfo_mmio_write);
2234 MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write); 2253 MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
2235 2254
2236 MMIO_F(MCHBAR_MIRROR_BASE_SNB, 0x40000, 0, 0, 0, D_ALL, NULL, NULL);
2237
2238 MMIO_D(TILECTL, D_ALL); 2255 MMIO_D(TILECTL, D_ALL);
2239 2256
2240 MMIO_D(GEN6_UCGCTL1, D_ALL); 2257 MMIO_D(GEN6_UCGCTL1, D_ALL);
@@ -2242,7 +2259,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
2242 2259
2243 MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL); 2260 MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
2244 2261
2245 MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
2246 MMIO_D(GEN6_PCODE_DATA, D_ALL); 2262 MMIO_D(GEN6_PCODE_DATA, D_ALL);
2247 MMIO_D(0x13812c, D_ALL); 2263 MMIO_D(0x13812c, D_ALL);
2248 MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL); 2264 MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
@@ -2321,14 +2337,13 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
2321 MMIO_D(0x1a054, D_ALL); 2337 MMIO_D(0x1a054, D_ALL);
2322 2338
2323 MMIO_D(0x44070, D_ALL); 2339 MMIO_D(0x44070, D_ALL);
2324 MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL); 2340 MMIO_DFH(0x215c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2325 MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL); 2341 MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
2326 MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL); 2342 MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
2327 MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL); 2343 MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
2328 MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL); 2344 MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
2329 2345
2330 MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL); 2346 MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
2331 MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
2332 MMIO_D(0x2b00, D_BDW_PLUS); 2347 MMIO_D(0x2b00, D_BDW_PLUS);
2333 MMIO_D(0x2360, D_BDW_PLUS); 2348 MMIO_D(0x2360, D_BDW_PLUS);
2334 MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2349 MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
@@ -2766,7 +2781,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2766 MMIO_D(0x72380, D_SKL_PLUS); 2781 MMIO_D(0x72380, D_SKL_PLUS);
2767 MMIO_D(0x7039c, D_SKL_PLUS); 2782 MMIO_D(0x7039c, D_SKL_PLUS);
2768 2783
2769 MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
2770 MMIO_D(0x8f074, D_SKL | D_KBL); 2784 MMIO_D(0x8f074, D_SKL | D_KBL);
2771 MMIO_D(0x8f004, D_SKL | D_KBL); 2785 MMIO_D(0x8f004, D_SKL | D_KBL);
2772 MMIO_D(0x8f034, D_SKL | D_KBL); 2786 MMIO_D(0x8f034, D_SKL | D_KBL);
@@ -2840,26 +2854,36 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2840 return 0; 2854 return 0;
2841} 2855}
2842 2856
2843/** 2857/* Special MMIO blocks. */
2844 * intel_gvt_find_mmio_info - find MMIO information entry by aligned offset 2858static struct gvt_mmio_block {
2845 * @gvt: GVT device 2859 unsigned int device;
2846 * @offset: register offset 2860 i915_reg_t offset;
2847 * 2861 unsigned int size;
2848 * This function is used to find the MMIO information entry from hash table 2862 gvt_mmio_func read;
2849 * 2863 gvt_mmio_func write;
2850 * Returns: 2864} gvt_mmio_blocks[] = {
2851 * pointer to MMIO information entry, NULL if not exists 2865 {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
2852 */ 2866 {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
2853struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, 2867 {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
2854 unsigned int offset) 2868 pvinfo_mmio_read, pvinfo_mmio_write},
2855{ 2869 {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
2856 struct intel_gvt_mmio_info *e; 2870 {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
2871 {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
2872};
2857 2873
2858 WARN_ON(!IS_ALIGNED(offset, 4)); 2874static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
2875 unsigned int offset)
2876{
2877 unsigned long device = intel_gvt_get_device_type(gvt);
2878 struct gvt_mmio_block *block = gvt_mmio_blocks;
2879 int i;
2859 2880
2860 hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) { 2881 for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
2861 if (e->offset == offset) 2882 if (!(device & block->device))
2862 return e; 2883 continue;
2884 if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
2885 offset < INTEL_GVT_MMIO_OFFSET(block->offset) + block->size)
2886 return block;
2863 } 2887 }
2864 return NULL; 2888 return NULL;
2865} 2889}
@@ -2899,9 +2923,10 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
2899{ 2923{
2900 struct intel_gvt_device_info *info = &gvt->device_info; 2924 struct intel_gvt_device_info *info = &gvt->device_info;
2901 struct drm_i915_private *dev_priv = gvt->dev_priv; 2925 struct drm_i915_private *dev_priv = gvt->dev_priv;
2926 int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
2902 int ret; 2927 int ret;
2903 2928
2904 gvt->mmio.mmio_attribute = vzalloc(info->mmio_size); 2929 gvt->mmio.mmio_attribute = vzalloc(size);
2905 if (!gvt->mmio.mmio_attribute) 2930 if (!gvt->mmio.mmio_attribute)
2906 return -ENOMEM; 2931 return -ENOMEM;
2907 2932
@@ -2922,77 +2947,15 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
2922 if (ret) 2947 if (ret)
2923 goto err; 2948 goto err;
2924 } 2949 }
2950
2951 gvt_dbg_mmio("traced %u virtual mmio registers\n",
2952 gvt->mmio.num_tracked_mmio);
2925 return 0; 2953 return 0;
2926err: 2954err:
2927 intel_gvt_clean_mmio_info(gvt); 2955 intel_gvt_clean_mmio_info(gvt);
2928 return ret; 2956 return ret;
2929} 2957}
2930 2958
2931/**
2932 * intel_gvt_mmio_set_accessed - mark a MMIO has been accessed
2933 * @gvt: a GVT device
2934 * @offset: register offset
2935 *
2936 */
2937void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset)
2938{
2939 gvt->mmio.mmio_attribute[offset >> 2] |=
2940 F_ACCESSED;
2941}
2942
2943/**
2944 * intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
2945 * @gvt: a GVT device
2946 * @offset: register offset
2947 *
2948 */
2949bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
2950 unsigned int offset)
2951{
2952 return gvt->mmio.mmio_attribute[offset >> 2] &
2953 F_CMD_ACCESS;
2954}
2955
2956/**
2957 * intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned
2958 * @gvt: a GVT device
2959 * @offset: register offset
2960 *
2961 */
2962bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt,
2963 unsigned int offset)
2964{
2965 return gvt->mmio.mmio_attribute[offset >> 2] &
2966 F_UNALIGN;
2967}
2968
2969/**
2970 * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
2971 * @gvt: a GVT device
2972 * @offset: register offset
2973 *
2974 */
2975void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
2976 unsigned int offset)
2977{
2978 gvt->mmio.mmio_attribute[offset >> 2] |=
2979 F_CMD_ACCESSED;
2980}
2981
2982/**
2983 * intel_gvt_mmio_has_mode_mask - if a MMIO has a mode mask
2984 * @gvt: a GVT device
2985 * @offset: register offset
2986 *
2987 * Returns:
2988 * True if a MMIO has a mode mask in its higher 16 bits, false if it isn't.
2989 *
2990 */
2991bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset)
2992{
2993 return gvt->mmio.mmio_attribute[offset >> 2] &
2994 F_MODE_MASK;
2995}
2996 2959
2997/** 2960/**
2998 * intel_vgpu_default_mmio_read - default MMIO read handler 2961 * intel_vgpu_default_mmio_read - default MMIO read handler
@@ -3044,3 +3007,91 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
3044{ 3007{
3045 return in_whitelist(offset); 3008 return in_whitelist(offset);
3046} 3009}
3010
3011/**
3012 * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
3013 * @vgpu: a vGPU
3014 * @offset: register offset
3015 * @pdata: data buffer
3016 * @bytes: data length
3017 *
3018 * Returns:
3019 * Zero on success, negative error code if failed.
3020 */
3021int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
3022 void *pdata, unsigned int bytes, bool is_read)
3023{
3024 struct intel_gvt *gvt = vgpu->gvt;
3025 struct intel_gvt_mmio_info *mmio_info;
3026 struct gvt_mmio_block *mmio_block;
3027 gvt_mmio_func func;
3028 int ret;
3029
3030 if (WARN_ON(bytes > 4))
3031 return -EINVAL;
3032
3033 /*
3034 * Handle special MMIO blocks.
3035 */
3036 mmio_block = find_mmio_block(gvt, offset);
3037 if (mmio_block) {
3038 func = is_read ? mmio_block->read : mmio_block->write;
3039 if (func)
3040 return func(vgpu, offset, pdata, bytes);
3041 goto default_rw;
3042 }
3043
3044 /*
3045 * Normal tracked MMIOs.
3046 */
3047 mmio_info = find_mmio_info(gvt, offset);
3048 if (!mmio_info) {
3049 if (!vgpu->mmio.disable_warn_untrack)
3050 gvt_vgpu_err("untracked MMIO %08x len %d\n",
3051 offset, bytes);
3052 goto default_rw;
3053 }
3054
3055 if (is_read)
3056 return mmio_info->read(vgpu, offset, pdata, bytes);
3057 else {
3058 u64 ro_mask = mmio_info->ro_mask;
3059 u32 old_vreg = 0, old_sreg = 0;
3060 u64 data = 0;
3061
3062 if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
3063 old_vreg = vgpu_vreg(vgpu, offset);
3064 old_sreg = vgpu_sreg(vgpu, offset);
3065 }
3066
3067 if (likely(!ro_mask))
3068 ret = mmio_info->write(vgpu, offset, pdata, bytes);
3069 else if (!~ro_mask) {
3070 gvt_vgpu_err("try to write RO reg %x\n", offset);
3071 return 0;
3072 } else {
3073 /* keep the RO bits in the virtual register */
3074 memcpy(&data, pdata, bytes);
3075 data &= ~ro_mask;
3076 data |= vgpu_vreg(vgpu, offset) & ro_mask;
3077 ret = mmio_info->write(vgpu, offset, &data, bytes);
3078 }
3079
3080 /* higher 16bits of mode ctl regs are mask bits for change */
3081 if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
3082 u32 mask = vgpu_vreg(vgpu, offset) >> 16;
3083
3084 vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
3085 | (vgpu_vreg(vgpu, offset) & mask);
3086 vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
3087 | (vgpu_sreg(vgpu, offset) & mask);
3088 }
3089 }
3090
3091 return ret;
3092
3093default_rw:
3094 return is_read ?
3095 intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
3096 intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
3097}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 9d6812f0957f..7a041b368f68 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -31,6 +31,7 @@
31 31
32#include "i915_drv.h" 32#include "i915_drv.h"
33#include "gvt.h" 33#include "gvt.h"
34#include "trace.h"
34 35
35/* common offset among interrupt control registers */ 36/* common offset among interrupt control registers */
36#define regbase_to_isr(base) (base) 37#define regbase_to_isr(base) (base)
@@ -178,8 +179,8 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
178 struct intel_gvt_irq_ops *ops = gvt->irq.ops; 179 struct intel_gvt_irq_ops *ops = gvt->irq.ops;
179 u32 imr = *(u32 *)p_data; 180 u32 imr = *(u32 *)p_data;
180 181
181 gvt_dbg_irq("write IMR %x, new %08x, old %08x, changed %08x\n", 182 trace_write_ir(vgpu->id, "IMR", reg, imr, vgpu_vreg(vgpu, reg),
182 reg, imr, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ imr); 183 (vgpu_vreg(vgpu, reg) ^ imr));
183 184
184 vgpu_vreg(vgpu, reg) = imr; 185 vgpu_vreg(vgpu, reg) = imr;
185 186
@@ -209,8 +210,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
209 u32 ier = *(u32 *)p_data; 210 u32 ier = *(u32 *)p_data;
210 u32 virtual_ier = vgpu_vreg(vgpu, reg); 211 u32 virtual_ier = vgpu_vreg(vgpu, reg);
211 212
212 gvt_dbg_irq("write MASTER_IRQ %x, new %08x, old %08x, changed %08x\n", 213 trace_write_ir(vgpu->id, "MASTER_IRQ", reg, ier, virtual_ier,
213 reg, ier, virtual_ier, virtual_ier ^ ier); 214 (virtual_ier ^ ier));
214 215
215 /* 216 /*
216 * GEN8_MASTER_IRQ is a special irq register, 217 * GEN8_MASTER_IRQ is a special irq register,
@@ -248,8 +249,8 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
248 struct intel_gvt_irq_info *info; 249 struct intel_gvt_irq_info *info;
249 u32 ier = *(u32 *)p_data; 250 u32 ier = *(u32 *)p_data;
250 251
251 gvt_dbg_irq("write IER %x, new %08x, old %08x, changed %08x\n", 252 trace_write_ir(vgpu->id, "IER", reg, ier, vgpu_vreg(vgpu, reg),
252 reg, ier, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ ier); 253 (vgpu_vreg(vgpu, reg) ^ ier));
253 254
254 vgpu_vreg(vgpu, reg) = ier; 255 vgpu_vreg(vgpu, reg) = ier;
255 256
@@ -285,8 +286,8 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
285 iir_to_regbase(reg)); 286 iir_to_regbase(reg));
286 u32 iir = *(u32 *)p_data; 287 u32 iir = *(u32 *)p_data;
287 288
288 gvt_dbg_irq("write IIR %x, new %08x, old %08x, changed %08x\n", 289 trace_write_ir(vgpu->id, "IIR", reg, iir, vgpu_vreg(vgpu, reg),
289 reg, iir, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ iir); 290 (vgpu_vreg(vgpu, reg) ^ iir));
290 291
291 if (WARN_ON(!info)) 292 if (WARN_ON(!info))
292 return -EINVAL; 293 return -EINVAL;
@@ -411,8 +412,7 @@ static void propagate_event(struct intel_gvt_irq *irq,
411 412
412 if (!test_bit(bit, (void *)&vgpu_vreg(vgpu, 413 if (!test_bit(bit, (void *)&vgpu_vreg(vgpu,
413 regbase_to_imr(reg_base)))) { 414 regbase_to_imr(reg_base)))) {
414 gvt_dbg_irq("set bit (%d) for (%s) for vgpu (%d)\n", 415 trace_propagate_event(vgpu->id, irq_name[event], bit);
415 bit, irq_name[event], vgpu->id);
416 set_bit(bit, (void *)&vgpu_vreg(vgpu, 416 set_bit(bit, (void *)&vgpu_vreg(vgpu,
417 regbase_to_iir(reg_base))); 417 regbase_to_iir(reg_base)));
418 } 418 }
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 1ba3bdb09341..980ec8906b1e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -123,7 +123,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
123 void *p_data, unsigned int bytes) 123 void *p_data, unsigned int bytes)
124{ 124{
125 struct intel_gvt *gvt = vgpu->gvt; 125 struct intel_gvt *gvt = vgpu->gvt;
126 struct intel_gvt_mmio_info *mmio;
127 unsigned int offset = 0; 126 unsigned int offset = 0;
128 int ret = -EINVAL; 127 int ret = -EINVAL;
129 128
@@ -187,32 +186,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
187 goto err; 186 goto err;
188 } 187 }
189 188
190 mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); 189 ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, true);
191 if (mmio) { 190 if (ret < 0)
192 if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
193 if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
194 goto err;
195 if (WARN_ON(mmio->offset != offset))
196 goto err;
197 }
198 ret = mmio->read(vgpu, offset, p_data, bytes);
199 } else {
200 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
201
202 if (!vgpu->mmio.disable_warn_untrack) {
203 gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
204 offset, bytes, *(u32 *)p_data);
205
206 if (offset == 0x206c) {
207 gvt_vgpu_err("------------------------------------------\n");
208 gvt_vgpu_err("likely triggers a gfx reset\n");
209 gvt_vgpu_err("------------------------------------------\n");
210 vgpu->mmio.disable_warn_untrack = true;
211 }
212 }
213 }
214
215 if (ret)
216 goto err; 191 goto err;
217 192
218 intel_gvt_mmio_set_accessed(gvt, offset); 193 intel_gvt_mmio_set_accessed(gvt, offset);
@@ -239,9 +214,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
239 void *p_data, unsigned int bytes) 214 void *p_data, unsigned int bytes)
240{ 215{
241 struct intel_gvt *gvt = vgpu->gvt; 216 struct intel_gvt *gvt = vgpu->gvt;
242 struct intel_gvt_mmio_info *mmio;
243 unsigned int offset = 0; 217 unsigned int offset = 0;
244 u32 old_vreg = 0, old_sreg = 0;
245 int ret = -EINVAL; 218 int ret = -EINVAL;
246 219
247 if (vgpu->failsafe) { 220 if (vgpu->failsafe) {
@@ -296,66 +269,10 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
296 return ret; 269 return ret;
297 } 270 }
298 271
299 mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); 272 ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
300 if (!mmio && !vgpu->mmio.disable_warn_untrack) 273 if (ret < 0)
301 gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
302 vgpu->id, offset, bytes, *(u32 *)p_data);
303
304 if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
305 if (WARN_ON(!IS_ALIGNED(offset, bytes)))
306 goto err;
307 }
308
309 if (mmio) {
310 u64 ro_mask = mmio->ro_mask;
311
312 if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
313 if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
314 goto err;
315 if (WARN_ON(mmio->offset != offset))
316 goto err;
317 }
318
319 if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
320 old_vreg = vgpu_vreg(vgpu, offset);
321 old_sreg = vgpu_sreg(vgpu, offset);
322 }
323
324 if (!ro_mask) {
325 ret = mmio->write(vgpu, offset, p_data, bytes);
326 } else {
327 /* Protect RO bits like HW */
328 u64 data = 0;
329
330 /* all register bits are RO. */
331 if (ro_mask == ~(u64)0) {
332 gvt_vgpu_err("try to write RO reg %x\n",
333 offset);
334 ret = 0;
335 goto out;
336 }
337 /* keep the RO bits in the virtual register */
338 memcpy(&data, p_data, bytes);
339 data &= ~mmio->ro_mask;
340 data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
341 ret = mmio->write(vgpu, offset, &data, bytes);
342 }
343
344 /* higher 16bits of mode ctl regs are mask bits for change */
345 if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
346 u32 mask = vgpu_vreg(vgpu, offset) >> 16;
347
348 vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
349 | (vgpu_vreg(vgpu, offset) & mask);
350 vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
351 | (vgpu_sreg(vgpu, offset) & mask);
352 }
353 } else
354 ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
355 bytes);
356 if (ret)
357 goto err; 274 goto err;
358out: 275
359 intel_gvt_mmio_set_accessed(gvt, offset); 276 intel_gvt_mmio_set_accessed(gvt, offset);
360 mutex_unlock(&gvt->lock); 277 mutex_unlock(&gvt->lock);
361 return 0; 278 return 0;
@@ -372,20 +289,32 @@ err:
372 * @vgpu: a vGPU 289 * @vgpu: a vGPU
373 * 290 *
374 */ 291 */
375void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu) 292void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
376{ 293{
377 struct intel_gvt *gvt = vgpu->gvt; 294 struct intel_gvt *gvt = vgpu->gvt;
378 const struct intel_gvt_device_info *info = &gvt->device_info; 295 const struct intel_gvt_device_info *info = &gvt->device_info;
296 void *mmio = gvt->firmware.mmio;
297
298 if (dmlr) {
299 memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
300 memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
379 301
380 memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); 302 vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
381 memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
382 303
383 vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; 304 /* set the bit 0:2(Core C-State ) to C0 */
305 vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
384 306
385 /* set the bit 0:2(Core C-State ) to C0 */ 307 vgpu->mmio.disable_warn_untrack = false;
386 vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; 308 } else {
309#define GVT_GEN8_MMIO_RESET_OFFSET (0x44200)
310 /* only reset the engine related, so starting with 0x44200
311 * interrupt include DE,display mmio related will not be
312 * touched
313 */
314 memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
315 memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
316 }
387 317
388 vgpu->mmio.disable_warn_untrack = false;
389} 318}
390 319
391/** 320/**
@@ -405,7 +334,7 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
405 334
406 vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; 335 vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
407 336
408 intel_vgpu_reset_mmio(vgpu); 337 intel_vgpu_reset_mmio(vgpu, true);
409 338
410 return 0; 339 return 0;
411} 340}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 7edd66f38ef9..32cd64ddad26 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -39,36 +39,28 @@
39struct intel_gvt; 39struct intel_gvt;
40struct intel_vgpu; 40struct intel_vgpu;
41 41
42#define D_SNB (1 << 0) 42#define D_BDW (1 << 0)
43#define D_IVB (1 << 1) 43#define D_SKL (1 << 1)
44#define D_HSW (1 << 2) 44#define D_KBL (1 << 2)
45#define D_BDW (1 << 3)
46#define D_SKL (1 << 4)
47#define D_KBL (1 << 5)
48 45
49#define D_GEN9PLUS (D_SKL | D_KBL) 46#define D_GEN9PLUS (D_SKL | D_KBL)
50#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL) 47#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL)
51#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL | D_KBL)
52#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
53 48
54#define D_SKL_PLUS (D_SKL | D_KBL) 49#define D_SKL_PLUS (D_SKL | D_KBL)
55#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL) 50#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL)
56#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL | D_KBL)
57#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
58 51
59#define D_PRE_BDW (D_SNB | D_IVB | D_HSW) 52#define D_PRE_SKL (D_BDW)
60#define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW) 53#define D_ALL (D_BDW | D_SKL | D_KBL)
61#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL | D_KBL) 54
55typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
56 unsigned int);
62 57
63struct intel_gvt_mmio_info { 58struct intel_gvt_mmio_info {
64 u32 offset; 59 u32 offset;
65 u32 size;
66 u32 length;
67 u32 addr_mask;
68 u64 ro_mask; 60 u64 ro_mask;
69 u32 device; 61 u32 device;
70 int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int); 62 gvt_mmio_func read;
71 int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int); 63 gvt_mmio_func write;
72 u32 addr_range; 64 u32 addr_range;
73 struct hlist_node node; 65 struct hlist_node node;
74}; 66};
@@ -79,8 +71,6 @@ bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
79int intel_gvt_setup_mmio_info(struct intel_gvt *gvt); 71int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
80void intel_gvt_clean_mmio_info(struct intel_gvt *gvt); 72void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
81 73
82struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
83 unsigned int offset);
84#define INTEL_GVT_MMIO_OFFSET(reg) ({ \ 74#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
85 typeof(reg) __reg = reg; \ 75 typeof(reg) __reg = reg; \
86 u32 *offset = (u32 *)&__reg; \ 76 u32 *offset = (u32 *)&__reg; \
@@ -88,7 +78,7 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
88}) 78})
89 79
90int intel_vgpu_init_mmio(struct intel_vgpu *vgpu); 80int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
91void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu); 81void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
92void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu); 82void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
93 83
94int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); 84int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
@@ -97,13 +87,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
97 void *p_data, unsigned int bytes); 87 void *p_data, unsigned int bytes);
98int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa, 88int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
99 void *p_data, unsigned int bytes); 89 void *p_data, unsigned int bytes);
100bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt, 90
101 unsigned int offset);
102bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
103void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset);
104void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
105 unsigned int offset);
106bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset);
107int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, 91int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
108 void *p_data, unsigned int bytes); 92 void *p_data, unsigned int bytes);
109int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 93int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
@@ -111,4 +95,8 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
111 95
112bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, 96bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
113 unsigned int offset); 97 unsigned int offset);
98
99int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
100 void *pdata, unsigned int bytes, bool is_read);
101
114#endif 102#endif
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 419353624c5a..f0e5487e6688 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -133,8 +133,7 @@ static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
133 if (WARN(control & GENMASK(15, 1), "only support one MSI format\n")) 133 if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
134 return -EINVAL; 134 return -EINVAL;
135 135
136 gvt_dbg_irq("vgpu%d: inject msi address %x data%x\n", vgpu->id, addr, 136 trace_inject_msi(vgpu->id, addr, data);
137 data);
138 137
139 ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data); 138 ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
140 if (ret) 139 if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index a5e11d89df2f..504e57c3bc23 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -35,6 +35,7 @@
35 35
36#include "i915_drv.h" 36#include "i915_drv.h"
37#include "gvt.h" 37#include "gvt.h"
38#include "trace.h"
38 39
39struct render_mmio { 40struct render_mmio {
40 int ring_id; 41 int ring_id;
@@ -260,7 +261,8 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
260 261
261#define CTX_CONTEXT_CONTROL_VAL 0x03 262#define CTX_CONTEXT_CONTROL_VAL 0x03
262 263
263void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id) 264/* Switch ring mmio values (context) from host to a vgpu. */
265static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
264{ 266{
265 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 267 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
266 struct render_mmio *mmio; 268 struct render_mmio *mmio;
@@ -305,14 +307,15 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
305 I915_WRITE(mmio->reg, v); 307 I915_WRITE(mmio->reg, v);
306 POSTING_READ(mmio->reg); 308 POSTING_READ(mmio->reg);
307 309
308 gvt_dbg_render("load reg %x old %x new %x\n", 310 trace_render_mmio(vgpu->id, "load",
309 i915_mmio_reg_offset(mmio->reg), 311 i915_mmio_reg_offset(mmio->reg),
310 mmio->value, v); 312 mmio->value, v);
311 } 313 }
312 handle_tlb_pending_event(vgpu, ring_id); 314 handle_tlb_pending_event(vgpu, ring_id);
313} 315}
314 316
315void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id) 317/* Switch ring mmio values (context) from vgpu to host. */
318static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
316{ 319{
317 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 320 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
318 struct render_mmio *mmio; 321 struct render_mmio *mmio;
@@ -346,8 +349,37 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
346 I915_WRITE(mmio->reg, v); 349 I915_WRITE(mmio->reg, v);
347 POSTING_READ(mmio->reg); 350 POSTING_READ(mmio->reg);
348 351
349 gvt_dbg_render("restore reg %x old %x new %x\n", 352 trace_render_mmio(vgpu->id, "restore",
350 i915_mmio_reg_offset(mmio->reg), 353 i915_mmio_reg_offset(mmio->reg),
351 mmio->value, v); 354 mmio->value, v);
352 } 355 }
353} 356}
357
358/**
359 * intel_gvt_switch_render_mmio - switch mmio context of specific engine
360 * @pre: the last vGPU that own the engine
361 * @next: the vGPU to switch to
362 * @ring_id: specify the engine
363 *
364 * If pre is null indicates that host own the engine. If next is null
365 * indicates that we are switching to host workload.
366 */
367void intel_gvt_switch_mmio(struct intel_vgpu *pre,
368 struct intel_vgpu *next, int ring_id)
369{
370 if (WARN_ON(!pre && !next))
371 return;
372
373 gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
374 pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
375
376 /**
377 * TODO: Optimize for vGPU to vGPU switch by merging
378 * switch_mmio_to_host() and switch_mmio_to_vgpu().
379 */
380 if (pre)
381 switch_mmio_to_host(pre, ring_id);
382
383 if (next)
384 switch_mmio_to_vgpu(next, ring_id);
385}
diff --git a/drivers/gpu/drm/i915/gvt/render.h b/drivers/gpu/drm/i915/gvt/render.h
index dac1a3cc458b..91db1d39d28f 100644
--- a/drivers/gpu/drm/i915/gvt/render.h
+++ b/drivers/gpu/drm/i915/gvt/render.h
@@ -36,8 +36,8 @@
36#ifndef __GVT_RENDER_H__ 36#ifndef __GVT_RENDER_H__
37#define __GVT_RENDER_H__ 37#define __GVT_RENDER_H__
38 38
39void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id); 39void intel_gvt_switch_mmio(struct intel_vgpu *pre,
40 struct intel_vgpu *next, int ring_id);
40 41
41void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id);
42 42
43#endif 43#endif
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index f25ff133865f..436377da41ba 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -202,11 +202,6 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
202 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 202 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
203 struct vgpu_sched_data *vgpu_data; 203 struct vgpu_sched_data *vgpu_data;
204 struct intel_vgpu *vgpu = NULL; 204 struct intel_vgpu *vgpu = NULL;
205 static uint64_t timer_check;
206
207 if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
208 gvt_balance_timeslice(sched_data);
209
210 /* no active vgpu or has already had a target */ 205 /* no active vgpu or has already had a target */
211 if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) 206 if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
212 goto out; 207 goto out;
@@ -231,9 +226,19 @@ out:
231void intel_gvt_schedule(struct intel_gvt *gvt) 226void intel_gvt_schedule(struct intel_gvt *gvt)
232{ 227{
233 struct gvt_sched_data *sched_data = gvt->scheduler.sched_data; 228 struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
229 static uint64_t timer_check;
234 230
235 mutex_lock(&gvt->lock); 231 mutex_lock(&gvt->lock);
232
233 if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
234 (void *)&gvt->service_request)) {
235 if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
236 gvt_balance_timeslice(sched_data);
237 }
238 clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
239
236 tbs_sched_func(sched_data); 240 tbs_sched_func(sched_data);
241
237 mutex_unlock(&gvt->lock); 242 mutex_unlock(&gvt->lock);
238} 243}
239 244
@@ -303,8 +308,20 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
303 308
304static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) 309static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
305{ 310{
311 struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
312 int ring_id;
313
306 kfree(vgpu->sched_data); 314 kfree(vgpu->sched_data);
307 vgpu->sched_data = NULL; 315 vgpu->sched_data = NULL;
316
317 spin_lock_bh(&scheduler->mmio_context_lock);
318 for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
319 if (scheduler->engine_owner[ring_id] == vgpu) {
320 intel_gvt_switch_mmio(vgpu, NULL, ring_id);
321 scheduler->engine_owner[ring_id] = NULL;
322 }
323 }
324 spin_unlock_bh(&scheduler->mmio_context_lock);
308} 325}
309 326
310static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) 327static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 6ae286cb5804..488fdea348a9 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -138,21 +138,42 @@ static int shadow_context_status_change(struct notifier_block *nb,
138 struct intel_gvt *gvt = container_of(nb, struct intel_gvt, 138 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
139 shadow_ctx_notifier_block[req->engine->id]); 139 shadow_ctx_notifier_block[req->engine->id]);
140 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 140 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
141 struct intel_vgpu_workload *workload = 141 enum intel_engine_id ring_id = req->engine->id;
142 scheduler->current_workload[req->engine->id]; 142 struct intel_vgpu_workload *workload;
143
144 if (!is_gvt_request(req)) {
145 spin_lock_bh(&scheduler->mmio_context_lock);
146 if (action == INTEL_CONTEXT_SCHEDULE_IN &&
147 scheduler->engine_owner[ring_id]) {
148 /* Switch ring from vGPU to host. */
149 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
150 NULL, ring_id);
151 scheduler->engine_owner[ring_id] = NULL;
152 }
153 spin_unlock_bh(&scheduler->mmio_context_lock);
143 154
144 if (!is_gvt_request(req) || unlikely(!workload)) 155 return NOTIFY_OK;
156 }
157
158 workload = scheduler->current_workload[ring_id];
159 if (unlikely(!workload))
145 return NOTIFY_OK; 160 return NOTIFY_OK;
146 161
147 switch (action) { 162 switch (action) {
148 case INTEL_CONTEXT_SCHEDULE_IN: 163 case INTEL_CONTEXT_SCHEDULE_IN:
149 intel_gvt_load_render_mmio(workload->vgpu, 164 spin_lock_bh(&scheduler->mmio_context_lock);
150 workload->ring_id); 165 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
166 /* Switch ring from host to vGPU or vGPU to vGPU. */
167 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
168 workload->vgpu, ring_id);
169 scheduler->engine_owner[ring_id] = workload->vgpu;
170 } else
171 gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
172 ring_id, workload->vgpu->id);
173 spin_unlock_bh(&scheduler->mmio_context_lock);
151 atomic_set(&workload->shadow_ctx_active, 1); 174 atomic_set(&workload->shadow_ctx_active, 1);
152 break; 175 break;
153 case INTEL_CONTEXT_SCHEDULE_OUT: 176 case INTEL_CONTEXT_SCHEDULE_OUT:
154 intel_gvt_restore_render_mmio(workload->vgpu,
155 workload->ring_id);
156 /* If the status is -EINPROGRESS means this workload 177 /* If the status is -EINPROGRESS means this workload
157 * doesn't meet any issue during dispatching so when 178 * doesn't meet any issue during dispatching so when
158 * get the SCHEDULE_OUT set the status to be zero for 179 * get the SCHEDULE_OUT set the status to be zero for
@@ -431,6 +452,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
431 452
432 atomic_dec(&vgpu->running_workload_num); 453 atomic_dec(&vgpu->running_workload_num);
433 wake_up(&scheduler->workload_complete_wq); 454 wake_up(&scheduler->workload_complete_wq);
455
456 if (gvt->scheduler.need_reschedule)
457 intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
458
434 mutex_unlock(&gvt->lock); 459 mutex_unlock(&gvt->lock);
435} 460}
436 461
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2cd725c0573e..9b6bf51e9b9b 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -42,6 +42,10 @@ struct intel_gvt_workload_scheduler {
42 struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES]; 42 struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
43 bool need_reschedule; 43 bool need_reschedule;
44 44
45 spinlock_t mmio_context_lock;
46 /* can be null when owner is host */
47 struct intel_vgpu *engine_owner[I915_NUM_ENGINES];
48
45 wait_queue_head_t workload_complete_wq; 49 wait_queue_head_t workload_complete_wq;
46 struct task_struct *thread[I915_NUM_ENGINES]; 50 struct task_struct *thread[I915_NUM_ENGINES];
47 wait_queue_head_t waitq[I915_NUM_ENGINES]; 51 wait_queue_head_t waitq[I915_NUM_ENGINES];
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 53a2d10cf3f1..8c150381d9a4 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -224,58 +224,138 @@ TRACE_EVENT(oos_sync,
224 TP_printk("%s", __entry->buf) 224 TP_printk("%s", __entry->buf)
225); 225);
226 226
227#define MAX_CMD_STR_LEN 256
228TRACE_EVENT(gvt_command, 227TRACE_EVENT(gvt_command,
229 TP_PROTO(u8 vm_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len, bool ring_buffer_cmd, cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler), 228 TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len,
230 229 u32 buf_type),
231 TP_ARGS(vm_id, ring_id, ip_gma, cmd_va, cmd_len, ring_buffer_cmd, cost_pre_cmd_handler, cost_cmd_handler), 230
232 231 TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type),
233 TP_STRUCT__entry( 232
234 __field(u8, vm_id) 233 TP_STRUCT__entry(
235 __field(u8, ring_id) 234 __field(u8, vgpu_id)
236 __field(int, i) 235 __field(u8, ring_id)
237 __array(char, tmp_buf, MAX_CMD_STR_LEN) 236 __field(u32, ip_gma)
238 __array(char, cmd_str, MAX_CMD_STR_LEN) 237 __field(u32, buf_type)
239 ), 238 __field(u32, cmd_len)
240 239 __dynamic_array(u32, raw_cmd, cmd_len)
241 TP_fast_assign( 240 ),
242 __entry->vm_id = vm_id; 241
243 __entry->ring_id = ring_id; 242 TP_fast_assign(
244 __entry->cmd_str[0] = '\0'; 243 __entry->vgpu_id = vgpu_id;
245 snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "VM(%d) Ring(%d): %s ip(%08x) pre handler cost (%llu), handler cost (%llu) ", vm_id, ring_id, ring_buffer_cmd ? "RB":"BB", ip_gma, cost_pre_cmd_handler, cost_cmd_handler); 244 __entry->ring_id = ring_id;
246 strcat(__entry->cmd_str, __entry->tmp_buf); 245 __entry->ip_gma = ip_gma;
247 entry->i = 0; 246 __entry->buf_type = buf_type;
248 while (cmd_len > 0) { 247 __entry->cmd_len = cmd_len;
249 if (cmd_len >= 8) { 248 memcpy(__get_dynamic_array(raw_cmd), cmd_va, cmd_len * sizeof(*cmd_va));
250 snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x %08x %08x %08x %08x ", 249 ),
251 cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3], 250
252 cmd_va[__entry->i+4], cmd_va[__entry->i+5], cmd_va[__entry->i+6], cmd_va[__entry->i+7]); 251
253 __entry->i += 8; 252 TP_printk("vgpu%d ring %d: buf_type %u, ip_gma %08x, raw cmd %s",
254 cmd_len -= 8; 253 __entry->vgpu_id,
255 strcat(__entry->cmd_str, __entry->tmp_buf); 254 __entry->ring_id,
256 } else if (cmd_len >= 4) { 255 __entry->buf_type,
257 snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x ", 256 __entry->ip_gma,
258 cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3]); 257 __print_array(__get_dynamic_array(raw_cmd), __entry->cmd_len, 4))
259 __entry->i += 4; 258);
260 cmd_len -= 4; 259
261 strcat(__entry->cmd_str, __entry->tmp_buf); 260#define GVT_TEMP_STR_LEN 10
262 } else if (cmd_len >= 2) { 261TRACE_EVENT(write_ir,
263 snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x ", cmd_va[__entry->i], cmd_va[__entry->i+1]); 262 TP_PROTO(int id, char *reg_name, unsigned int reg, unsigned int new_val,
264 __entry->i += 2; 263 unsigned int old_val, bool changed),
265 cmd_len -= 2; 264
266 strcat(__entry->cmd_str, __entry->tmp_buf); 265 TP_ARGS(id, reg_name, reg, new_val, old_val, changed),
267 } else if (cmd_len == 1) { 266
268 snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x ", cmd_va[__entry->i]); 267 TP_STRUCT__entry(
269 __entry->i += 1; 268 __field(int, id)
270 cmd_len -= 1; 269 __array(char, buf, GVT_TEMP_STR_LEN)
271 strcat(__entry->cmd_str, __entry->tmp_buf); 270 __field(unsigned int, reg)
272 } 271 __field(unsigned int, new_val)
273 } 272 __field(unsigned int, old_val)
274 strcat(__entry->cmd_str, "\n"); 273 __field(bool, changed)
275 ), 274 ),
275
276 TP_fast_assign(
277 __entry->id = id;
278 snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", reg_name);
279 __entry->reg = reg;
280 __entry->new_val = new_val;
281 __entry->old_val = old_val;
282 __entry->changed = changed;
283 ),
284
285 TP_printk("VM%u write [%s] %x, new %08x, old %08x, changed %08x\n",
286 __entry->id, __entry->buf, __entry->reg, __entry->new_val,
287 __entry->old_val, __entry->changed)
288);
289
290TRACE_EVENT(propagate_event,
291 TP_PROTO(int id, const char *irq_name, int bit),
292
293 TP_ARGS(id, irq_name, bit),
294
295 TP_STRUCT__entry(
296 __field(int, id)
297 __array(char, buf, GVT_TEMP_STR_LEN)
298 __field(int, bit)
299 ),
276 300
277 TP_printk("%s", __entry->cmd_str) 301 TP_fast_assign(
302 __entry->id = id;
303 snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", irq_name);
304 __entry->bit = bit;
305 ),
306
307 TP_printk("Set bit (%d) for (%s) for vgpu (%d)\n",
308 __entry->bit, __entry->buf, __entry->id)
278); 309);
310
311TRACE_EVENT(inject_msi,
312 TP_PROTO(int id, unsigned int address, unsigned int data),
313
314 TP_ARGS(id, address, data),
315
316 TP_STRUCT__entry(
317 __field(int, id)
318 __field(unsigned int, address)
319 __field(unsigned int, data)
320 ),
321
322 TP_fast_assign(
323 __entry->id = id;
324 __entry->address = address;
325 __entry->data = data;
326 ),
327
328 TP_printk("vgpu%d:inject msi address %x data %x\n",
329 __entry->id, __entry->address, __entry->data)
330);
331
332TRACE_EVENT(render_mmio,
333 TP_PROTO(int id, char *action, unsigned int reg,
334 unsigned int old_val, unsigned int new_val),
335
336 TP_ARGS(id, action, reg, new_val, old_val),
337
338 TP_STRUCT__entry(
339 __field(int, id)
340 __array(char, buf, GVT_TEMP_STR_LEN)
341 __field(unsigned int, reg)
342 __field(unsigned int, old_val)
343 __field(unsigned int, new_val)
344 ),
345
346 TP_fast_assign(
347 __entry->id = id;
348 snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", action);
349 __entry->reg = reg;
350 __entry->old_val = old_val;
351 __entry->new_val = new_val;
352 ),
353
354 TP_printk("VM%u %s reg %x, old %08x new %08x\n",
355 __entry->id, __entry->buf, __entry->reg,
356 __entry->old_val, __entry->new_val)
357);
358
279#endif /* _GVT_TRACE_H_ */ 359#endif /* _GVT_TRACE_H_ */
280 360
281/* This part must be out of protection */ 361/* This part must be out of protection */
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 6e3cbd8caec2..90c14e6e3ea0 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -501,9 +501,14 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
501 501
502 /* full GPU reset or device model level reset */ 502 /* full GPU reset or device model level reset */
503 if (engine_mask == ALL_ENGINES || dmlr) { 503 if (engine_mask == ALL_ENGINES || dmlr) {
504
504 intel_vgpu_reset_gtt(vgpu, dmlr); 505 intel_vgpu_reset_gtt(vgpu, dmlr);
505 intel_vgpu_reset_resource(vgpu); 506
506 intel_vgpu_reset_mmio(vgpu); 507 /*fence will not be reset during virtual reset */
508 if (dmlr)
509 intel_vgpu_reset_resource(vgpu);
510
511 intel_vgpu_reset_mmio(vgpu, dmlr);
507 populate_pvinfo_page(vgpu); 512 populate_pvinfo_page(vgpu);
508 intel_vgpu_reset_display(vgpu); 513 intel_vgpu_reset_display(vgpu);
509 514
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7e0816ccdc21..4577b0af6886 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1670,12 +1670,22 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
1670 seq_printf(m, "FBC disabled: %s\n", 1670 seq_printf(m, "FBC disabled: %s\n",
1671 dev_priv->fbc.no_fbc_reason); 1671 dev_priv->fbc.no_fbc_reason);
1672 1672
1673 if (intel_fbc_is_active(dev_priv) && INTEL_GEN(dev_priv) >= 7) { 1673 if (intel_fbc_is_active(dev_priv)) {
1674 uint32_t mask = INTEL_GEN(dev_priv) >= 8 ? 1674 u32 mask;
1675 BDW_FBC_COMPRESSION_MASK : 1675
1676 IVB_FBC_COMPRESSION_MASK; 1676 if (INTEL_GEN(dev_priv) >= 8)
1677 seq_printf(m, "Compressing: %s\n", 1677 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1678 yesno(I915_READ(FBC_STATUS2) & mask)); 1678 else if (INTEL_GEN(dev_priv) >= 7)
1679 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1680 else if (INTEL_GEN(dev_priv) >= 5)
1681 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1682 else if (IS_G4X(dev_priv))
1683 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1684 else
1685 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1686 FBC_STAT_COMPRESSED);
1687
1688 seq_printf(m, "Compressing: %s\n", yesno(mask));
1679 } 1689 }
1680 1690
1681 mutex_unlock(&dev_priv->fbc.lock); 1691 mutex_unlock(&dev_priv->fbc.lock);
@@ -1684,7 +1694,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
1684 return 0; 1694 return 0;
1685} 1695}
1686 1696
1687static int i915_fbc_fc_get(void *data, u64 *val) 1697static int i915_fbc_false_color_get(void *data, u64 *val)
1688{ 1698{
1689 struct drm_i915_private *dev_priv = data; 1699 struct drm_i915_private *dev_priv = data;
1690 1700
@@ -1696,7 +1706,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
1696 return 0; 1706 return 0;
1697} 1707}
1698 1708
1699static int i915_fbc_fc_set(void *data, u64 val) 1709static int i915_fbc_false_color_set(void *data, u64 val)
1700{ 1710{
1701 struct drm_i915_private *dev_priv = data; 1711 struct drm_i915_private *dev_priv = data;
1702 u32 reg; 1712 u32 reg;
@@ -1717,8 +1727,8 @@ static int i915_fbc_fc_set(void *data, u64 val)
1717 return 0; 1727 return 0;
1718} 1728}
1719 1729
1720DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops, 1730DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1721 i915_fbc_fc_get, i915_fbc_fc_set, 1731 i915_fbc_false_color_get, i915_fbc_false_color_set,
1722 "%llu\n"); 1732 "%llu\n");
1723 1733
1724static int i915_ips_status(struct seq_file *m, void *unused) 1734static int i915_ips_status(struct seq_file *m, void *unused)
@@ -1988,6 +1998,12 @@ static int i915_context_status(struct seq_file *m, void *unused)
1988 seq_putc(m, '\n'); 1998 seq_putc(m, '\n');
1989 } 1999 }
1990 2000
2001 seq_printf(m,
2002 "\tvma hashtable size=%u (actual %lu), count=%u\n",
2003 ctx->vma_lut.ht_size,
2004 BIT(ctx->vma_lut.ht_bits),
2005 ctx->vma_lut.ht_count);
2006
1991 seq_putc(m, '\n'); 2007 seq_putc(m, '\n');
1992 } 2008 }
1993 2009
@@ -4289,26 +4305,27 @@ i915_drop_caches_set(void *data, u64 val)
4289{ 4305{
4290 struct drm_i915_private *dev_priv = data; 4306 struct drm_i915_private *dev_priv = data;
4291 struct drm_device *dev = &dev_priv->drm; 4307 struct drm_device *dev = &dev_priv->drm;
4292 int ret; 4308 int ret = 0;
4293 4309
4294 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 4310 DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
4295 4311
4296 /* No need to check and wait for gpu resets, only libdrm auto-restarts 4312 /* No need to check and wait for gpu resets, only libdrm auto-restarts
4297 * on ioctls on -EAGAIN. */ 4313 * on ioctls on -EAGAIN. */
4298 ret = mutex_lock_interruptible(&dev->struct_mutex); 4314 if (val & (DROP_ACTIVE | DROP_RETIRE)) {
4299 if (ret) 4315 ret = mutex_lock_interruptible(&dev->struct_mutex);
4300 return ret;
4301
4302 if (val & DROP_ACTIVE) {
4303 ret = i915_gem_wait_for_idle(dev_priv,
4304 I915_WAIT_INTERRUPTIBLE |
4305 I915_WAIT_LOCKED);
4306 if (ret) 4316 if (ret)
4307 goto unlock; 4317 return ret;
4308 }
4309 4318
4310 if (val & DROP_RETIRE) 4319 if (val & DROP_ACTIVE)
4311 i915_gem_retire_requests(dev_priv); 4320 ret = i915_gem_wait_for_idle(dev_priv,
4321 I915_WAIT_INTERRUPTIBLE |
4322 I915_WAIT_LOCKED);
4323
4324 if (val & DROP_RETIRE)
4325 i915_gem_retire_requests(dev_priv);
4326
4327 mutex_unlock(&dev->struct_mutex);
4328 }
4312 4329
4313 lockdep_set_current_reclaim_state(GFP_KERNEL); 4330 lockdep_set_current_reclaim_state(GFP_KERNEL);
4314 if (val & DROP_BOUND) 4331 if (val & DROP_BOUND)
@@ -4321,9 +4338,6 @@ i915_drop_caches_set(void *data, u64 val)
4321 i915_gem_shrink_all(dev_priv); 4338 i915_gem_shrink_all(dev_priv);
4322 lockdep_clear_current_reclaim_state(); 4339 lockdep_clear_current_reclaim_state();
4323 4340
4324unlock:
4325 mutex_unlock(&dev->struct_mutex);
4326
4327 if (val & DROP_FREED) { 4341 if (val & DROP_FREED) {
4328 synchronize_rcu(); 4342 synchronize_rcu();
4329 i915_gem_drain_freed_objects(dev_priv); 4343 i915_gem_drain_freed_objects(dev_priv);
@@ -4861,7 +4875,7 @@ static const struct i915_debugfs_files {
4861 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 4875 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4862 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 4876 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4863 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 4877 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4864 {"i915_fbc_false_color", &i915_fbc_fc_fops}, 4878 {"i915_fbc_false_color", &i915_fbc_false_color_fops},
4865 {"i915_dp_test_data", &i915_displayport_test_data_fops}, 4879 {"i915_dp_test_data", &i915_displayport_test_data_fops},
4866 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 4880 {"i915_dp_test_type", &i915_displayport_test_type_fops},
4867 {"i915_dp_test_active", &i915_displayport_test_active_fops}, 4881 {"i915_dp_test_active", &i915_displayport_test_active_fops},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d703897786e9..ee2325b180e7 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -139,6 +139,9 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
139 } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 139 } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
140 ret = PCH_SPT; 140 ret = PCH_SPT;
141 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); 141 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
142 } else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
143 ret = PCH_CNP;
144 DRM_DEBUG_KMS("Assuming CannonPoint PCH\n");
142 } 145 }
143 146
144 return ret; 147 return ret;
@@ -170,24 +173,29 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
170 while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { 173 while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
171 if (pch->vendor == PCI_VENDOR_ID_INTEL) { 174 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
172 unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK; 175 unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
173 dev_priv->pch_id = id; 176 unsigned short id_ext = pch->device &
177 INTEL_PCH_DEVICE_ID_MASK_EXT;
174 178
175 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 179 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
180 dev_priv->pch_id = id;
176 dev_priv->pch_type = PCH_IBX; 181 dev_priv->pch_type = PCH_IBX;
177 DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 182 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
178 WARN_ON(!IS_GEN5(dev_priv)); 183 WARN_ON(!IS_GEN5(dev_priv));
179 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 184 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
185 dev_priv->pch_id = id;
180 dev_priv->pch_type = PCH_CPT; 186 dev_priv->pch_type = PCH_CPT;
181 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 187 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
182 WARN_ON(!(IS_GEN6(dev_priv) || 188 WARN_ON(!(IS_GEN6(dev_priv) ||
183 IS_IVYBRIDGE(dev_priv))); 189 IS_IVYBRIDGE(dev_priv)));
184 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 190 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
185 /* PantherPoint is CPT compatible */ 191 /* PantherPoint is CPT compatible */
192 dev_priv->pch_id = id;
186 dev_priv->pch_type = PCH_CPT; 193 dev_priv->pch_type = PCH_CPT;
187 DRM_DEBUG_KMS("Found PantherPoint PCH\n"); 194 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
188 WARN_ON(!(IS_GEN6(dev_priv) || 195 WARN_ON(!(IS_GEN6(dev_priv) ||
189 IS_IVYBRIDGE(dev_priv))); 196 IS_IVYBRIDGE(dev_priv)));
190 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 197 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
198 dev_priv->pch_id = id;
191 dev_priv->pch_type = PCH_LPT; 199 dev_priv->pch_type = PCH_LPT;
192 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 200 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
193 WARN_ON(!IS_HASWELL(dev_priv) && 201 WARN_ON(!IS_HASWELL(dev_priv) &&
@@ -195,6 +203,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
195 WARN_ON(IS_HSW_ULT(dev_priv) || 203 WARN_ON(IS_HSW_ULT(dev_priv) ||
196 IS_BDW_ULT(dev_priv)); 204 IS_BDW_ULT(dev_priv));
197 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 205 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
206 dev_priv->pch_id = id;
198 dev_priv->pch_type = PCH_LPT; 207 dev_priv->pch_type = PCH_LPT;
199 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 208 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
200 WARN_ON(!IS_HASWELL(dev_priv) && 209 WARN_ON(!IS_HASWELL(dev_priv) &&
@@ -202,20 +211,35 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
202 WARN_ON(!IS_HSW_ULT(dev_priv) && 211 WARN_ON(!IS_HSW_ULT(dev_priv) &&
203 !IS_BDW_ULT(dev_priv)); 212 !IS_BDW_ULT(dev_priv));
204 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { 213 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
214 dev_priv->pch_id = id;
205 dev_priv->pch_type = PCH_SPT; 215 dev_priv->pch_type = PCH_SPT;
206 DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); 216 DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
207 WARN_ON(!IS_SKYLAKE(dev_priv) && 217 WARN_ON(!IS_SKYLAKE(dev_priv) &&
208 !IS_KABYLAKE(dev_priv)); 218 !IS_KABYLAKE(dev_priv));
209 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { 219 } else if (id_ext == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
220 dev_priv->pch_id = id_ext;
210 dev_priv->pch_type = PCH_SPT; 221 dev_priv->pch_type = PCH_SPT;
211 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); 222 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
212 WARN_ON(!IS_SKYLAKE(dev_priv) && 223 WARN_ON(!IS_SKYLAKE(dev_priv) &&
213 !IS_KABYLAKE(dev_priv)); 224 !IS_KABYLAKE(dev_priv));
214 } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { 225 } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
226 dev_priv->pch_id = id;
215 dev_priv->pch_type = PCH_KBP; 227 dev_priv->pch_type = PCH_KBP;
216 DRM_DEBUG_KMS("Found KabyPoint PCH\n"); 228 DRM_DEBUG_KMS("Found KabyPoint PCH\n");
217 WARN_ON(!IS_SKYLAKE(dev_priv) && 229 WARN_ON(!IS_SKYLAKE(dev_priv) &&
218 !IS_KABYLAKE(dev_priv)); 230 !IS_KABYLAKE(dev_priv));
231 } else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
232 dev_priv->pch_id = id;
233 dev_priv->pch_type = PCH_CNP;
234 DRM_DEBUG_KMS("Found CannonPoint PCH\n");
235 WARN_ON(!IS_CANNONLAKE(dev_priv) &&
236 !IS_COFFEELAKE(dev_priv));
237 } else if (id_ext == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
238 dev_priv->pch_id = id_ext;
239 dev_priv->pch_type = PCH_CNP;
240 DRM_DEBUG_KMS("Found CannonPoint LP PCH\n");
241 WARN_ON(!IS_CANNONLAKE(dev_priv) &&
242 !IS_COFFEELAKE(dev_priv));
219 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || 243 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
220 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || 244 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
221 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && 245 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -223,6 +247,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
223 PCI_SUBVENDOR_ID_REDHAT_QUMRANET && 247 PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
224 pch->subsystem_device == 248 pch->subsystem_device ==
225 PCI_SUBDEVICE_ID_QEMU)) { 249 PCI_SUBDEVICE_ID_QEMU)) {
250 dev_priv->pch_id = id;
226 dev_priv->pch_type = 251 dev_priv->pch_type =
227 intel_virt_detect_pch(dev_priv); 252 intel_virt_detect_pch(dev_priv);
228 } else 253 } else
@@ -351,6 +376,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
351 case I915_PARAM_HAS_EXEC_ASYNC: 376 case I915_PARAM_HAS_EXEC_ASYNC:
352 case I915_PARAM_HAS_EXEC_FENCE: 377 case I915_PARAM_HAS_EXEC_FENCE:
353 case I915_PARAM_HAS_EXEC_CAPTURE: 378 case I915_PARAM_HAS_EXEC_CAPTURE:
379 case I915_PARAM_HAS_EXEC_BATCH_FIRST:
354 /* For the time being all of these are always true; 380 /* For the time being all of these are always true;
355 * if some supported hardware does not have one of these 381 * if some supported hardware does not have one of these
356 * features this value needs to be provided from 382 * features this value needs to be provided from
@@ -358,6 +384,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
358 */ 384 */
359 value = 1; 385 value = 1;
360 break; 386 break;
387 case I915_PARAM_SLICE_MASK:
388 value = INTEL_INFO(dev_priv)->sseu.slice_mask;
389 if (!value)
390 return -ENODEV;
391 break;
392 case I915_PARAM_SUBSLICE_MASK:
393 value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
394 if (!value)
395 return -ENODEV;
396 break;
361 default: 397 default:
362 DRM_DEBUG("Unknown parameter %d\n", param->param); 398 DRM_DEBUG("Unknown parameter %d\n", param->param);
363 return -EINVAL; 399 return -EINVAL;
@@ -553,6 +589,7 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
553 intel_uc_fini_hw(dev_priv); 589 intel_uc_fini_hw(dev_priv);
554 i915_gem_cleanup_engines(dev_priv); 590 i915_gem_cleanup_engines(dev_priv);
555 i915_gem_context_fini(dev_priv); 591 i915_gem_context_fini(dev_priv);
592 i915_gem_cleanup_userptr(dev_priv);
556 mutex_unlock(&dev_priv->drm.struct_mutex); 593 mutex_unlock(&dev_priv->drm.struct_mutex);
557 594
558 i915_gem_drain_freed_objects(dev_priv); 595 i915_gem_drain_freed_objects(dev_priv);
@@ -997,6 +1034,8 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
997 DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores)); 1034 DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
998 1035
999 intel_uc_sanitize_options(dev_priv); 1036 intel_uc_sanitize_options(dev_priv);
1037
1038 intel_gvt_sanitize_options(dev_priv);
1000} 1039}
1001 1040
1002/** 1041/**
@@ -2459,9 +2498,6 @@ static int intel_runtime_resume(struct device *kdev)
2459 2498
2460 intel_guc_resume(dev_priv); 2499 intel_guc_resume(dev_priv);
2461 2500
2462 if (IS_GEN6(dev_priv))
2463 intel_init_pch_refclk(dev_priv);
2464
2465 if (IS_GEN9_LP(dev_priv)) { 2501 if (IS_GEN9_LP(dev_priv)) {
2466 bxt_disable_dc9(dev_priv); 2502 bxt_disable_dc9(dev_priv);
2467 bxt_display_core_init(dev_priv, true); 2503 bxt_display_core_init(dev_priv, true);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 35f71b559808..e1f7c97a338a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -37,7 +37,7 @@
37#include <linux/i2c.h> 37#include <linux/i2c.h>
38#include <linux/i2c-algo-bit.h> 38#include <linux/i2c-algo-bit.h>
39#include <linux/backlight.h> 39#include <linux/backlight.h>
40#include <linux/hashtable.h> 40#include <linux/hash.h>
41#include <linux/intel-iommu.h> 41#include <linux/intel-iommu.h>
42#include <linux/kref.h> 42#include <linux/kref.h>
43#include <linux/pm_qos.h> 43#include <linux/pm_qos.h>
@@ -80,8 +80,8 @@
80 80
81#define DRIVER_NAME "i915" 81#define DRIVER_NAME "i915"
82#define DRIVER_DESC "Intel Graphics" 82#define DRIVER_DESC "Intel Graphics"
83#define DRIVER_DATE "20170529" 83#define DRIVER_DATE "20170619"
84#define DRIVER_TIMESTAMP 1496041258 84#define DRIVER_TIMESTAMP 1497857498
85 85
86/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and 86/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
87 * WARN_ON()) for hw state sanity checks to check for unexpected conditions 87 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -752,7 +752,6 @@ struct intel_csr {
752 func(has_aliasing_ppgtt); \ 752 func(has_aliasing_ppgtt); \
753 func(has_csr); \ 753 func(has_csr); \
754 func(has_ddi); \ 754 func(has_ddi); \
755 func(has_decoupled_mmio); \
756 func(has_dp_mst); \ 755 func(has_dp_mst); \
757 func(has_fbc); \ 756 func(has_fbc); \
758 func(has_fpga_dbg); \ 757 func(has_fpga_dbg); \
@@ -827,6 +826,8 @@ enum intel_platform {
827 INTEL_BROXTON, 826 INTEL_BROXTON,
828 INTEL_KABYLAKE, 827 INTEL_KABYLAKE,
829 INTEL_GEMINILAKE, 828 INTEL_GEMINILAKE,
829 INTEL_COFFEELAKE,
830 INTEL_CANNONLAKE,
830 INTEL_MAX_PLATFORMS 831 INTEL_MAX_PLATFORMS
831}; 832};
832 833
@@ -1152,6 +1153,7 @@ enum intel_pch {
1152 PCH_LPT, /* Lynxpoint PCH */ 1153 PCH_LPT, /* Lynxpoint PCH */
1153 PCH_SPT, /* Sunrisepoint PCH */ 1154 PCH_SPT, /* Sunrisepoint PCH */
1154 PCH_KBP, /* Kabypoint PCH */ 1155 PCH_KBP, /* Kabypoint PCH */
1156 PCH_CNP, /* Cannonpoint PCH */
1155 PCH_NOP, 1157 PCH_NOP,
1156}; 1158};
1157 1159
@@ -1160,11 +1162,9 @@ enum intel_sbi_destination {
1160 SBI_MPHY, 1162 SBI_MPHY,
1161}; 1163};
1162 1164
1163#define QUIRK_PIPEA_FORCE (1<<0)
1164#define QUIRK_LVDS_SSC_DISABLE (1<<1) 1165#define QUIRK_LVDS_SSC_DISABLE (1<<1)
1165#define QUIRK_INVERT_BRIGHTNESS (1<<2) 1166#define QUIRK_INVERT_BRIGHTNESS (1<<2)
1166#define QUIRK_BACKLIGHT_PRESENT (1<<3) 1167#define QUIRK_BACKLIGHT_PRESENT (1<<3)
1167#define QUIRK_PIPEB_FORCE (1<<4)
1168#define QUIRK_PIN_SWIZZLED_PAGES (1<<5) 1168#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
1169 1169
1170struct intel_fbdev; 1170struct intel_fbdev;
@@ -1454,6 +1454,13 @@ struct i915_gem_mm {
1454 /** LRU list of objects with fence regs on them. */ 1454 /** LRU list of objects with fence regs on them. */
1455 struct list_head fence_list; 1455 struct list_head fence_list;
1456 1456
1457 /**
1458 * Workqueue to fault in userptr pages, flushed by the execbuf
1459 * when required but otherwise left to userspace to try again
1460 * on EAGAIN.
1461 */
1462 struct workqueue_struct *userptr_wq;
1463
1457 u64 unordered_timeline; 1464 u64 unordered_timeline;
1458 1465
1459 /* the indicator for dispatch video commands on two BSD rings */ 1466 /* the indicator for dispatch video commands on two BSD rings */
@@ -2017,9 +2024,17 @@ struct i915_oa_ops {
2017 void (*init_oa_buffer)(struct drm_i915_private *dev_priv); 2024 void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
2018 2025
2019 /** 2026 /**
2020 * @enable_metric_set: Applies any MUX configuration to set up the 2027 * @select_metric_set: The auto generated code that checks whether a
2021 * Boolean and Custom (B/C) counters that are part of the counter 2028 * requested OA config is applicable to the system and if so sets up
2022 * reports being sampled. May apply system constraints such as 2029 * the mux, oa and flex eu register config pointers according to the
2030 * current dev_priv->perf.oa.metrics_set.
2031 */
2032 int (*select_metric_set)(struct drm_i915_private *dev_priv);
2033
2034 /**
2035 * @enable_metric_set: Selects and applies any MUX configuration to set
2036 * up the Boolean and Custom (B/C) counters that are part of the
2037 * counter reports being sampled. May apply system constraints such as
2023 * disabling EU clock gating as required. 2038 * disabling EU clock gating as required.
2024 */ 2039 */
2025 int (*enable_metric_set)(struct drm_i915_private *dev_priv); 2040 int (*enable_metric_set)(struct drm_i915_private *dev_priv);
@@ -2050,20 +2065,13 @@ struct i915_oa_ops {
2050 size_t *offset); 2065 size_t *offset);
2051 2066
2052 /** 2067 /**
2053 * @oa_buffer_check: Check for OA buffer data + update tail 2068 * @oa_hw_tail_read: read the OA tail pointer register
2054 *
2055 * This is either called via fops or the poll check hrtimer (atomic
2056 * ctx) without any locks taken.
2057 * 2069 *
2058 * It's safe to read OA config state here unlocked, assuming that this 2070 * In particular this enables us to share all the fiddly code for
2059 * is only called while the stream is enabled, while the global OA 2071 * handling the OA unit tail pointer race that affects multiple
2060 * configuration can't be modified. 2072 * generations.
2061 *
2062 * Efficiency is more important than avoiding some false positives
2063 * here, which will be handled gracefully - likely resulting in an
2064 * %EAGAIN error for userspace.
2065 */ 2073 */
2066 bool (*oa_buffer_check)(struct drm_i915_private *dev_priv); 2074 u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
2067}; 2075};
2068 2076
2069struct intel_cdclk_state { 2077struct intel_cdclk_state {
@@ -2394,8 +2402,6 @@ struct drm_i915_private {
2394 struct mutex lock; 2402 struct mutex lock;
2395 struct list_head streams; 2403 struct list_head streams;
2396 2404
2397 spinlock_t hook_lock;
2398
2399 struct { 2405 struct {
2400 struct i915_perf_stream *exclusive_stream; 2406 struct i915_perf_stream *exclusive_stream;
2401 2407
@@ -2413,17 +2419,23 @@ struct drm_i915_private {
2413 2419
2414 bool periodic; 2420 bool periodic;
2415 int period_exponent; 2421 int period_exponent;
2422 int timestamp_frequency;
2416 2423
2417 int metrics_set; 2424 int metrics_set;
2418 2425
2419 const struct i915_oa_reg *mux_regs; 2426 const struct i915_oa_reg *mux_regs[6];
2420 int mux_regs_len; 2427 int mux_regs_lens[6];
2428 int n_mux_configs;
2429
2421 const struct i915_oa_reg *b_counter_regs; 2430 const struct i915_oa_reg *b_counter_regs;
2422 int b_counter_regs_len; 2431 int b_counter_regs_len;
2432 const struct i915_oa_reg *flex_regs;
2433 int flex_regs_len;
2423 2434
2424 struct { 2435 struct {
2425 struct i915_vma *vma; 2436 struct i915_vma *vma;
2426 u8 *vaddr; 2437 u8 *vaddr;
2438 u32 last_ctx_id;
2427 int format; 2439 int format;
2428 int format_size; 2440 int format_size;
2429 2441
@@ -2493,6 +2505,15 @@ struct drm_i915_private {
2493 } oa_buffer; 2505 } oa_buffer;
2494 2506
2495 u32 gen7_latched_oastatus1; 2507 u32 gen7_latched_oastatus1;
2508 u32 ctx_oactxctrl_offset;
2509 u32 ctx_flexeu0_offset;
2510
2511 /**
2512 * The RPT_ID/reason field for Gen8+ includes a bit
2513 * to determine if the CTX ID in the report is valid
2514 * but the specific bit differs between Gen 8 and 9
2515 */
2516 u32 gen8_valid_ctx_bit;
2496 2517
2497 struct i915_oa_ops ops; 2518 struct i915_oa_ops ops;
2498 const struct i915_oa_format *oa_formats; 2519 const struct i915_oa_format *oa_formats;
@@ -2768,6 +2789,8 @@ intel_info(const struct drm_i915_private *dev_priv)
2768#define IS_BROXTON(dev_priv) ((dev_priv)->info.platform == INTEL_BROXTON) 2789#define IS_BROXTON(dev_priv) ((dev_priv)->info.platform == INTEL_BROXTON)
2769#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_KABYLAKE) 2790#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_KABYLAKE)
2770#define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.platform == INTEL_GEMINILAKE) 2791#define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.platform == INTEL_GEMINILAKE)
2792#define IS_COFFEELAKE(dev_priv) ((dev_priv)->info.platform == INTEL_COFFEELAKE)
2793#define IS_CANNONLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_CANNONLAKE)
2771#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile) 2794#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
2772#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ 2795#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
2773 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) 2796 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
@@ -2803,10 +2826,18 @@ intel_info(const struct drm_i915_private *dev_priv)
2803#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ 2826#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
2804 INTEL_DEVID(dev_priv) == 0x5915 || \ 2827 INTEL_DEVID(dev_priv) == 0x5915 || \
2805 INTEL_DEVID(dev_priv) == 0x591E) 2828 INTEL_DEVID(dev_priv) == 0x591E)
2829#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
2830 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
2806#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2831#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
2807 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2832 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
2808#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2833#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
2809 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030) 2834 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
2835#define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
2836 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
2837#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
2838 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
2839#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
2840 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
2810 2841
2811#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support) 2842#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
2812 2843
@@ -2845,6 +2876,12 @@ intel_info(const struct drm_i915_private *dev_priv)
2845#define IS_GLK_REVID(dev_priv, since, until) \ 2876#define IS_GLK_REVID(dev_priv, since, until) \
2846 (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until)) 2877 (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
2847 2878
2879#define CNL_REVID_A0 0x0
2880#define CNL_REVID_B0 0x1
2881
2882#define IS_CNL_REVID(p, since, until) \
2883 (IS_CANNONLAKE(p) && IS_REVID(p, since, until))
2884
2848/* 2885/*
2849 * The genX designation typically refers to the render engine, so render 2886 * The genX designation typically refers to the render engine, so render
2850 * capability related checks should use IS_GEN, while display and other checks 2887 * capability related checks should use IS_GEN, while display and other checks
@@ -2859,6 +2896,7 @@ intel_info(const struct drm_i915_private *dev_priv)
2859#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6))) 2896#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6)))
2860#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7))) 2897#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
2861#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8))) 2898#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
2899#define IS_GEN10(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(9)))
2862 2900
2863#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) 2901#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
2864#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv)) 2902#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv))
@@ -2959,6 +2997,7 @@ intel_info(const struct drm_i915_private *dev_priv)
2959#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu) 2997#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
2960 2998
2961#define INTEL_PCH_DEVICE_ID_MASK 0xff00 2999#define INTEL_PCH_DEVICE_ID_MASK 0xff00
3000#define INTEL_PCH_DEVICE_ID_MASK_EXT 0xff80
2962#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 3001#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
2963#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 3002#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
2964#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 3003#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
@@ -2967,11 +3006,16 @@ intel_info(const struct drm_i915_private *dev_priv)
2967#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 3006#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
2968#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 3007#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
2969#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200 3008#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
3009#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
3010#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
2970#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 3011#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
2971#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 3012#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
2972#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ 3013#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
2973 3014
2974#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) 3015#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
3016#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
3017#define HAS_PCH_CNP_LP(dev_priv) \
3018 ((dev_priv)->pch_id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
2975#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP) 3019#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
2976#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) 3020#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
2977#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT) 3021#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
@@ -2986,7 +3030,7 @@ intel_info(const struct drm_i915_private *dev_priv)
2986 3030
2987#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display) 3031#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
2988 3032
2989#define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv)) 3033#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
2990 3034
2991/* DPF == dynamic parity feature */ 3035/* DPF == dynamic parity feature */
2992#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf) 3036#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
@@ -2996,8 +3040,6 @@ intel_info(const struct drm_i915_private *dev_priv)
2996#define GT_FREQUENCY_MULTIPLIER 50 3040#define GT_FREQUENCY_MULTIPLIER 50
2997#define GEN9_FREQ_SCALER 3 3041#define GEN9_FREQ_SCALER 3
2998 3042
2999#define HAS_DECOUPLED_MMIO(dev_priv) (INTEL_INFO(dev_priv)->has_decoupled_mmio)
3000
3001#include "i915_trace.h" 3043#include "i915_trace.h"
3002 3044
3003static inline bool intel_vtd_active(void) 3045static inline bool intel_vtd_active(void)
@@ -3194,7 +3236,8 @@ int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
3194 struct drm_file *file_priv); 3236 struct drm_file *file_priv);
3195int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, 3237int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
3196 struct drm_file *file_priv); 3238 struct drm_file *file_priv);
3197void i915_gem_init_userptr(struct drm_i915_private *dev_priv); 3239int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
3240void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
3198int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 3241int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
3199 struct drm_file *file); 3242 struct drm_file *file);
3200int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 3243int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@@ -3534,6 +3577,9 @@ i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
3534 3577
3535int i915_perf_open_ioctl(struct drm_device *dev, void *data, 3578int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3536 struct drm_file *file); 3579 struct drm_file *file);
3580void i915_oa_init_reg_state(struct intel_engine_cs *engine,
3581 struct i915_gem_context *ctx,
3582 uint32_t *reg_state);
3537 3583
3538/* i915_gem_evict.c */ 3584/* i915_gem_evict.c */
3539int __must_check i915_gem_evict_something(struct i915_address_space *vm, 3585int __must_check i915_gem_evict_something(struct i915_address_space *vm,
@@ -3544,7 +3590,7 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
3544int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, 3590int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
3545 struct drm_mm_node *node, 3591 struct drm_mm_node *node,
3546 unsigned int flags); 3592 unsigned int flags);
3547int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 3593int i915_gem_evict_vm(struct i915_address_space *vm);
3548 3594
3549/* belongs in i915_gem_gtt.h */ 3595/* belongs in i915_gem_gtt.h */
3550static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) 3596static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7b676fd1f075..7dcac3bfb771 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -49,10 +49,10 @@ static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
49 49
50static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) 50static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
51{ 51{
52 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) 52 if (obj->cache_dirty)
53 return false; 53 return false;
54 54
55 if (!i915_gem_object_is_coherent(obj)) 55 if (!obj->cache_coherent)
56 return true; 56 return true;
57 57
58 return obj->pin_display; 58 return obj->pin_display;
@@ -143,9 +143,9 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
143 struct i915_ggtt *ggtt = &dev_priv->ggtt; 143 struct i915_ggtt *ggtt = &dev_priv->ggtt;
144 struct drm_i915_gem_get_aperture *args = data; 144 struct drm_i915_gem_get_aperture *args = data;
145 struct i915_vma *vma; 145 struct i915_vma *vma;
146 size_t pinned; 146 u64 pinned;
147 147
148 pinned = 0; 148 pinned = ggtt->base.reserved;
149 mutex_lock(&dev->struct_mutex); 149 mutex_lock(&dev->struct_mutex);
150 list_for_each_entry(vma, &ggtt->base.active_list, vm_link) 150 list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
151 if (i915_vma_is_pinned(vma)) 151 if (i915_vma_is_pinned(vma))
@@ -233,6 +233,14 @@ err_phys:
233 return st; 233 return st;
234} 234}
235 235
236static void __start_cpu_write(struct drm_i915_gem_object *obj)
237{
238 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
239 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
240 if (cpu_write_needs_clflush(obj))
241 obj->cache_dirty = true;
242}
243
236static void 244static void
237__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, 245__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
238 struct sg_table *pages, 246 struct sg_table *pages,
@@ -245,11 +253,10 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
245 253
246 if (needs_clflush && 254 if (needs_clflush &&
247 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 && 255 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
248 !i915_gem_object_is_coherent(obj)) 256 !obj->cache_coherent)
249 drm_clflush_sg(pages); 257 drm_clflush_sg(pages);
250 258
251 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 259 __start_cpu_write(obj);
252 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
253} 260}
254 261
255static void 262static void
@@ -684,6 +691,12 @@ i915_gem_dumb_create(struct drm_file *file,
684 args->size, &args->handle); 691 args->size, &args->handle);
685} 692}
686 693
694static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
695{
696 return !(obj->cache_level == I915_CACHE_NONE ||
697 obj->cache_level == I915_CACHE_WT);
698}
699
687/** 700/**
688 * Creates a new mm object and returns a handle to it. 701 * Creates a new mm object and returns a handle to it.
689 * @dev: drm device pointer 702 * @dev: drm device pointer
@@ -753,6 +766,11 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
753 case I915_GEM_DOMAIN_CPU: 766 case I915_GEM_DOMAIN_CPU:
754 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); 767 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
755 break; 768 break;
769
770 case I915_GEM_DOMAIN_RENDER:
771 if (gpu_write_needs_clflush(obj))
772 obj->cache_dirty = true;
773 break;
756 } 774 }
757 775
758 obj->base.write_domain = 0; 776 obj->base.write_domain = 0;
@@ -838,8 +856,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
838 if (ret) 856 if (ret)
839 return ret; 857 return ret;
840 858
841 if (i915_gem_object_is_coherent(obj) || 859 if (obj->cache_coherent || !static_cpu_has(X86_FEATURE_CLFLUSH)) {
842 !static_cpu_has(X86_FEATURE_CLFLUSH)) {
843 ret = i915_gem_object_set_to_cpu_domain(obj, false); 860 ret = i915_gem_object_set_to_cpu_domain(obj, false);
844 if (ret) 861 if (ret)
845 goto err_unpin; 862 goto err_unpin;
@@ -854,7 +871,8 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
854 * optimizes for the case when the gpu will dirty the data 871 * optimizes for the case when the gpu will dirty the data
855 * anyway again before the next pread happens. 872 * anyway again before the next pread happens.
856 */ 873 */
857 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 874 if (!obj->cache_dirty &&
875 !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
858 *needs_clflush = CLFLUSH_BEFORE; 876 *needs_clflush = CLFLUSH_BEFORE;
859 877
860out: 878out:
@@ -890,8 +908,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
890 if (ret) 908 if (ret)
891 return ret; 909 return ret;
892 910
893 if (i915_gem_object_is_coherent(obj) || 911 if (obj->cache_coherent || !static_cpu_has(X86_FEATURE_CLFLUSH)) {
894 !static_cpu_has(X86_FEATURE_CLFLUSH)) {
895 ret = i915_gem_object_set_to_cpu_domain(obj, true); 912 ret = i915_gem_object_set_to_cpu_domain(obj, true);
896 if (ret) 913 if (ret)
897 goto err_unpin; 914 goto err_unpin;
@@ -906,14 +923,16 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
906 * This optimizes for the case when the gpu will use the data 923 * This optimizes for the case when the gpu will use the data
907 * right away and we therefore have to clflush anyway. 924 * right away and we therefore have to clflush anyway.
908 */ 925 */
909 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) 926 if (!obj->cache_dirty) {
910 *needs_clflush |= CLFLUSH_AFTER; 927 *needs_clflush |= CLFLUSH_AFTER;
911 928
912 /* Same trick applies to invalidate partially written cachelines read 929 /*
913 * before writing. 930 * Same trick applies to invalidate partially written
914 */ 931 * cachelines read before writing.
915 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 932 */
916 *needs_clflush |= CLFLUSH_BEFORE; 933 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
934 *needs_clflush |= CLFLUSH_BEFORE;
935 }
917 936
918out: 937out:
919 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 938 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
@@ -2337,8 +2356,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2337 struct page *page; 2356 struct page *page;
2338 unsigned long last_pfn = 0; /* suppress gcc warning */ 2357 unsigned long last_pfn = 0; /* suppress gcc warning */
2339 unsigned int max_segment; 2358 unsigned int max_segment;
2359 gfp_t noreclaim;
2340 int ret; 2360 int ret;
2341 gfp_t gfp;
2342 2361
2343 /* Assert that the object is not currently in any GPU domain. As it 2362 /* Assert that the object is not currently in any GPU domain. As it
2344 * wasn't in the GTT, there shouldn't be any way it could have been in 2363 * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2367,22 +2386,30 @@ rebuild_st:
2367 * Fail silently without starting the shrinker 2386 * Fail silently without starting the shrinker
2368 */ 2387 */
2369 mapping = obj->base.filp->f_mapping; 2388 mapping = obj->base.filp->f_mapping;
2370 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); 2389 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
2371 gfp |= __GFP_NORETRY | __GFP_NOWARN; 2390 noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2391
2372 sg = st->sgl; 2392 sg = st->sgl;
2373 st->nents = 0; 2393 st->nents = 0;
2374 for (i = 0; i < page_count; i++) { 2394 for (i = 0; i < page_count; i++) {
2375 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2395 const unsigned int shrink[] = {
2376 if (unlikely(IS_ERR(page))) { 2396 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2377 i915_gem_shrink(dev_priv, 2397 0,
2378 page_count, 2398 }, *s = shrink;
2379 I915_SHRINK_BOUND | 2399 gfp_t gfp = noreclaim;
2380 I915_SHRINK_UNBOUND | 2400
2381 I915_SHRINK_PURGEABLE); 2401 do {
2382 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2402 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2383 } 2403 if (likely(!IS_ERR(page)))
2384 if (unlikely(IS_ERR(page))) { 2404 break;
2385 gfp_t reclaim; 2405
2406 if (!*s) {
2407 ret = PTR_ERR(page);
2408 goto err_sg;
2409 }
2410
2411 i915_gem_shrink(dev_priv, 2 * page_count, *s++);
2412 cond_resched();
2386 2413
2387 /* We've tried hard to allocate the memory by reaping 2414 /* We've tried hard to allocate the memory by reaping
2388 * our own buffer, now let the real VM do its job and 2415 * our own buffer, now let the real VM do its job and
@@ -2392,15 +2419,26 @@ rebuild_st:
2392 * defer the oom here by reporting the ENOMEM back 2419 * defer the oom here by reporting the ENOMEM back
2393 * to userspace. 2420 * to userspace.
2394 */ 2421 */
2395 reclaim = mapping_gfp_mask(mapping); 2422 if (!*s) {
2396 reclaim |= __GFP_NORETRY; /* reclaim, but no oom */ 2423 /* reclaim and warn, but no oom */
2397 2424 gfp = mapping_gfp_mask(mapping);
2398 page = shmem_read_mapping_page_gfp(mapping, i, reclaim); 2425
2399 if (IS_ERR(page)) { 2426 /* Our bo are always dirty and so we require
2400 ret = PTR_ERR(page); 2427 * kswapd to reclaim our pages (direct reclaim
2401 goto err_sg; 2428 * does not effectively begin pageout of our
2429 * buffers on its own). However, direct reclaim
2430 * only waits for kswapd when under allocation
2431 * congestion. So as a result __GFP_RECLAIM is
2432 * unreliable and fails to actually reclaim our
2433 * dirty pages -- unless you try over and over
2434 * again with !__GFP_NORETRY. However, we still
2435 * want to fail this allocation rather than
2436 * trigger the out-of-memory killer and for
2437 * this we want the future __GFP_MAYFAIL.
2438 */
2402 } 2439 }
2403 } 2440 } while (1);
2441
2404 if (!i || 2442 if (!i ||
2405 sg->length >= max_segment || 2443 sg->length >= max_segment ||
2406 page_to_pfn(page) != last_pfn + 1) { 2444 page_to_pfn(page) != last_pfn + 1) {
@@ -3223,6 +3261,10 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
3223 if (vma->vm->file == fpriv) 3261 if (vma->vm->file == fpriv)
3224 i915_vma_close(vma); 3262 i915_vma_close(vma);
3225 3263
3264 vma = obj->vma_hashed;
3265 if (vma && vma->ctx->file_priv == fpriv)
3266 i915_vma_unlink_ctx(vma);
3267
3226 if (i915_gem_object_is_active(obj) && 3268 if (i915_gem_object_is_active(obj) &&
3227 !i915_gem_object_has_active_reference(obj)) { 3269 !i915_gem_object_has_active_reference(obj)) {
3228 i915_gem_object_set_active_reference(obj); 3270 i915_gem_object_set_active_reference(obj);
@@ -3376,10 +3418,13 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
3376 3418
3377static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) 3419static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
3378{ 3420{
3379 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU && !obj->cache_dirty) 3421 /*
3380 return; 3422 * We manually flush the CPU domain so that we can override and
3381 3423 * force the flush for the display, and perform it asyncrhonously.
3382 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); 3424 */
3425 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3426 if (obj->cache_dirty)
3427 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
3383 obj->base.write_domain = 0; 3428 obj->base.write_domain = 0;
3384} 3429}
3385 3430
@@ -3638,13 +3683,11 @@ restart:
3638 } 3683 }
3639 } 3684 }
3640 3685
3641 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
3642 i915_gem_object_is_coherent(obj))
3643 obj->cache_dirty = true;
3644
3645 list_for_each_entry(vma, &obj->vma_list, obj_link) 3686 list_for_each_entry(vma, &obj->vma_list, obj_link)
3646 vma->node.color = cache_level; 3687 vma->node.color = cache_level;
3647 obj->cache_level = cache_level; 3688 obj->cache_level = cache_level;
3689 obj->cache_coherent = i915_gem_object_is_coherent(obj);
3690 obj->cache_dirty = true; /* Always invalidate stale cachelines */
3648 3691
3649 return 0; 3692 return 0;
3650} 3693}
@@ -3866,9 +3909,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3866 if (ret) 3909 if (ret)
3867 return ret; 3910 return ret;
3868 3911
3869 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3870 return 0;
3871
3872 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 3912 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3873 3913
3874 /* Flush the CPU cache if it's still invalid. */ 3914 /* Flush the CPU cache if it's still invalid. */
@@ -3880,15 +3920,13 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3880 /* It should now be out of any other write domains, and we can update 3920 /* It should now be out of any other write domains, and we can update
3881 * the domain values for our changes. 3921 * the domain values for our changes.
3882 */ 3922 */
3883 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 3923 GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3884 3924
3885 /* If we're writing through the CPU, then the GPU read domains will 3925 /* If we're writing through the CPU, then the GPU read domains will
3886 * need to be invalidated at next use. 3926 * need to be invalidated at next use.
3887 */ 3927 */
3888 if (write) { 3928 if (write)
3889 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3929 __start_cpu_write(obj);
3890 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3891 }
3892 3930
3893 return 0; 3931 return 0;
3894} 3932}
@@ -4220,7 +4258,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
4220 4258
4221 INIT_LIST_HEAD(&obj->global_link); 4259 INIT_LIST_HEAD(&obj->global_link);
4222 INIT_LIST_HEAD(&obj->userfault_link); 4260 INIT_LIST_HEAD(&obj->userfault_link);
4223 INIT_LIST_HEAD(&obj->obj_exec_link);
4224 INIT_LIST_HEAD(&obj->vma_list); 4261 INIT_LIST_HEAD(&obj->vma_list);
4225 INIT_LIST_HEAD(&obj->batch_pool_link); 4262 INIT_LIST_HEAD(&obj->batch_pool_link);
4226 4263
@@ -4285,6 +4322,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4285 4322
4286 mapping = obj->base.filp->f_mapping; 4323 mapping = obj->base.filp->f_mapping;
4287 mapping_set_gfp_mask(mapping, mask); 4324 mapping_set_gfp_mask(mapping, mask);
4325 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
4288 4326
4289 i915_gem_object_init(obj, &i915_gem_object_ops); 4327 i915_gem_object_init(obj, &i915_gem_object_ops);
4290 4328
@@ -4308,6 +4346,9 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4308 } else 4346 } else
4309 obj->cache_level = I915_CACHE_NONE; 4347 obj->cache_level = I915_CACHE_NONE;
4310 4348
4349 obj->cache_coherent = i915_gem_object_is_coherent(obj);
4350 obj->cache_dirty = !obj->cache_coherent;
4351
4311 trace_i915_gem_object_create(obj); 4352 trace_i915_gem_object_create(obj);
4312 4353
4313 return obj; 4354 return obj;
@@ -4356,7 +4397,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
4356 GEM_BUG_ON(i915_gem_object_is_active(obj)); 4397 GEM_BUG_ON(i915_gem_object_is_active(obj));
4357 list_for_each_entry_safe(vma, vn, 4398 list_for_each_entry_safe(vma, vn,
4358 &obj->vma_list, obj_link) { 4399 &obj->vma_list, obj_link) {
4359 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
4360 GEM_BUG_ON(i915_vma_is_active(vma)); 4400 GEM_BUG_ON(i915_vma_is_active(vma));
4361 vma->flags &= ~I915_VMA_PIN_MASK; 4401 vma->flags &= ~I915_VMA_PIN_MASK;
4362 i915_vma_close(vma); 4402 i915_vma_close(vma);
@@ -4763,7 +4803,9 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
4763 */ 4803 */
4764 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4804 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4765 4805
4766 i915_gem_init_userptr(dev_priv); 4806 ret = i915_gem_init_userptr(dev_priv);
4807 if (ret)
4808 goto out_unlock;
4767 4809
4768 ret = i915_gem_init_ggtt(dev_priv); 4810 ret = i915_gem_init_ggtt(dev_priv);
4769 if (ret) 4811 if (ret)
@@ -4974,10 +5016,8 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4974 5016
4975 mutex_lock(&dev_priv->drm.struct_mutex); 5017 mutex_lock(&dev_priv->drm.struct_mutex);
4976 for (p = phases; *p; p++) { 5018 for (p = phases; *p; p++) {
4977 list_for_each_entry(obj, *p, global_link) { 5019 list_for_each_entry(obj, *p, global_link)
4978 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 5020 __start_cpu_write(obj);
4979 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4980 }
4981 } 5021 }
4982 mutex_unlock(&dev_priv->drm.struct_mutex); 5022 mutex_unlock(&dev_priv->drm.struct_mutex);
4983 5023
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 41aa598c4f3b..c93005c2e0fb 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -114,12 +114,27 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
114 list_for_each_entry(obj, list, batch_pool_link) { 114 list_for_each_entry(obj, list, batch_pool_link) {
115 /* The batches are strictly LRU ordered */ 115 /* The batches are strictly LRU ordered */
116 if (i915_gem_object_is_active(obj)) { 116 if (i915_gem_object_is_active(obj)) {
117 if (!reservation_object_test_signaled_rcu(obj->resv, 117 struct reservation_object *resv = obj->resv;
118 true)) 118
119 if (!reservation_object_test_signaled_rcu(resv, true))
119 break; 120 break;
120 121
121 i915_gem_retire_requests(pool->engine->i915); 122 i915_gem_retire_requests(pool->engine->i915);
122 GEM_BUG_ON(i915_gem_object_is_active(obj)); 123 GEM_BUG_ON(i915_gem_object_is_active(obj));
124
125 /*
126 * The object is now idle, clear the array of shared
127 * fences before we add a new request. Although, we
128 * remain on the same engine, we may be on a different
129 * timeline and so may continually grow the array,
130 * trapping a reference to all the old fences, rather
131 * than replace the existing fence.
132 */
133 if (rcu_access_pointer(resv->fence)) {
134 reservation_object_lock(resv, NULL);
135 reservation_object_add_excl_fence(resv, NULL);
136 reservation_object_unlock(resv);
137 }
123 } 138 }
124 139
125 GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv, 140 GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index ffac7a1f0caf..152f16c11878 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -71,8 +71,6 @@ static const struct dma_fence_ops i915_clflush_ops = {
71static void __i915_do_clflush(struct drm_i915_gem_object *obj) 71static void __i915_do_clflush(struct drm_i915_gem_object *obj)
72{ 72{
73 drm_clflush_sg(obj->mm.pages); 73 drm_clflush_sg(obj->mm.pages);
74 obj->cache_dirty = false;
75
76 intel_fb_obj_flush(obj, ORIGIN_CPU); 74 intel_fb_obj_flush(obj, ORIGIN_CPU);
77} 75}
78 76
@@ -81,9 +79,6 @@ static void i915_clflush_work(struct work_struct *work)
81 struct clflush *clflush = container_of(work, typeof(*clflush), work); 79 struct clflush *clflush = container_of(work, typeof(*clflush), work);
82 struct drm_i915_gem_object *obj = clflush->obj; 80 struct drm_i915_gem_object *obj = clflush->obj;
83 81
84 if (!obj->cache_dirty)
85 goto out;
86
87 if (i915_gem_object_pin_pages(obj)) { 82 if (i915_gem_object_pin_pages(obj)) {
88 DRM_ERROR("Failed to acquire obj->pages for clflushing\n"); 83 DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
89 goto out; 84 goto out;
@@ -131,10 +126,10 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
131 * anything not backed by physical memory we consider to be always 126 * anything not backed by physical memory we consider to be always
132 * coherent and not need clflushing. 127 * coherent and not need clflushing.
133 */ 128 */
134 if (!i915_gem_object_has_struct_page(obj)) 129 if (!i915_gem_object_has_struct_page(obj)) {
130 obj->cache_dirty = false;
135 return; 131 return;
136 132 }
137 obj->cache_dirty = true;
138 133
139 /* If the GPU is snooping the contents of the CPU cache, 134 /* If the GPU is snooping the contents of the CPU cache,
140 * we do not need to manually clear the CPU cache lines. However, 135 * we do not need to manually clear the CPU cache lines. However,
@@ -144,7 +139,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
144 * snooping behaviour occurs naturally as the result of our domain 139 * snooping behaviour occurs naturally as the result of our domain
145 * tracking. 140 * tracking.
146 */ 141 */
147 if (!(flags & I915_CLFLUSH_FORCE) && i915_gem_object_is_coherent(obj)) 142 if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
148 return; 143 return;
149 144
150 trace_i915_gem_object_clflush(obj); 145 trace_i915_gem_object_clflush(obj);
@@ -153,6 +148,8 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
153 if (!(flags & I915_CLFLUSH_SYNC)) 148 if (!(flags & I915_CLFLUSH_SYNC))
154 clflush = kmalloc(sizeof(*clflush), GFP_KERNEL); 149 clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
155 if (clflush) { 150 if (clflush) {
151 GEM_BUG_ON(!obj->cache_dirty);
152
156 dma_fence_init(&clflush->dma, 153 dma_fence_init(&clflush->dma,
157 &i915_clflush_ops, 154 &i915_clflush_ops,
158 &clflush_lock, 155 &clflush_lock,
@@ -180,4 +177,6 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
180 } else { 177 } else {
181 GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU); 178 GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
182 } 179 }
180
181 obj->cache_dirty = false;
183} 182}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index c5d1666d7071..39ed58a21fc1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -85,6 +85,7 @@
85 * 85 *
86 */ 86 */
87 87
88#include <linux/log2.h>
88#include <drm/drmP.h> 89#include <drm/drmP.h>
89#include <drm/i915_drm.h> 90#include <drm/i915_drm.h>
90#include "i915_drv.h" 91#include "i915_drv.h"
@@ -92,6 +93,71 @@
92 93
93#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 94#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
94 95
96/* Initial size (as log2) to preallocate the handle->object hashtable */
97#define VMA_HT_BITS 2u /* 4 x 2 pointers, 64 bytes minimum */
98
99static void resize_vma_ht(struct work_struct *work)
100{
101 struct i915_gem_context_vma_lut *lut =
102 container_of(work, typeof(*lut), resize);
103 unsigned int bits, new_bits, size, i;
104 struct hlist_head *new_ht;
105
106 GEM_BUG_ON(!(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS));
107
108 bits = 1 + ilog2(4*lut->ht_count/3 + 1);
109 new_bits = min_t(unsigned int,
110 max(bits, VMA_HT_BITS),
111 sizeof(unsigned int) * BITS_PER_BYTE - 1);
112 if (new_bits == lut->ht_bits)
113 goto out;
114
115 new_ht = kzalloc(sizeof(*new_ht)<<new_bits, GFP_KERNEL | __GFP_NOWARN);
116 if (!new_ht)
117 new_ht = vzalloc(sizeof(*new_ht)<<new_bits);
118 if (!new_ht)
119 /* Pretend resize succeeded and stop calling us for a bit! */
120 goto out;
121
122 size = BIT(lut->ht_bits);
123 for (i = 0; i < size; i++) {
124 struct i915_vma *vma;
125 struct hlist_node *tmp;
126
127 hlist_for_each_entry_safe(vma, tmp, &lut->ht[i], ctx_node)
128 hlist_add_head(&vma->ctx_node,
129 &new_ht[hash_32(vma->ctx_handle,
130 new_bits)]);
131 }
132 kvfree(lut->ht);
133 lut->ht = new_ht;
134 lut->ht_bits = new_bits;
135out:
136 smp_store_release(&lut->ht_size, BIT(bits));
137 GEM_BUG_ON(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS);
138}
139
140static void vma_lut_free(struct i915_gem_context *ctx)
141{
142 struct i915_gem_context_vma_lut *lut = &ctx->vma_lut;
143 unsigned int i, size;
144
145 if (lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS)
146 cancel_work_sync(&lut->resize);
147
148 size = BIT(lut->ht_bits);
149 for (i = 0; i < size; i++) {
150 struct i915_vma *vma;
151
152 hlist_for_each_entry(vma, &lut->ht[i], ctx_node) {
153 vma->obj->vma_hashed = NULL;
154 vma->ctx = NULL;
155 i915_vma_put(vma);
156 }
157 }
158 kvfree(lut->ht);
159}
160
95void i915_gem_context_free(struct kref *ctx_ref) 161void i915_gem_context_free(struct kref *ctx_ref)
96{ 162{
97 struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); 163 struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
@@ -101,6 +167,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
101 trace_i915_context_free(ctx); 167 trace_i915_context_free(ctx);
102 GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 168 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
103 169
170 vma_lut_free(ctx);
104 i915_ppgtt_put(ctx->ppgtt); 171 i915_ppgtt_put(ctx->ppgtt);
105 172
106 for (i = 0; i < I915_NUM_ENGINES; i++) { 173 for (i = 0; i < I915_NUM_ENGINES; i++) {
@@ -118,6 +185,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
118 185
119 kfree(ctx->name); 186 kfree(ctx->name);
120 put_pid(ctx->pid); 187 put_pid(ctx->pid);
188
121 list_del(&ctx->link); 189 list_del(&ctx->link);
122 190
123 ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id); 191 ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
@@ -201,13 +269,24 @@ __create_hw_context(struct drm_i915_private *dev_priv,
201 ctx->i915 = dev_priv; 269 ctx->i915 = dev_priv;
202 ctx->priority = I915_PRIORITY_NORMAL; 270 ctx->priority = I915_PRIORITY_NORMAL;
203 271
272 ctx->vma_lut.ht_bits = VMA_HT_BITS;
273 ctx->vma_lut.ht_size = BIT(VMA_HT_BITS);
274 BUILD_BUG_ON(BIT(VMA_HT_BITS) == I915_CTX_RESIZE_IN_PROGRESS);
275 ctx->vma_lut.ht = kcalloc(ctx->vma_lut.ht_size,
276 sizeof(*ctx->vma_lut.ht),
277 GFP_KERNEL);
278 if (!ctx->vma_lut.ht)
279 goto err_out;
280
281 INIT_WORK(&ctx->vma_lut.resize, resize_vma_ht);
282
204 /* Default context will never have a file_priv */ 283 /* Default context will never have a file_priv */
205 ret = DEFAULT_CONTEXT_HANDLE; 284 ret = DEFAULT_CONTEXT_HANDLE;
206 if (file_priv) { 285 if (file_priv) {
207 ret = idr_alloc(&file_priv->context_idr, ctx, 286 ret = idr_alloc(&file_priv->context_idr, ctx,
208 DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL); 287 DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
209 if (ret < 0) 288 if (ret < 0)
210 goto err_out; 289 goto err_lut;
211 } 290 }
212 ctx->user_handle = ret; 291 ctx->user_handle = ret;
213 292
@@ -248,6 +327,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
248err_pid: 327err_pid:
249 put_pid(ctx->pid); 328 put_pid(ctx->pid);
250 idr_remove(&file_priv->context_idr, ctx->user_handle); 329 idr_remove(&file_priv->context_idr, ctx->user_handle);
330err_lut:
331 kvfree(ctx->vma_lut.ht);
251err_out: 332err_out:
252 context_close(ctx); 333 context_close(ctx);
253 return ERR_PTR(ret); 334 return ERR_PTR(ret);
@@ -1034,9 +1115,6 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
1034 if (args->flags || args->pad) 1115 if (args->flags || args->pad)
1035 return -EINVAL; 1116 return -EINVAL;
1036 1117
1037 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
1038 return -EPERM;
1039
1040 ret = i915_mutex_lock_interruptible(dev); 1118 ret = i915_mutex_lock_interruptible(dev);
1041 if (ret) 1119 if (ret)
1042 return ret; 1120 return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 4af2ab94558b..82c99ba92ad3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -143,6 +143,32 @@ struct i915_gem_context {
143 /** ggtt_offset_bias: placement restriction for context objects */ 143 /** ggtt_offset_bias: placement restriction for context objects */
144 u32 ggtt_offset_bias; 144 u32 ggtt_offset_bias;
145 145
146 struct i915_gem_context_vma_lut {
147 /** ht_size: last request size to allocate the hashtable for. */
148 unsigned int ht_size;
149#define I915_CTX_RESIZE_IN_PROGRESS BIT(0)
150 /** ht_bits: real log2(size) of hashtable. */
151 unsigned int ht_bits;
152 /** ht_count: current number of entries inside the hashtable */
153 unsigned int ht_count;
154
155 /** ht: the array of buckets comprising the simple hashtable */
156 struct hlist_head *ht;
157
158 /**
159 * resize: After an execbuf completes, we check the load factor
160 * of the hashtable. If the hashtable is too full, or too empty,
161 * we schedule a task to resize the hashtable. During the
162 * resize, the entries are moved between different buckets and
163 * so we cannot simultaneously read the hashtable as it is
164 * being resized (unlike rhashtable). Therefore we treat the
165 * active work as a strong barrier, pausing a subsequent
166 * execbuf to wait for the resize worker to complete, if
167 * required.
168 */
169 struct work_struct resize;
170 } vma_lut;
171
146 /** engine: per-engine logical HW state */ 172 /** engine: per-engine logical HW state */
147 struct intel_context { 173 struct intel_context {
148 struct i915_vma *state; 174 struct i915_vma *state;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 51e365f70464..a193f1b36c67 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -50,6 +50,29 @@ static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
50 return true; 50 return true;
51} 51}
52 52
53static int ggtt_flush(struct drm_i915_private *i915)
54{
55 int err;
56
57 /* Not everything in the GGTT is tracked via vma (otherwise we
58 * could evict as required with minimal stalling) so we are forced
59 * to idle the GPU and explicitly retire outstanding requests in
60 * the hopes that we can then remove contexts and the like only
61 * bound by their active reference.
62 */
63 err = i915_gem_switch_to_kernel_context(i915);
64 if (err)
65 return err;
66
67 err = i915_gem_wait_for_idle(i915,
68 I915_WAIT_INTERRUPTIBLE |
69 I915_WAIT_LOCKED);
70 if (err)
71 return err;
72
73 return 0;
74}
75
53static bool 76static bool
54mark_free(struct drm_mm_scan *scan, 77mark_free(struct drm_mm_scan *scan,
55 struct i915_vma *vma, 78 struct i915_vma *vma,
@@ -59,13 +82,10 @@ mark_free(struct drm_mm_scan *scan,
59 if (i915_vma_is_pinned(vma)) 82 if (i915_vma_is_pinned(vma))
60 return false; 83 return false;
61 84
62 if (WARN_ON(!list_empty(&vma->exec_list)))
63 return false;
64
65 if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link)) 85 if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
66 return false; 86 return false;
67 87
68 list_add(&vma->exec_list, unwind); 88 list_add(&vma->evict_link, unwind);
69 return drm_mm_scan_add_block(scan, &vma->node); 89 return drm_mm_scan_add_block(scan, &vma->node);
70} 90}
71 91
@@ -157,11 +177,9 @@ search_again:
157 } while (*++phase); 177 } while (*++phase);
158 178
159 /* Nothing found, clean up and bail out! */ 179 /* Nothing found, clean up and bail out! */
160 list_for_each_entry_safe(vma, next, &eviction_list, exec_list) { 180 list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
161 ret = drm_mm_scan_remove_block(&scan, &vma->node); 181 ret = drm_mm_scan_remove_block(&scan, &vma->node);
162 BUG_ON(ret); 182 BUG_ON(ret);
163
164 INIT_LIST_HEAD(&vma->exec_list);
165 } 183 }
166 184
167 /* Can we unpin some objects such as idle hw contents, 185 /* Can we unpin some objects such as idle hw contents,
@@ -180,19 +198,7 @@ search_again:
180 return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC; 198 return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
181 } 199 }
182 200
183 /* Not everything in the GGTT is tracked via vma (otherwise we 201 ret = ggtt_flush(dev_priv);
184 * could evict as required with minimal stalling) so we are forced
185 * to idle the GPU and explicitly retire outstanding requests in
186 * the hopes that we can then remove contexts and the like only
187 * bound by their active reference.
188 */
189 ret = i915_gem_switch_to_kernel_context(dev_priv);
190 if (ret)
191 return ret;
192
193 ret = i915_gem_wait_for_idle(dev_priv,
194 I915_WAIT_INTERRUPTIBLE |
195 I915_WAIT_LOCKED);
196 if (ret) 202 if (ret)
197 return ret; 203 return ret;
198 204
@@ -205,21 +211,16 @@ found:
205 * calling unbind (which may remove the active reference 211 * calling unbind (which may remove the active reference
206 * of any of our objects, thus corrupting the list). 212 * of any of our objects, thus corrupting the list).
207 */ 213 */
208 list_for_each_entry_safe(vma, next, &eviction_list, exec_list) { 214 list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
209 if (drm_mm_scan_remove_block(&scan, &vma->node)) 215 if (drm_mm_scan_remove_block(&scan, &vma->node))
210 __i915_vma_pin(vma); 216 __i915_vma_pin(vma);
211 else 217 else
212 list_del_init(&vma->exec_list); 218 list_del(&vma->evict_link);
213 } 219 }
214 220
215 /* Unbinding will emit any required flushes */ 221 /* Unbinding will emit any required flushes */
216 ret = 0; 222 ret = 0;
217 while (!list_empty(&eviction_list)) { 223 list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
218 vma = list_first_entry(&eviction_list,
219 struct i915_vma,
220 exec_list);
221
222 list_del_init(&vma->exec_list);
223 __i915_vma_unpin(vma); 224 __i915_vma_unpin(vma);
224 if (ret == 0) 225 if (ret == 0)
225 ret = i915_vma_unbind(vma); 226 ret = i915_vma_unbind(vma);
@@ -315,7 +316,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
315 } 316 }
316 317
317 /* Overlap of objects in the same batch? */ 318 /* Overlap of objects in the same batch? */
318 if (i915_vma_is_pinned(vma) || !list_empty(&vma->exec_list)) { 319 if (i915_vma_is_pinned(vma)) {
319 ret = -ENOSPC; 320 ret = -ENOSPC;
320 if (vma->exec_entry && 321 if (vma->exec_entry &&
321 vma->exec_entry->flags & EXEC_OBJECT_PINNED) 322 vma->exec_entry->flags & EXEC_OBJECT_PINNED)
@@ -332,11 +333,10 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
332 * reference) another in our eviction list. 333 * reference) another in our eviction list.
333 */ 334 */
334 __i915_vma_pin(vma); 335 __i915_vma_pin(vma);
335 list_add(&vma->exec_list, &eviction_list); 336 list_add(&vma->evict_link, &eviction_list);
336 } 337 }
337 338
338 list_for_each_entry_safe(vma, next, &eviction_list, exec_list) { 339 list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
339 list_del_init(&vma->exec_list);
340 __i915_vma_unpin(vma); 340 __i915_vma_unpin(vma);
341 if (ret == 0) 341 if (ret == 0)
342 ret = i915_vma_unbind(vma); 342 ret = i915_vma_unbind(vma);
@@ -348,10 +348,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
348/** 348/**
349 * i915_gem_evict_vm - Evict all idle vmas from a vm 349 * i915_gem_evict_vm - Evict all idle vmas from a vm
350 * @vm: Address space to cleanse 350 * @vm: Address space to cleanse
351 * @do_idle: Boolean directing whether to idle first.
352 * 351 *
353 * This function evicts all idles vmas from a vm. If all unpinned vmas should be 352 * This function evicts all vmas from a vm.
354 * evicted the @do_idle needs to be set to true.
355 * 353 *
356 * This is used by the execbuf code as a last-ditch effort to defragment the 354 * This is used by the execbuf code as a last-ditch effort to defragment the
357 * address space. 355 * address space.
@@ -359,37 +357,50 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
359 * To clarify: This is for freeing up virtual address space, not for freeing 357 * To clarify: This is for freeing up virtual address space, not for freeing
360 * memory in e.g. the shrinker. 358 * memory in e.g. the shrinker.
361 */ 359 */
362int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) 360int i915_gem_evict_vm(struct i915_address_space *vm)
363{ 361{
362 struct list_head *phases[] = {
363 &vm->inactive_list,
364 &vm->active_list,
365 NULL
366 }, **phase;
367 struct list_head eviction_list;
364 struct i915_vma *vma, *next; 368 struct i915_vma *vma, *next;
365 int ret; 369 int ret;
366 370
367 lockdep_assert_held(&vm->i915->drm.struct_mutex); 371 lockdep_assert_held(&vm->i915->drm.struct_mutex);
368 trace_i915_gem_evict_vm(vm); 372 trace_i915_gem_evict_vm(vm);
369 373
370 if (do_idle) { 374 /* Switch back to the default context in order to unpin
371 struct drm_i915_private *dev_priv = vm->i915; 375 * the existing context objects. However, such objects only
372 376 * pin themselves inside the global GTT and performing the
373 if (i915_is_ggtt(vm)) { 377 * switch otherwise is ineffective.
374 ret = i915_gem_switch_to_kernel_context(dev_priv); 378 */
375 if (ret) 379 if (i915_is_ggtt(vm)) {
376 return ret; 380 ret = ggtt_flush(vm->i915);
377 }
378
379 ret = i915_gem_wait_for_idle(dev_priv,
380 I915_WAIT_INTERRUPTIBLE |
381 I915_WAIT_LOCKED);
382 if (ret) 381 if (ret)
383 return ret; 382 return ret;
384
385 WARN_ON(!list_empty(&vm->active_list));
386 } 383 }
387 384
388 list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link) 385 INIT_LIST_HEAD(&eviction_list);
389 if (!i915_vma_is_pinned(vma)) 386 phase = phases;
390 WARN_ON(i915_vma_unbind(vma)); 387 do {
388 list_for_each_entry(vma, *phase, vm_link) {
389 if (i915_vma_is_pinned(vma))
390 continue;
391 391
392 return 0; 392 __i915_vma_pin(vma);
393 list_add(&vma->evict_link, &eviction_list);
394 }
395 } while (*++phase);
396
397 ret = 0;
398 list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
399 __i915_vma_unpin(vma);
400 if (ret == 0)
401 ret = i915_vma_unbind(vma);
402 }
403 return ret;
393} 404}
394 405
395#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 406#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 04211c970b9f..eb46dfa374a7 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -40,143 +40,726 @@
40#include "intel_drv.h" 40#include "intel_drv.h"
41#include "intel_frontbuffer.h" 41#include "intel_frontbuffer.h"
42 42
43#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */ 43enum {
44 FORCE_CPU_RELOC = 1,
45 FORCE_GTT_RELOC,
46 FORCE_GPU_RELOC,
47#define DBG_FORCE_RELOC 0 /* choose one of the above! */
48};
49
50#define __EXEC_OBJECT_HAS_REF BIT(31)
51#define __EXEC_OBJECT_HAS_PIN BIT(30)
52#define __EXEC_OBJECT_HAS_FENCE BIT(29)
53#define __EXEC_OBJECT_NEEDS_MAP BIT(28)
54#define __EXEC_OBJECT_NEEDS_BIAS BIT(27)
55#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 27) /* all of the above */
56#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
44 57
45#define __EXEC_OBJECT_HAS_PIN (1<<31) 58#define __EXEC_HAS_RELOC BIT(31)
46#define __EXEC_OBJECT_HAS_FENCE (1<<30) 59#define __EXEC_VALIDATED BIT(30)
47#define __EXEC_OBJECT_NEEDS_MAP (1<<29) 60#define UPDATE PIN_OFFSET_FIXED
48#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
49#define __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */
50 61
51#define BATCH_OFFSET_BIAS (256*1024) 62#define BATCH_OFFSET_BIAS (256*1024)
52 63
53struct i915_execbuffer_params { 64#define __I915_EXEC_ILLEGAL_FLAGS \
54 struct drm_device *dev; 65 (__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)
55 struct drm_file *file; 66
56 struct i915_vma *batch; 67/**
57 u32 dispatch_flags; 68 * DOC: User command execution
58 u32 args_batch_start_offset; 69 *
59 struct intel_engine_cs *engine; 70 * Userspace submits commands to be executed on the GPU as an instruction
60 struct i915_gem_context *ctx; 71 * stream within a GEM object we call a batchbuffer. This instructions may
61 struct drm_i915_gem_request *request; 72 * refer to other GEM objects containing auxiliary state such as kernels,
62}; 73 * samplers, render targets and even secondary batchbuffers. Userspace does
74 * not know where in the GPU memory these objects reside and so before the
75 * batchbuffer is passed to the GPU for execution, those addresses in the
76 * batchbuffer and auxiliary objects are updated. This is known as relocation,
77 * or patching. To try and avoid having to relocate each object on the next
78 * execution, userspace is told the location of those objects in this pass,
79 * but this remains just a hint as the kernel may choose a new location for
80 * any object in the future.
81 *
82 * Processing an execbuf ioctl is conceptually split up into a few phases.
83 *
84 * 1. Validation - Ensure all the pointers, handles and flags are valid.
85 * 2. Reservation - Assign GPU address space for every object
86 * 3. Relocation - Update any addresses to point to the final locations
87 * 4. Serialisation - Order the request with respect to its dependencies
88 * 5. Construction - Construct a request to execute the batchbuffer
89 * 6. Submission (at some point in the future execution)
90 *
91 * Reserving resources for the execbuf is the most complicated phase. We
92 * neither want to have to migrate the object in the address space, nor do
93 * we want to have to update any relocations pointing to this object. Ideally,
94 * we want to leave the object where it is and for all the existing relocations
95 * to match. If the object is given a new address, or if userspace thinks the
96 * object is elsewhere, we have to parse all the relocation entries and update
97 * the addresses. Userspace can set the I915_EXEC_NORELOC flag to hint that
98 * all the target addresses in all of its objects match the value in the
99 * relocation entries and that they all match the presumed offsets given by the
100 * list of execbuffer objects. Using this knowledge, we know that if we haven't
101 * moved any buffers, all the relocation entries are valid and we can skip
102 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
103 * hang.) The requirement for using I915_EXEC_NO_RELOC are:
104 *
105 * The addresses written in the objects must match the corresponding
106 * reloc.presumed_offset which in turn must match the corresponding
107 * execobject.offset.
108 *
109 * Any render targets written to in the batch must be flagged with
110 * EXEC_OBJECT_WRITE.
111 *
112 * To avoid stalling, execobject.offset should match the current
113 * address of that object within the active context.
114 *
115 * The reservation is done is multiple phases. First we try and keep any
116 * object already bound in its current location - so as long as meets the
117 * constraints imposed by the new execbuffer. Any object left unbound after the
118 * first pass is then fitted into any available idle space. If an object does
119 * not fit, all objects are removed from the reservation and the process rerun
120 * after sorting the objects into a priority order (more difficult to fit
121 * objects are tried first). Failing that, the entire VM is cleared and we try
122 * to fit the execbuf once last time before concluding that it simply will not
123 * fit.
124 *
125 * A small complication to all of this is that we allow userspace not only to
126 * specify an alignment and a size for the object in the address space, but
127 * we also allow userspace to specify the exact offset. This objects are
128 * simpler to place (the location is known a priori) all we have to do is make
129 * sure the space is available.
130 *
131 * Once all the objects are in place, patching up the buried pointers to point
132 * to the final locations is a fairly simple job of walking over the relocation
133 * entry arrays, looking up the right address and rewriting the value into
134 * the object. Simple! ... The relocation entries are stored in user memory
135 * and so to access them we have to copy them into a local buffer. That copy
136 * has to avoid taking any pagefaults as they may lead back to a GEM object
137 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
138 * the relocation into multiple passes. First we try to do everything within an
139 * atomic context (avoid the pagefaults) which requires that we never wait. If
140 * we detect that we may wait, or if we need to fault, then we have to fallback
141 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
142 * bells yet?) Dropping the mutex means that we lose all the state we have
143 * built up so far for the execbuf and we must reset any global data. However,
144 * we do leave the objects pinned in their final locations - which is a
145 * potential issue for concurrent execbufs. Once we have left the mutex, we can
146 * allocate and copy all the relocation entries into a large array at our
147 * leisure, reacquire the mutex, reclaim all the objects and other state and
148 * then proceed to update any incorrect addresses with the objects.
149 *
150 * As we process the relocation entries, we maintain a record of whether the
151 * object is being written to. Using NORELOC, we expect userspace to provide
152 * this information instead. We also check whether we can skip the relocation
153 * by comparing the expected value inside the relocation entry with the target's
154 * final address. If they differ, we have to map the current object and rewrite
155 * the 4 or 8 byte pointer within.
156 *
157 * Serialising an execbuf is quite simple according to the rules of the GEM
158 * ABI. Execution within each context is ordered by the order of submission.
159 * Writes to any GEM object are in order of submission and are exclusive. Reads
160 * from a GEM object are unordered with respect to other reads, but ordered by
161 * writes. A write submitted after a read cannot occur before the read, and
162 * similarly any read submitted after a write cannot occur before the write.
163 * Writes are ordered between engines such that only one write occurs at any
164 * time (completing any reads beforehand) - using semaphores where available
165 * and CPU serialisation otherwise. Other GEM access obey the same rules, any
166 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
167 * reads before starting, and any read (either using set-domain or pread) must
168 * flush all GPU writes before starting. (Note we only employ a barrier before,
169 * we currently rely on userspace not concurrently starting a new execution
170 * whilst reading or writing to an object. This may be an advantage or not
171 * depending on how much you trust userspace not to shoot themselves in the
172 * foot.) Serialisation may just result in the request being inserted into
173 * a DAG awaiting its turn, but most simple is to wait on the CPU until
174 * all dependencies are resolved.
175 *
176 * After all of that, is just a matter of closing the request and handing it to
177 * the hardware (well, leaving it in a queue to be executed). However, we also
178 * offer the ability for batchbuffers to be run with elevated privileges so
179 * that they access otherwise hidden registers. (Used to adjust L3 cache etc.)
180 * Before any batch is given extra privileges we first must check that it
181 * contains no nefarious instructions, we check that each instruction is from
182 * our whitelist and all registers are also from an allowed list. We first
183 * copy the user's batchbuffer to a shadow (so that the user doesn't have
184 * access to it, either by the CPU or GPU as we scan it) and then parse each
185 * instruction. If everything is ok, we set a flag telling the hardware to run
186 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
187 */
188
189struct i915_execbuffer {
190 struct drm_i915_private *i915; /** i915 backpointer */
191 struct drm_file *file; /** per-file lookup tables and limits */
192 struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
193 struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
194
195 struct intel_engine_cs *engine; /** engine to queue the request to */
196 struct i915_gem_context *ctx; /** context for building the request */
197 struct i915_address_space *vm; /** GTT and vma for the request */
198
199 struct drm_i915_gem_request *request; /** our request to build */
200 struct i915_vma *batch; /** identity of the batch obj/vma */
201
202 /** actual size of execobj[] as we may extend it for the cmdparser */
203 unsigned int buffer_count;
204
205 /** list of vma not yet bound during reservation phase */
206 struct list_head unbound;
207
208 /** list of vma that have execobj.relocation_count */
209 struct list_head relocs;
63 210
64struct eb_vmas { 211 /**
65 struct drm_i915_private *i915; 212 * Track the most recently used object for relocations, as we
66 struct list_head vmas; 213 * frequently have to perform multiple relocations within the same
67 int and; 214 * obj/page
68 union { 215 */
69 struct i915_vma *lut[0]; 216 struct reloc_cache {
70 struct hlist_head buckets[0]; 217 struct drm_mm_node node; /** temporary GTT binding */
71 }; 218 unsigned long vaddr; /** Current kmap address */
219 unsigned long page; /** Currently mapped page index */
220 unsigned int gen; /** Cached value of INTEL_GEN */
221 bool use_64bit_reloc : 1;
222 bool has_llc : 1;
223 bool has_fence : 1;
224 bool needs_unfenced : 1;
225
226 struct drm_i915_gem_request *rq;
227 u32 *rq_cmd;
228 unsigned int rq_size;
229 } reloc_cache;
230
231 u64 invalid_flags; /** Set of execobj.flags that are invalid */
232 u32 context_flags; /** Set of execobj.flags to insert from the ctx */
233
234 u32 batch_start_offset; /** Location within object of batch */
235 u32 batch_len; /** Length of batch within object */
236 u32 batch_flags; /** Flags composed for emit_bb_start() */
237
238 /**
239 * Indicate either the size of the hastable used to resolve
240 * relocation handles, or if negative that we are using a direct
241 * index into the execobj[].
242 */
243 int lut_size;
244 struct hlist_head *buckets; /** ht for relocation handles */
72}; 245};
73 246
74static struct eb_vmas * 247/*
75eb_create(struct drm_i915_private *i915, 248 * As an alternative to creating a hashtable of handle-to-vma for a batch,
76 struct drm_i915_gem_execbuffer2 *args) 249 * we used the last available reserved field in the execobject[] and stash
250 * a link from the execobj to its vma.
251 */
252#define __exec_to_vma(ee) (ee)->rsvd2
253#define exec_to_vma(ee) u64_to_ptr(struct i915_vma, __exec_to_vma(ee))
254
255/*
256 * Used to convert any address to canonical form.
257 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
258 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
259 * addresses to be in a canonical form:
260 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
261 * canonical form [63:48] == [47]."
262 */
263#define GEN8_HIGH_ADDRESS_BIT 47
264static inline u64 gen8_canonical_addr(u64 address)
77{ 265{
78 struct eb_vmas *eb = NULL; 266 return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
79 267}
80 if (args->flags & I915_EXEC_HANDLE_LUT) {
81 unsigned size = args->buffer_count;
82 size *= sizeof(struct i915_vma *);
83 size += sizeof(struct eb_vmas);
84 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
85 }
86
87 if (eb == NULL) {
88 unsigned size = args->buffer_count;
89 unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
90 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
91 while (count > 2*size)
92 count >>= 1;
93 eb = kzalloc(count*sizeof(struct hlist_head) +
94 sizeof(struct eb_vmas),
95 GFP_TEMPORARY);
96 if (eb == NULL)
97 return eb;
98
99 eb->and = count - 1;
100 } else
101 eb->and = -args->buffer_count;
102 268
103 eb->i915 = i915; 269static inline u64 gen8_noncanonical_addr(u64 address)
104 INIT_LIST_HEAD(&eb->vmas); 270{
105 return eb; 271 return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
106} 272}
107 273
108static void 274static int eb_create(struct i915_execbuffer *eb)
109eb_reset(struct eb_vmas *eb)
110{ 275{
111 if (eb->and >= 0) 276 if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
112 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); 277 unsigned int size = 1 + ilog2(eb->buffer_count);
278
279 /*
280 * Without a 1:1 association between relocation handles and
281 * the execobject[] index, we instead create a hashtable.
282 * We size it dynamically based on available memory, starting
283 * first with 1:1 assocative hash and scaling back until
284 * the allocation succeeds.
285 *
286 * Later on we use a positive lut_size to indicate we are
287 * using this hashtable, and a negative value to indicate a
288 * direct lookup.
289 */
290 do {
291 eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
292 GFP_TEMPORARY |
293 __GFP_NORETRY |
294 __GFP_NOWARN);
295 if (eb->buckets)
296 break;
297 } while (--size);
298
299 if (unlikely(!eb->buckets)) {
300 eb->buckets = kzalloc(sizeof(struct hlist_head),
301 GFP_TEMPORARY);
302 if (unlikely(!eb->buckets))
303 return -ENOMEM;
304 }
305
306 eb->lut_size = size;
307 } else {
308 eb->lut_size = -eb->buffer_count;
309 }
310
311 return 0;
113} 312}
114 313
115static struct i915_vma * 314static bool
116eb_get_batch(struct eb_vmas *eb) 315eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
316 const struct i915_vma *vma)
317{
318 if (!(entry->flags & __EXEC_OBJECT_HAS_PIN))
319 return true;
320
321 if (vma->node.size < entry->pad_to_size)
322 return true;
323
324 if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
325 return true;
326
327 if (entry->flags & EXEC_OBJECT_PINNED &&
328 vma->node.start != entry->offset)
329 return true;
330
331 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
332 vma->node.start < BATCH_OFFSET_BIAS)
333 return true;
334
335 if (!(entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
336 (vma->node.start + vma->node.size - 1) >> 32)
337 return true;
338
339 return false;
340}
341
342static inline void
343eb_pin_vma(struct i915_execbuffer *eb,
344 struct drm_i915_gem_exec_object2 *entry,
345 struct i915_vma *vma)
346{
347 u64 flags;
348
349 if (vma->node.size)
350 flags = vma->node.start;
351 else
352 flags = entry->offset & PIN_OFFSET_MASK;
353
354 flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
355 if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_GTT))
356 flags |= PIN_GLOBAL;
357
358 if (unlikely(i915_vma_pin(vma, 0, 0, flags)))
359 return;
360
361 if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
362 if (unlikely(i915_vma_get_fence(vma))) {
363 i915_vma_unpin(vma);
364 return;
365 }
366
367 if (i915_vma_pin_fence(vma))
368 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
369 }
370
371 entry->flags |= __EXEC_OBJECT_HAS_PIN;
372}
373
374static inline void
375__eb_unreserve_vma(struct i915_vma *vma,
376 const struct drm_i915_gem_exec_object2 *entry)
377{
378 GEM_BUG_ON(!(entry->flags & __EXEC_OBJECT_HAS_PIN));
379
380 if (unlikely(entry->flags & __EXEC_OBJECT_HAS_FENCE))
381 i915_vma_unpin_fence(vma);
382
383 __i915_vma_unpin(vma);
384}
385
386static inline void
387eb_unreserve_vma(struct i915_vma *vma,
388 struct drm_i915_gem_exec_object2 *entry)
117{ 389{
118 struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list); 390 if (!(entry->flags & __EXEC_OBJECT_HAS_PIN))
391 return;
392
393 __eb_unreserve_vma(vma, entry);
394 entry->flags &= ~__EXEC_OBJECT_RESERVED;
395}
396
397static int
398eb_validate_vma(struct i915_execbuffer *eb,
399 struct drm_i915_gem_exec_object2 *entry,
400 struct i915_vma *vma)
401{
402 if (unlikely(entry->flags & eb->invalid_flags))
403 return -EINVAL;
404
405 if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
406 return -EINVAL;
119 407
120 /* 408 /*
121 * SNA is doing fancy tricks with compressing batch buffers, which leads 409 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
122 * to negative relocation deltas. Usually that works out ok since the 410 * any non-page-aligned or non-canonical addresses.
123 * relocate address is still positive, except when the batch is placed
124 * very low in the GTT. Ensure this doesn't happen.
125 *
126 * Note that actual hangs have only been observed on gen7, but for
127 * paranoia do it everywhere.
128 */ 411 */
129 if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0) 412 if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
130 vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; 413 entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
414 return -EINVAL;
131 415
132 return vma; 416 /* pad_to_size was once a reserved field, so sanitize it */
417 if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
418 if (unlikely(offset_in_page(entry->pad_to_size)))
419 return -EINVAL;
420 } else {
421 entry->pad_to_size = 0;
422 }
423
424 if (unlikely(vma->exec_entry)) {
425 DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
426 entry->handle, (int)(entry - eb->exec));
427 return -EINVAL;
428 }
429
430 /*
431 * From drm_mm perspective address space is continuous,
432 * so from this point we're always using non-canonical
433 * form internally.
434 */
435 entry->offset = gen8_noncanonical_addr(entry->offset);
436
437 return 0;
133} 438}
134 439
135static int 440static int
136eb_lookup_vmas(struct eb_vmas *eb, 441eb_add_vma(struct i915_execbuffer *eb,
137 struct drm_i915_gem_exec_object2 *exec, 442 struct drm_i915_gem_exec_object2 *entry,
138 const struct drm_i915_gem_execbuffer2 *args, 443 struct i915_vma *vma)
139 struct i915_address_space *vm,
140 struct drm_file *file)
141{ 444{
142 struct drm_i915_gem_object *obj; 445 int err;
143 struct list_head objects;
144 int i, ret;
145 446
146 INIT_LIST_HEAD(&objects); 447 GEM_BUG_ON(i915_vma_is_closed(vma));
147 spin_lock(&file->table_lock); 448
148 /* Grab a reference to the object and release the lock so we can lookup 449 if (!(eb->args->flags & __EXEC_VALIDATED)) {
149 * or create the VMA without using GFP_ATOMIC */ 450 err = eb_validate_vma(eb, entry, vma);
150 for (i = 0; i < args->buffer_count; i++) { 451 if (unlikely(err))
151 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle)); 452 return err;
152 if (obj == NULL) { 453 }
153 spin_unlock(&file->table_lock); 454
154 DRM_DEBUG("Invalid object handle %d at index %d\n", 455 if (eb->lut_size >= 0) {
155 exec[i].handle, i); 456 vma->exec_handle = entry->handle;
156 ret = -ENOENT; 457 hlist_add_head(&vma->exec_node,
157 goto err; 458 &eb->buckets[hash_32(entry->handle,
459 eb->lut_size)]);
460 }
461
462 if (entry->relocation_count)
463 list_add_tail(&vma->reloc_link, &eb->relocs);
464
465 if (!eb->reloc_cache.has_fence) {
466 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
467 } else {
468 if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
469 eb->reloc_cache.needs_unfenced) &&
470 i915_gem_object_is_tiled(vma->obj))
471 entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
472 }
473
474 if (!(entry->flags & EXEC_OBJECT_PINNED))
475 entry->flags |= eb->context_flags;
476
477 /*
478 * Stash a pointer from the vma to execobj, so we can query its flags,
479 * size, alignment etc as provided by the user. Also we stash a pointer
480 * to the vma inside the execobj so that we can use a direct lookup
481 * to find the right target VMA when doing relocations.
482 */
483 vma->exec_entry = entry;
484 __exec_to_vma(entry) = (uintptr_t)vma;
485
486 err = 0;
487 eb_pin_vma(eb, entry, vma);
488 if (eb_vma_misplaced(entry, vma)) {
489 eb_unreserve_vma(vma, entry);
490
491 list_add_tail(&vma->exec_link, &eb->unbound);
492 if (drm_mm_node_allocated(&vma->node))
493 err = i915_vma_unbind(vma);
494 } else {
495 if (entry->offset != vma->node.start) {
496 entry->offset = vma->node.start | UPDATE;
497 eb->args->flags |= __EXEC_HAS_RELOC;
498 }
499 }
500 return err;
501}
502
503static inline int use_cpu_reloc(const struct reloc_cache *cache,
504 const struct drm_i915_gem_object *obj)
505{
506 if (!i915_gem_object_has_struct_page(obj))
507 return false;
508
509 if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
510 return true;
511
512 if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
513 return false;
514
515 return (cache->has_llc ||
516 obj->cache_dirty ||
517 obj->cache_level != I915_CACHE_NONE);
518}
519
520static int eb_reserve_vma(const struct i915_execbuffer *eb,
521 struct i915_vma *vma)
522{
523 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
524 u64 flags;
525 int err;
526
527 flags = PIN_USER | PIN_NONBLOCK;
528 if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
529 flags |= PIN_GLOBAL;
530
531 /*
532 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
533 * limit address to the first 4GBs for unflagged objects.
534 */
535 if (!(entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
536 flags |= PIN_ZONE_4G;
537
538 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
539 flags |= PIN_MAPPABLE;
540
541 if (entry->flags & EXEC_OBJECT_PINNED) {
542 flags |= entry->offset | PIN_OFFSET_FIXED;
543 flags &= ~PIN_NONBLOCK; /* force overlapping PINNED checks */
544 } else if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) {
545 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
546 }
547
548 err = i915_vma_pin(vma, entry->pad_to_size, entry->alignment, flags);
549 if (err)
550 return err;
551
552 if (entry->offset != vma->node.start) {
553 entry->offset = vma->node.start | UPDATE;
554 eb->args->flags |= __EXEC_HAS_RELOC;
555 }
556
557 entry->flags |= __EXEC_OBJECT_HAS_PIN;
558 GEM_BUG_ON(eb_vma_misplaced(entry, vma));
559
560 if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
561 err = i915_vma_get_fence(vma);
562 if (unlikely(err)) {
563 i915_vma_unpin(vma);
564 return err;
565 }
566
567 if (i915_vma_pin_fence(vma))
568 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
569 }
570
571 return 0;
572}
573
574static int eb_reserve(struct i915_execbuffer *eb)
575{
576 const unsigned int count = eb->buffer_count;
577 struct list_head last;
578 struct i915_vma *vma;
579 unsigned int i, pass;
580 int err;
581
582 /*
583 * Attempt to pin all of the buffers into the GTT.
584 * This is done in 3 phases:
585 *
586 * 1a. Unbind all objects that do not match the GTT constraints for
587 * the execbuffer (fenceable, mappable, alignment etc).
588 * 1b. Increment pin count for already bound objects.
589 * 2. Bind new objects.
590 * 3. Decrement pin count.
591 *
592 * This avoid unnecessary unbinding of later objects in order to make
593 * room for the earlier objects *unless* we need to defragment.
594 */
595
596 pass = 0;
597 err = 0;
598 do {
599 list_for_each_entry(vma, &eb->unbound, exec_link) {
600 err = eb_reserve_vma(eb, vma);
601 if (err)
602 break;
603 }
604 if (err != -ENOSPC)
605 return err;
606
607 /* Resort *all* the objects into priority order */
608 INIT_LIST_HEAD(&eb->unbound);
609 INIT_LIST_HEAD(&last);
610 for (i = 0; i < count; i++) {
611 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
612
613 if (entry->flags & EXEC_OBJECT_PINNED &&
614 entry->flags & __EXEC_OBJECT_HAS_PIN)
615 continue;
616
617 vma = exec_to_vma(entry);
618 eb_unreserve_vma(vma, entry);
619
620 if (entry->flags & EXEC_OBJECT_PINNED)
621 list_add(&vma->exec_link, &eb->unbound);
622 else if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
623 list_add_tail(&vma->exec_link, &eb->unbound);
624 else
625 list_add_tail(&vma->exec_link, &last);
626 }
627 list_splice_tail(&last, &eb->unbound);
628
629 switch (pass++) {
630 case 0:
631 break;
632
633 case 1:
634 /* Too fragmented, unbind everything and retry */
635 err = i915_gem_evict_vm(eb->vm);
636 if (err)
637 return err;
638 break;
639
640 default:
641 return -ENOSPC;
158 } 642 }
643 } while (1);
644}
645
646static inline struct hlist_head *
647ht_head(const struct i915_gem_context_vma_lut *lut, u32 handle)
648{
649 return &lut->ht[hash_32(handle, lut->ht_bits)];
650}
159 651
160 if (!list_empty(&obj->obj_exec_link)) { 652static inline bool
161 spin_unlock(&file->table_lock); 653ht_needs_resize(const struct i915_gem_context_vma_lut *lut)
162 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", 654{
163 obj, exec[i].handle, i); 655 return (4*lut->ht_count > 3*lut->ht_size ||
164 ret = -EINVAL; 656 4*lut->ht_count + 1 < lut->ht_size);
657}
658
659static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
660{
661 if (eb->args->flags & I915_EXEC_BATCH_FIRST)
662 return 0;
663 else
664 return eb->buffer_count - 1;
665}
666
667static int eb_select_context(struct i915_execbuffer *eb)
668{
669 struct i915_gem_context *ctx;
670
671 ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
672 if (unlikely(IS_ERR(ctx)))
673 return PTR_ERR(ctx);
674
675 if (unlikely(i915_gem_context_is_banned(ctx))) {
676 DRM_DEBUG("Context %u tried to submit while banned\n",
677 ctx->user_handle);
678 return -EIO;
679 }
680
681 eb->ctx = i915_gem_context_get(ctx);
682 eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
683
684 eb->context_flags = 0;
685 if (ctx->flags & CONTEXT_NO_ZEROMAP)
686 eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
687
688 return 0;
689}
690
691static int eb_lookup_vmas(struct i915_execbuffer *eb)
692{
693#define INTERMEDIATE BIT(0)
694 const unsigned int count = eb->buffer_count;
695 struct i915_gem_context_vma_lut *lut = &eb->ctx->vma_lut;
696 struct i915_vma *vma;
697 struct idr *idr;
698 unsigned int i;
699 int slow_pass = -1;
700 int err;
701
702 INIT_LIST_HEAD(&eb->relocs);
703 INIT_LIST_HEAD(&eb->unbound);
704
705 if (unlikely(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS))
706 flush_work(&lut->resize);
707 GEM_BUG_ON(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS);
708
709 for (i = 0; i < count; i++) {
710 __exec_to_vma(&eb->exec[i]) = 0;
711
712 hlist_for_each_entry(vma,
713 ht_head(lut, eb->exec[i].handle),
714 ctx_node) {
715 if (vma->ctx_handle != eb->exec[i].handle)
716 continue;
717
718 err = eb_add_vma(eb, &eb->exec[i], vma);
719 if (unlikely(err))
720 return err;
721
722 goto next_vma;
723 }
724
725 if (slow_pass < 0)
726 slow_pass = i;
727next_vma: ;
728 }
729
730 if (slow_pass < 0)
731 goto out;
732
733 spin_lock(&eb->file->table_lock);
734 /*
735 * Grab a reference to the object and release the lock so we can lookup
736 * or create the VMA without using GFP_ATOMIC
737 */
738 idr = &eb->file->object_idr;
739 for (i = slow_pass; i < count; i++) {
740 struct drm_i915_gem_object *obj;
741
742 if (__exec_to_vma(&eb->exec[i]))
743 continue;
744
745 obj = to_intel_bo(idr_find(idr, eb->exec[i].handle));
746 if (unlikely(!obj)) {
747 spin_unlock(&eb->file->table_lock);
748 DRM_DEBUG("Invalid object handle %d at index %d\n",
749 eb->exec[i].handle, i);
750 err = -ENOENT;
165 goto err; 751 goto err;
166 } 752 }
167 753
168 i915_gem_object_get(obj); 754 __exec_to_vma(&eb->exec[i]) = INTERMEDIATE | (uintptr_t)obj;
169 list_add_tail(&obj->obj_exec_link, &objects);
170 } 755 }
171 spin_unlock(&file->table_lock); 756 spin_unlock(&eb->file->table_lock);
172 757
173 i = 0; 758 for (i = slow_pass; i < count; i++) {
174 while (!list_empty(&objects)) { 759 struct drm_i915_gem_object *obj;
175 struct i915_vma *vma;
176 760
177 obj = list_first_entry(&objects, 761 if (!(__exec_to_vma(&eb->exec[i]) & INTERMEDIATE))
178 struct drm_i915_gem_object, 762 continue;
179 obj_exec_link);
180 763
181 /* 764 /*
182 * NOTE: We can leak any vmas created here when something fails 765 * NOTE: We can leak any vmas created here when something fails
@@ -186,59 +769,93 @@ eb_lookup_vmas(struct eb_vmas *eb,
186 * from the (obj, vm) we don't run the risk of creating 769 * from the (obj, vm) we don't run the risk of creating
187 * duplicated vmas for the same vm. 770 * duplicated vmas for the same vm.
188 */ 771 */
189 vma = i915_vma_instance(obj, vm, NULL); 772 obj = u64_to_ptr(typeof(*obj),
773 __exec_to_vma(&eb->exec[i]) & ~INTERMEDIATE);
774 vma = i915_vma_instance(obj, eb->vm, NULL);
190 if (unlikely(IS_ERR(vma))) { 775 if (unlikely(IS_ERR(vma))) {
191 DRM_DEBUG("Failed to lookup VMA\n"); 776 DRM_DEBUG("Failed to lookup VMA\n");
192 ret = PTR_ERR(vma); 777 err = PTR_ERR(vma);
193 goto err; 778 goto err;
194 } 779 }
195 780
196 /* Transfer ownership from the objects list to the vmas list. */ 781 /* First come, first served */
197 list_add_tail(&vma->exec_list, &eb->vmas); 782 if (!vma->ctx) {
198 list_del_init(&obj->obj_exec_link); 783 vma->ctx = eb->ctx;
784 vma->ctx_handle = eb->exec[i].handle;
785 hlist_add_head(&vma->ctx_node,
786 ht_head(lut, eb->exec[i].handle));
787 lut->ht_count++;
788 lut->ht_size |= I915_CTX_RESIZE_IN_PROGRESS;
789 if (i915_vma_is_ggtt(vma)) {
790 GEM_BUG_ON(obj->vma_hashed);
791 obj->vma_hashed = vma;
792 }
199 793
200 vma->exec_entry = &exec[i]; 794 i915_vma_get(vma);
201 if (eb->and < 0) {
202 eb->lut[i] = vma;
203 } else {
204 uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
205 vma->exec_handle = handle;
206 hlist_add_head(&vma->exec_node,
207 &eb->buckets[handle & eb->and]);
208 } 795 }
209 ++i;
210 }
211 796
212 return 0; 797 err = eb_add_vma(eb, &eb->exec[i], vma);
798 if (unlikely(err))
799 goto err;
213 800
801 /* Only after we validated the user didn't use our bits */
802 if (vma->ctx != eb->ctx) {
803 i915_vma_get(vma);
804 eb->exec[i].flags |= __EXEC_OBJECT_HAS_REF;
805 }
806 }
214 807
215err: 808 if (lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS) {
216 while (!list_empty(&objects)) { 809 if (ht_needs_resize(lut))
217 obj = list_first_entry(&objects, 810 queue_work(system_highpri_wq, &lut->resize);
218 struct drm_i915_gem_object, 811 else
219 obj_exec_link); 812 lut->ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
220 list_del_init(&obj->obj_exec_link);
221 i915_gem_object_put(obj);
222 } 813 }
814
815out:
816 /* take note of the batch buffer before we might reorder the lists */
817 i = eb_batch_index(eb);
818 eb->batch = exec_to_vma(&eb->exec[i]);
819
223 /* 820 /*
224 * Objects already transfered to the vmas list will be unreferenced by 821 * SNA is doing fancy tricks with compressing batch buffers, which leads
225 * eb_destroy. 822 * to negative relocation deltas. Usually that works out ok since the
823 * relocate address is still positive, except when the batch is placed
824 * very low in the GTT. Ensure this doesn't happen.
825 *
826 * Note that actual hangs have only been observed on gen7, but for
827 * paranoia do it everywhere.
226 */ 828 */
829 if (!(eb->exec[i].flags & EXEC_OBJECT_PINNED))
830 eb->exec[i].flags |= __EXEC_OBJECT_NEEDS_BIAS;
831 if (eb->reloc_cache.has_fence)
832 eb->exec[i].flags |= EXEC_OBJECT_NEEDS_FENCE;
833
834 eb->args->flags |= __EXEC_VALIDATED;
835 return eb_reserve(eb);
227 836
228 return ret; 837err:
838 for (i = slow_pass; i < count; i++) {
839 if (__exec_to_vma(&eb->exec[i]) & INTERMEDIATE)
840 __exec_to_vma(&eb->exec[i]) = 0;
841 }
842 lut->ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
843 return err;
844#undef INTERMEDIATE
229} 845}
230 846
231static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle) 847static struct i915_vma *
848eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
232{ 849{
233 if (eb->and < 0) { 850 if (eb->lut_size < 0) {
234 if (handle >= -eb->and) 851 if (handle >= -eb->lut_size)
235 return NULL; 852 return NULL;
236 return eb->lut[handle]; 853 return exec_to_vma(&eb->exec[handle]);
237 } else { 854 } else {
238 struct hlist_head *head; 855 struct hlist_head *head;
239 struct i915_vma *vma; 856 struct i915_vma *vma;
240 857
241 head = &eb->buckets[handle & eb->and]; 858 head = &eb->buckets[hash_32(handle, eb->lut_size)];
242 hlist_for_each_entry(vma, head, exec_node) { 859 hlist_for_each_entry(vma, head, exec_node) {
243 if (vma->exec_handle == handle) 860 if (vma->exec_handle == handle)
244 return vma; 861 return vma;
@@ -247,96 +864,69 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
247 } 864 }
248} 865}
249 866
250static void 867static void eb_release_vmas(const struct i915_execbuffer *eb)
251i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
252{ 868{
253 struct drm_i915_gem_exec_object2 *entry; 869 const unsigned int count = eb->buffer_count;
870 unsigned int i;
254 871
255 if (!drm_mm_node_allocated(&vma->node)) 872 for (i = 0; i < count; i++) {
256 return; 873 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
874 struct i915_vma *vma = exec_to_vma(entry);
257 875
258 entry = vma->exec_entry; 876 if (!vma)
877 continue;
259 878
260 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) 879 GEM_BUG_ON(vma->exec_entry != entry);
261 i915_vma_unpin_fence(vma); 880 vma->exec_entry = NULL;
262 881
263 if (entry->flags & __EXEC_OBJECT_HAS_PIN) 882 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
264 __i915_vma_unpin(vma); 883 __eb_unreserve_vma(vma, entry);
265 884
266 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); 885 if (entry->flags & __EXEC_OBJECT_HAS_REF)
267} 886 i915_vma_put(vma);
268 887
269static void eb_destroy(struct eb_vmas *eb) 888 entry->flags &=
270{ 889 ~(__EXEC_OBJECT_RESERVED | __EXEC_OBJECT_HAS_REF);
271 while (!list_empty(&eb->vmas)) {
272 struct i915_vma *vma;
273
274 vma = list_first_entry(&eb->vmas,
275 struct i915_vma,
276 exec_list);
277 list_del_init(&vma->exec_list);
278 i915_gem_execbuffer_unreserve_vma(vma);
279 vma->exec_entry = NULL;
280 i915_vma_put(vma);
281 } 890 }
282 kfree(eb);
283} 891}
284 892
285static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) 893static void eb_reset_vmas(const struct i915_execbuffer *eb)
286{ 894{
287 if (!i915_gem_object_has_struct_page(obj)) 895 eb_release_vmas(eb);
288 return false; 896 if (eb->lut_size >= 0)
289 897 memset(eb->buckets, 0,
290 if (DBG_USE_CPU_RELOC) 898 sizeof(struct hlist_head) << eb->lut_size);
291 return DBG_USE_CPU_RELOC > 0;
292
293 return (HAS_LLC(to_i915(obj->base.dev)) ||
294 obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
295 obj->cache_level != I915_CACHE_NONE);
296} 899}
297 900
298/* Used to convert any address to canonical form. 901static void eb_destroy(const struct i915_execbuffer *eb)
299 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
300 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
301 * addresses to be in a canonical form:
302 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
303 * canonical form [63:48] == [47]."
304 */
305#define GEN8_HIGH_ADDRESS_BIT 47
306static inline uint64_t gen8_canonical_addr(uint64_t address)
307{ 902{
308 return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT); 903 GEM_BUG_ON(eb->reloc_cache.rq);
309}
310 904
311static inline uint64_t gen8_noncanonical_addr(uint64_t address) 905 if (eb->lut_size >= 0)
312{ 906 kfree(eb->buckets);
313 return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
314} 907}
315 908
316static inline uint64_t 909static inline u64
317relocation_target(const struct drm_i915_gem_relocation_entry *reloc, 910relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
318 uint64_t target_offset) 911 const struct i915_vma *target)
319{ 912{
320 return gen8_canonical_addr((int)reloc->delta + target_offset); 913 return gen8_canonical_addr((int)reloc->delta + target->node.start);
321} 914}
322 915
323struct reloc_cache {
324 struct drm_i915_private *i915;
325 struct drm_mm_node node;
326 unsigned long vaddr;
327 unsigned int page;
328 bool use_64bit_reloc;
329};
330
331static void reloc_cache_init(struct reloc_cache *cache, 916static void reloc_cache_init(struct reloc_cache *cache,
332 struct drm_i915_private *i915) 917 struct drm_i915_private *i915)
333{ 918{
334 cache->page = -1; 919 cache->page = -1;
335 cache->vaddr = 0; 920 cache->vaddr = 0;
336 cache->i915 = i915;
337 /* Must be a variable in the struct to allow GCC to unroll. */ 921 /* Must be a variable in the struct to allow GCC to unroll. */
922 cache->gen = INTEL_GEN(i915);
923 cache->has_llc = HAS_LLC(i915);
338 cache->use_64bit_reloc = HAS_64BIT_RELOC(i915); 924 cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
925 cache->has_fence = cache->gen < 4;
926 cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
339 cache->node.allocated = false; 927 cache->node.allocated = false;
928 cache->rq = NULL;
929 cache->rq_size = 0;
340} 930}
341 931
342static inline void *unmask_page(unsigned long p) 932static inline void *unmask_page(unsigned long p)
@@ -351,10 +941,31 @@ static inline unsigned int unmask_flags(unsigned long p)
351 941
352#define KMAP 0x4 /* after CLFLUSH_FLAGS */ 942#define KMAP 0x4 /* after CLFLUSH_FLAGS */
353 943
354static void reloc_cache_fini(struct reloc_cache *cache) 944static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
945{
946 struct drm_i915_private *i915 =
947 container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
948 return &i915->ggtt;
949}
950
951static void reloc_gpu_flush(struct reloc_cache *cache)
952{
953 GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
954 cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
955 i915_gem_object_unpin_map(cache->rq->batch->obj);
956 i915_gem_chipset_flush(cache->rq->i915);
957
958 __i915_add_request(cache->rq, true);
959 cache->rq = NULL;
960}
961
962static void reloc_cache_reset(struct reloc_cache *cache)
355{ 963{
356 void *vaddr; 964 void *vaddr;
357 965
966 if (cache->rq)
967 reloc_gpu_flush(cache);
968
358 if (!cache->vaddr) 969 if (!cache->vaddr)
359 return; 970 return;
360 971
@@ -369,7 +980,7 @@ static void reloc_cache_fini(struct reloc_cache *cache)
369 wmb(); 980 wmb();
370 io_mapping_unmap_atomic((void __iomem *)vaddr); 981 io_mapping_unmap_atomic((void __iomem *)vaddr);
371 if (cache->node.allocated) { 982 if (cache->node.allocated) {
372 struct i915_ggtt *ggtt = &cache->i915->ggtt; 983 struct i915_ggtt *ggtt = cache_to_ggtt(cache);
373 984
374 ggtt->base.clear_range(&ggtt->base, 985 ggtt->base.clear_range(&ggtt->base,
375 cache->node.start, 986 cache->node.start,
@@ -379,11 +990,14 @@ static void reloc_cache_fini(struct reloc_cache *cache)
379 i915_vma_unpin((struct i915_vma *)cache->node.mm); 990 i915_vma_unpin((struct i915_vma *)cache->node.mm);
380 } 991 }
381 } 992 }
993
994 cache->vaddr = 0;
995 cache->page = -1;
382} 996}
383 997
384static void *reloc_kmap(struct drm_i915_gem_object *obj, 998static void *reloc_kmap(struct drm_i915_gem_object *obj,
385 struct reloc_cache *cache, 999 struct reloc_cache *cache,
386 int page) 1000 unsigned long page)
387{ 1001{
388 void *vaddr; 1002 void *vaddr;
389 1003
@@ -391,11 +1005,11 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
391 kunmap_atomic(unmask_page(cache->vaddr)); 1005 kunmap_atomic(unmask_page(cache->vaddr));
392 } else { 1006 } else {
393 unsigned int flushes; 1007 unsigned int flushes;
394 int ret; 1008 int err;
395 1009
396 ret = i915_gem_obj_prepare_shmem_write(obj, &flushes); 1010 err = i915_gem_obj_prepare_shmem_write(obj, &flushes);
397 if (ret) 1011 if (err)
398 return ERR_PTR(ret); 1012 return ERR_PTR(err);
399 1013
400 BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS); 1014 BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
401 BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK); 1015 BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
@@ -415,9 +1029,9 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
415 1029
416static void *reloc_iomap(struct drm_i915_gem_object *obj, 1030static void *reloc_iomap(struct drm_i915_gem_object *obj,
417 struct reloc_cache *cache, 1031 struct reloc_cache *cache,
418 int page) 1032 unsigned long page)
419{ 1033{
420 struct i915_ggtt *ggtt = &cache->i915->ggtt; 1034 struct i915_ggtt *ggtt = cache_to_ggtt(cache);
421 unsigned long offset; 1035 unsigned long offset;
422 void *vaddr; 1036 void *vaddr;
423 1037
@@ -425,31 +1039,31 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
425 io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr)); 1039 io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
426 } else { 1040 } else {
427 struct i915_vma *vma; 1041 struct i915_vma *vma;
428 int ret; 1042 int err;
429 1043
430 if (use_cpu_reloc(obj)) 1044 if (use_cpu_reloc(cache, obj))
431 return NULL; 1045 return NULL;
432 1046
433 ret = i915_gem_object_set_to_gtt_domain(obj, true); 1047 err = i915_gem_object_set_to_gtt_domain(obj, true);
434 if (ret) 1048 if (err)
435 return ERR_PTR(ret); 1049 return ERR_PTR(err);
436 1050
437 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 1051 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
438 PIN_MAPPABLE | PIN_NONBLOCK); 1052 PIN_MAPPABLE | PIN_NONBLOCK);
439 if (IS_ERR(vma)) { 1053 if (IS_ERR(vma)) {
440 memset(&cache->node, 0, sizeof(cache->node)); 1054 memset(&cache->node, 0, sizeof(cache->node));
441 ret = drm_mm_insert_node_in_range 1055 err = drm_mm_insert_node_in_range
442 (&ggtt->base.mm, &cache->node, 1056 (&ggtt->base.mm, &cache->node,
443 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, 1057 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
444 0, ggtt->mappable_end, 1058 0, ggtt->mappable_end,
445 DRM_MM_INSERT_LOW); 1059 DRM_MM_INSERT_LOW);
446 if (ret) /* no inactive aperture space, use cpu reloc */ 1060 if (err) /* no inactive aperture space, use cpu reloc */
447 return NULL; 1061 return NULL;
448 } else { 1062 } else {
449 ret = i915_vma_put_fence(vma); 1063 err = i915_vma_put_fence(vma);
450 if (ret) { 1064 if (err) {
451 i915_vma_unpin(vma); 1065 i915_vma_unpin(vma);
452 return ERR_PTR(ret); 1066 return ERR_PTR(err);
453 } 1067 }
454 1068
455 cache->node.start = vma->node.start; 1069 cache->node.start = vma->node.start;
@@ -467,7 +1081,8 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
467 offset += page << PAGE_SHIFT; 1081 offset += page << PAGE_SHIFT;
468 } 1082 }
469 1083
470 vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset); 1084 vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
1085 offset);
471 cache->page = page; 1086 cache->page = page;
472 cache->vaddr = (unsigned long)vaddr; 1087 cache->vaddr = (unsigned long)vaddr;
473 1088
@@ -476,7 +1091,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
476 1091
477static void *reloc_vaddr(struct drm_i915_gem_object *obj, 1092static void *reloc_vaddr(struct drm_i915_gem_object *obj,
478 struct reloc_cache *cache, 1093 struct reloc_cache *cache,
479 int page) 1094 unsigned long page)
480{ 1095{
481 void *vaddr; 1096 void *vaddr;
482 1097
@@ -503,7 +1118,8 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
503 1118
504 *addr = value; 1119 *addr = value;
505 1120
506 /* Writes to the same cacheline are serialised by the CPU 1121 /*
1122 * Writes to the same cacheline are serialised by the CPU
507 * (including clflush). On the write path, we only require 1123 * (including clflush). On the write path, we only require
508 * that it hits memory in an orderly fashion and place 1124 * that it hits memory in an orderly fashion and place
509 * mb barriers at the start and end of the relocation phase 1125 * mb barriers at the start and end of the relocation phase
@@ -515,25 +1131,201 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
515 *addr = value; 1131 *addr = value;
516} 1132}
517 1133
518static int 1134static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
519relocate_entry(struct drm_i915_gem_object *obj, 1135 struct i915_vma *vma,
1136 unsigned int len)
1137{
1138 struct reloc_cache *cache = &eb->reloc_cache;
1139 struct drm_i915_gem_object *obj;
1140 struct drm_i915_gem_request *rq;
1141 struct i915_vma *batch;
1142 u32 *cmd;
1143 int err;
1144
1145 GEM_BUG_ON(vma->obj->base.write_domain & I915_GEM_DOMAIN_CPU);
1146
1147 obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
1148 if (IS_ERR(obj))
1149 return PTR_ERR(obj);
1150
1151 cmd = i915_gem_object_pin_map(obj,
1152 cache->has_llc ? I915_MAP_WB : I915_MAP_WC);
1153 i915_gem_object_unpin_pages(obj);
1154 if (IS_ERR(cmd))
1155 return PTR_ERR(cmd);
1156
1157 err = i915_gem_object_set_to_wc_domain(obj, false);
1158 if (err)
1159 goto err_unmap;
1160
1161 batch = i915_vma_instance(obj, vma->vm, NULL);
1162 if (IS_ERR(batch)) {
1163 err = PTR_ERR(batch);
1164 goto err_unmap;
1165 }
1166
1167 err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
1168 if (err)
1169 goto err_unmap;
1170
1171 rq = i915_gem_request_alloc(eb->engine, eb->ctx);
1172 if (IS_ERR(rq)) {
1173 err = PTR_ERR(rq);
1174 goto err_unpin;
1175 }
1176
1177 err = i915_gem_request_await_object(rq, vma->obj, true);
1178 if (err)
1179 goto err_request;
1180
1181 err = eb->engine->emit_flush(rq, EMIT_INVALIDATE);
1182 if (err)
1183 goto err_request;
1184
1185 err = i915_switch_context(rq);
1186 if (err)
1187 goto err_request;
1188
1189 err = eb->engine->emit_bb_start(rq,
1190 batch->node.start, PAGE_SIZE,
1191 cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
1192 if (err)
1193 goto err_request;
1194
1195 GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
1196 i915_vma_move_to_active(batch, rq, 0);
1197 reservation_object_lock(batch->resv, NULL);
1198 reservation_object_add_excl_fence(batch->resv, &rq->fence);
1199 reservation_object_unlock(batch->resv);
1200 i915_vma_unpin(batch);
1201
1202 i915_vma_move_to_active(vma, rq, true);
1203 reservation_object_lock(vma->resv, NULL);
1204 reservation_object_add_excl_fence(vma->resv, &rq->fence);
1205 reservation_object_unlock(vma->resv);
1206
1207 rq->batch = batch;
1208
1209 cache->rq = rq;
1210 cache->rq_cmd = cmd;
1211 cache->rq_size = 0;
1212
1213 /* Return with batch mapping (cmd) still pinned */
1214 return 0;
1215
1216err_request:
1217 i915_add_request(rq);
1218err_unpin:
1219 i915_vma_unpin(batch);
1220err_unmap:
1221 i915_gem_object_unpin_map(obj);
1222 return err;
1223}
1224
1225static u32 *reloc_gpu(struct i915_execbuffer *eb,
1226 struct i915_vma *vma,
1227 unsigned int len)
1228{
1229 struct reloc_cache *cache = &eb->reloc_cache;
1230 u32 *cmd;
1231
1232 if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
1233 reloc_gpu_flush(cache);
1234
1235 if (unlikely(!cache->rq)) {
1236 int err;
1237
1238 err = __reloc_gpu_alloc(eb, vma, len);
1239 if (unlikely(err))
1240 return ERR_PTR(err);
1241 }
1242
1243 cmd = cache->rq_cmd + cache->rq_size;
1244 cache->rq_size += len;
1245
1246 return cmd;
1247}
1248
1249static u64
1250relocate_entry(struct i915_vma *vma,
520 const struct drm_i915_gem_relocation_entry *reloc, 1251 const struct drm_i915_gem_relocation_entry *reloc,
521 struct reloc_cache *cache, 1252 struct i915_execbuffer *eb,
522 u64 target_offset) 1253 const struct i915_vma *target)
523{ 1254{
524 u64 offset = reloc->offset; 1255 u64 offset = reloc->offset;
525 bool wide = cache->use_64bit_reloc; 1256 u64 target_offset = relocation_target(reloc, target);
1257 bool wide = eb->reloc_cache.use_64bit_reloc;
526 void *vaddr; 1258 void *vaddr;
527 1259
528 target_offset = relocation_target(reloc, target_offset); 1260 if (!eb->reloc_cache.vaddr &&
1261 (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
1262 !reservation_object_test_signaled_rcu(vma->resv, true))) {
1263 const unsigned int gen = eb->reloc_cache.gen;
1264 unsigned int len;
1265 u32 *batch;
1266 u64 addr;
1267
1268 if (wide)
1269 len = offset & 7 ? 8 : 5;
1270 else if (gen >= 4)
1271 len = 4;
1272 else if (gen >= 3)
1273 len = 3;
1274 else /* On gen2 MI_STORE_DWORD_IMM uses a physical address */
1275 goto repeat;
1276
1277 batch = reloc_gpu(eb, vma, len);
1278 if (IS_ERR(batch))
1279 goto repeat;
1280
1281 addr = gen8_canonical_addr(vma->node.start + offset);
1282 if (wide) {
1283 if (offset & 7) {
1284 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1285 *batch++ = lower_32_bits(addr);
1286 *batch++ = upper_32_bits(addr);
1287 *batch++ = lower_32_bits(target_offset);
1288
1289 addr = gen8_canonical_addr(addr + 4);
1290
1291 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1292 *batch++ = lower_32_bits(addr);
1293 *batch++ = upper_32_bits(addr);
1294 *batch++ = upper_32_bits(target_offset);
1295 } else {
1296 *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
1297 *batch++ = lower_32_bits(addr);
1298 *batch++ = upper_32_bits(addr);
1299 *batch++ = lower_32_bits(target_offset);
1300 *batch++ = upper_32_bits(target_offset);
1301 }
1302 } else if (gen >= 6) {
1303 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1304 *batch++ = 0;
1305 *batch++ = addr;
1306 *batch++ = target_offset;
1307 } else if (gen >= 4) {
1308 *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1309 *batch++ = 0;
1310 *batch++ = addr;
1311 *batch++ = target_offset;
1312 } else {
1313 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1314 *batch++ = addr;
1315 *batch++ = target_offset;
1316 }
1317
1318 goto out;
1319 }
1320
529repeat: 1321repeat:
530 vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT); 1322 vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
531 if (IS_ERR(vaddr)) 1323 if (IS_ERR(vaddr))
532 return PTR_ERR(vaddr); 1324 return PTR_ERR(vaddr);
533 1325
534 clflush_write32(vaddr + offset_in_page(offset), 1326 clflush_write32(vaddr + offset_in_page(offset),
535 lower_32_bits(target_offset), 1327 lower_32_bits(target_offset),
536 cache->vaddr); 1328 eb->reloc_cache.vaddr);
537 1329
538 if (wide) { 1330 if (wide) {
539 offset += sizeof(u32); 1331 offset += sizeof(u32);
@@ -542,48 +1334,29 @@ repeat:
542 goto repeat; 1334 goto repeat;
543 } 1335 }
544 1336
545 return 0; 1337out:
1338 return target->node.start | UPDATE;
546} 1339}
547 1340
548static int 1341static u64
549i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 1342eb_relocate_entry(struct i915_execbuffer *eb,
550 struct eb_vmas *eb, 1343 struct i915_vma *vma,
551 struct drm_i915_gem_relocation_entry *reloc, 1344 const struct drm_i915_gem_relocation_entry *reloc)
552 struct reloc_cache *cache)
553{ 1345{
554 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 1346 struct i915_vma *target;
555 struct drm_gem_object *target_obj; 1347 int err;
556 struct drm_i915_gem_object *target_i915_obj;
557 struct i915_vma *target_vma;
558 uint64_t target_offset;
559 int ret;
560 1348
561 /* we've already hold a reference to all valid objects */ 1349 /* we've already hold a reference to all valid objects */
562 target_vma = eb_get_vma(eb, reloc->target_handle); 1350 target = eb_get_vma(eb, reloc->target_handle);
563 if (unlikely(target_vma == NULL)) 1351 if (unlikely(!target))
564 return -ENOENT; 1352 return -ENOENT;
565 target_i915_obj = target_vma->obj;
566 target_obj = &target_vma->obj->base;
567
568 target_offset = gen8_canonical_addr(target_vma->node.start);
569
570 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
571 * pipe_control writes because the gpu doesn't properly redirect them
572 * through the ppgtt for non_secure batchbuffers. */
573 if (unlikely(IS_GEN6(dev_priv) &&
574 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
575 ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
576 PIN_GLOBAL);
577 if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
578 return ret;
579 }
580 1353
581 /* Validate that the target is in a valid r/w GPU domain */ 1354 /* Validate that the target is in a valid r/w GPU domain */
582 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { 1355 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
583 DRM_DEBUG("reloc with multiple write domains: " 1356 DRM_DEBUG("reloc with multiple write domains: "
584 "obj %p target %d offset %d " 1357 "target %d offset %d "
585 "read %08x write %08x", 1358 "read %08x write %08x",
586 obj, reloc->target_handle, 1359 reloc->target_handle,
587 (int) reloc->offset, 1360 (int) reloc->offset,
588 reloc->read_domains, 1361 reloc->read_domains,
589 reloc->write_domain); 1362 reloc->write_domain);
@@ -592,75 +1365,103 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
592 if (unlikely((reloc->write_domain | reloc->read_domains) 1365 if (unlikely((reloc->write_domain | reloc->read_domains)
593 & ~I915_GEM_GPU_DOMAINS)) { 1366 & ~I915_GEM_GPU_DOMAINS)) {
594 DRM_DEBUG("reloc with read/write non-GPU domains: " 1367 DRM_DEBUG("reloc with read/write non-GPU domains: "
595 "obj %p target %d offset %d " 1368 "target %d offset %d "
596 "read %08x write %08x", 1369 "read %08x write %08x",
597 obj, reloc->target_handle, 1370 reloc->target_handle,
598 (int) reloc->offset, 1371 (int) reloc->offset,
599 reloc->read_domains, 1372 reloc->read_domains,
600 reloc->write_domain); 1373 reloc->write_domain);
601 return -EINVAL; 1374 return -EINVAL;
602 } 1375 }
603 1376
604 target_obj->pending_read_domains |= reloc->read_domains; 1377 if (reloc->write_domain) {
605 target_obj->pending_write_domain |= reloc->write_domain; 1378 target->exec_entry->flags |= EXEC_OBJECT_WRITE;
1379
1380 /*
1381 * Sandybridge PPGTT errata: We need a global gtt mapping
1382 * for MI and pipe_control writes because the gpu doesn't
1383 * properly redirect them through the ppgtt for non_secure
1384 * batchbuffers.
1385 */
1386 if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
1387 IS_GEN6(eb->i915)) {
1388 err = i915_vma_bind(target, target->obj->cache_level,
1389 PIN_GLOBAL);
1390 if (WARN_ONCE(err,
1391 "Unexpected failure to bind target VMA!"))
1392 return err;
1393 }
1394 }
606 1395
607 /* If the relocation already has the right value in it, no 1396 /*
1397 * If the relocation already has the right value in it, no
608 * more work needs to be done. 1398 * more work needs to be done.
609 */ 1399 */
610 if (target_offset == reloc->presumed_offset) 1400 if (!DBG_FORCE_RELOC &&
1401 gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
611 return 0; 1402 return 0;
612 1403
613 /* Check that the relocation address is valid... */ 1404 /* Check that the relocation address is valid... */
614 if (unlikely(reloc->offset > 1405 if (unlikely(reloc->offset >
615 obj->base.size - (cache->use_64bit_reloc ? 8 : 4))) { 1406 vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
616 DRM_DEBUG("Relocation beyond object bounds: " 1407 DRM_DEBUG("Relocation beyond object bounds: "
617 "obj %p target %d offset %d size %d.\n", 1408 "target %d offset %d size %d.\n",
618 obj, reloc->target_handle, 1409 reloc->target_handle,
619 (int) reloc->offset, 1410 (int)reloc->offset,
620 (int) obj->base.size); 1411 (int)vma->size);
621 return -EINVAL; 1412 return -EINVAL;
622 } 1413 }
623 if (unlikely(reloc->offset & 3)) { 1414 if (unlikely(reloc->offset & 3)) {
624 DRM_DEBUG("Relocation not 4-byte aligned: " 1415 DRM_DEBUG("Relocation not 4-byte aligned: "
625 "obj %p target %d offset %d.\n", 1416 "target %d offset %d.\n",
626 obj, reloc->target_handle, 1417 reloc->target_handle,
627 (int) reloc->offset); 1418 (int)reloc->offset);
628 return -EINVAL; 1419 return -EINVAL;
629 } 1420 }
630 1421
631 ret = relocate_entry(obj, reloc, cache, target_offset); 1422 /*
632 if (ret) 1423 * If we write into the object, we need to force the synchronisation
633 return ret; 1424 * barrier, either with an asynchronous clflush or if we executed the
1425 * patching using the GPU (though that should be serialised by the
1426 * timeline). To be completely sure, and since we are required to
1427 * do relocations we are already stalling, disable the user's opt
1428 * of our synchronisation.
1429 */
1430 vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;
634 1431
635 /* and update the user's relocation entry */ 1432 /* and update the user's relocation entry */
636 reloc->presumed_offset = target_offset; 1433 return relocate_entry(vma, reloc, eb, target);
637 return 0;
638} 1434}
639 1435
640static int 1436static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
641i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
642 struct eb_vmas *eb)
643{ 1437{
644#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) 1438#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
645 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; 1439 struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
646 struct drm_i915_gem_relocation_entry __user *user_relocs; 1440 struct drm_i915_gem_relocation_entry __user *urelocs;
647 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; 1441 const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
648 struct reloc_cache cache; 1442 unsigned int remain;
649 int remain, ret = 0;
650
651 user_relocs = u64_to_user_ptr(entry->relocs_ptr);
652 reloc_cache_init(&cache, eb->i915);
653 1443
1444 urelocs = u64_to_user_ptr(entry->relocs_ptr);
654 remain = entry->relocation_count; 1445 remain = entry->relocation_count;
655 while (remain) { 1446 if (unlikely(remain > N_RELOC(ULONG_MAX)))
656 struct drm_i915_gem_relocation_entry *r = stack_reloc; 1447 return -EINVAL;
657 unsigned long unwritten;
658 unsigned int count;
659 1448
660 count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc)); 1449 /*
661 remain -= count; 1450 * We must check that the entire relocation array is safe
1451 * to read. However, if the array is not writable the user loses
1452 * the updated relocation values.
1453 */
1454 if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs))))
1455 return -EFAULT;
1456
1457 do {
1458 struct drm_i915_gem_relocation_entry *r = stack;
1459 unsigned int count =
1460 min_t(unsigned int, remain, ARRAY_SIZE(stack));
1461 unsigned int copied;
662 1462
663 /* This is the fast path and we cannot handle a pagefault 1463 /*
1464 * This is the fast path and we cannot handle a pagefault
664 * whilst holding the struct mutex lest the user pass in the 1465 * whilst holding the struct mutex lest the user pass in the
665 * relocations contained within a mmaped bo. For in such a case 1466 * relocations contained within a mmaped bo. For in such a case
666 * we, the page fault handler would call i915_gem_fault() and 1467 * we, the page fault handler would call i915_gem_fault() and
@@ -668,489 +1469,408 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
668 * this is bad and so lockdep complains vehemently. 1469 * this is bad and so lockdep complains vehemently.
669 */ 1470 */
670 pagefault_disable(); 1471 pagefault_disable();
671 unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])); 1472 copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
672 pagefault_enable(); 1473 pagefault_enable();
673 if (unlikely(unwritten)) { 1474 if (unlikely(copied)) {
674 ret = -EFAULT; 1475 remain = -EFAULT;
675 goto out; 1476 goto out;
676 } 1477 }
677 1478
1479 remain -= count;
678 do { 1480 do {
679 u64 offset = r->presumed_offset; 1481 u64 offset = eb_relocate_entry(eb, vma, r);
680 1482
681 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache); 1483 if (likely(offset == 0)) {
682 if (ret) 1484 } else if ((s64)offset < 0) {
1485 remain = (int)offset;
683 goto out; 1486 goto out;
684 1487 } else {
685 if (r->presumed_offset != offset) { 1488 /*
686 pagefault_disable(); 1489 * Note that reporting an error now
687 unwritten = __put_user(r->presumed_offset, 1490 * leaves everything in an inconsistent
688 &user_relocs->presumed_offset); 1491 * state as we have *already* changed
689 pagefault_enable(); 1492 * the relocation value inside the
690 if (unlikely(unwritten)) { 1493 * object. As we have not changed the
691 /* Note that reporting an error now 1494 * reloc.presumed_offset or will not
692 * leaves everything in an inconsistent 1495 * change the execobject.offset, on the
693 * state as we have *already* changed 1496 * call we may not rewrite the value
694 * the relocation value inside the 1497 * inside the object, leaving it
695 * object. As we have not changed the 1498 * dangling and causing a GPU hang. Unless
696 * reloc.presumed_offset or will not 1499 * userspace dynamically rebuilds the
697 * change the execobject.offset, on the 1500 * relocations on each execbuf rather than
698 * call we may not rewrite the value 1501 * presume a static tree.
699 * inside the object, leaving it 1502 *
700 * dangling and causing a GPU hang. 1503 * We did previously check if the relocations
701 */ 1504 * were writable (access_ok), an error now
702 ret = -EFAULT; 1505 * would be a strange race with mprotect,
703 goto out; 1506 * having already demonstrated that we
704 } 1507 * can read from this userspace address.
1508 */
1509 offset = gen8_canonical_addr(offset & ~UPDATE);
1510 __put_user(offset,
1511 &urelocs[r-stack].presumed_offset);
705 } 1512 }
706 1513 } while (r++, --count);
707 user_relocs++; 1514 urelocs += ARRAY_SIZE(stack);
708 r++; 1515 } while (remain);
709 } while (--count);
710 }
711
712out: 1516out:
713 reloc_cache_fini(&cache); 1517 reloc_cache_reset(&eb->reloc_cache);
714 return ret; 1518 return remain;
715#undef N_RELOC
716} 1519}
717 1520
718static int 1521static int
719i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma, 1522eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
720 struct eb_vmas *eb,
721 struct drm_i915_gem_relocation_entry *relocs)
722{ 1523{
723 const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; 1524 const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
724 struct reloc_cache cache; 1525 struct drm_i915_gem_relocation_entry *relocs =
725 int i, ret = 0; 1526 u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
1527 unsigned int i;
1528 int err;
726 1529
727 reloc_cache_init(&cache, eb->i915);
728 for (i = 0; i < entry->relocation_count; i++) { 1530 for (i = 0; i < entry->relocation_count; i++) {
729 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache); 1531 u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
730 if (ret)
731 break;
732 }
733 reloc_cache_fini(&cache);
734 1532
735 return ret; 1533 if ((s64)offset < 0) {
1534 err = (int)offset;
1535 goto err;
1536 }
1537 }
1538 err = 0;
1539err:
1540 reloc_cache_reset(&eb->reloc_cache);
1541 return err;
736} 1542}
737 1543
738static int 1544static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
739i915_gem_execbuffer_relocate(struct eb_vmas *eb)
740{ 1545{
741 struct i915_vma *vma; 1546 const char __user *addr, *end;
742 int ret = 0; 1547 unsigned long size;
1548 char __maybe_unused c;
743 1549
744 list_for_each_entry(vma, &eb->vmas, exec_list) { 1550 size = entry->relocation_count;
745 ret = i915_gem_execbuffer_relocate_vma(vma, eb); 1551 if (size == 0)
746 if (ret) 1552 return 0;
747 break;
748 }
749 1553
750 return ret; 1554 if (size > N_RELOC(ULONG_MAX))
751} 1555 return -EINVAL;
752 1556
753static bool only_mappable_for_reloc(unsigned int flags) 1557 addr = u64_to_user_ptr(entry->relocs_ptr);
754{ 1558 size *= sizeof(struct drm_i915_gem_relocation_entry);
755 return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) == 1559 if (!access_ok(VERIFY_READ, addr, size))
756 __EXEC_OBJECT_NEEDS_MAP; 1560 return -EFAULT;
1561
1562 end = addr + size;
1563 for (; addr < end; addr += PAGE_SIZE) {
1564 int err = __get_user(c, addr);
1565 if (err)
1566 return err;
1567 }
1568 return __get_user(c, end - 1);
757} 1569}
758 1570
759static int 1571static int eb_copy_relocations(const struct i915_execbuffer *eb)
760i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
761 struct intel_engine_cs *engine,
762 bool *need_reloc)
763{ 1572{
764 struct drm_i915_gem_object *obj = vma->obj; 1573 const unsigned int count = eb->buffer_count;
765 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; 1574 unsigned int i;
766 uint64_t flags; 1575 int err;
767 int ret;
768 1576
769 flags = PIN_USER; 1577 for (i = 0; i < count; i++) {
770 if (entry->flags & EXEC_OBJECT_NEEDS_GTT) 1578 const unsigned int nreloc = eb->exec[i].relocation_count;
771 flags |= PIN_GLOBAL; 1579 struct drm_i915_gem_relocation_entry __user *urelocs;
1580 struct drm_i915_gem_relocation_entry *relocs;
1581 unsigned long size;
1582 unsigned long copied;
772 1583
773 if (!drm_mm_node_allocated(&vma->node)) { 1584 if (nreloc == 0)
774 /* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset, 1585 continue;
775 * limit address to the first 4GBs for unflagged objects.
776 */
777 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
778 flags |= PIN_ZONE_4G;
779 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
780 flags |= PIN_GLOBAL | PIN_MAPPABLE;
781 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
782 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
783 if (entry->flags & EXEC_OBJECT_PINNED)
784 flags |= entry->offset | PIN_OFFSET_FIXED;
785 if ((flags & PIN_MAPPABLE) == 0)
786 flags |= PIN_HIGH;
787 }
788
789 ret = i915_vma_pin(vma,
790 entry->pad_to_size,
791 entry->alignment,
792 flags);
793 if ((ret == -ENOSPC || ret == -E2BIG) &&
794 only_mappable_for_reloc(entry->flags))
795 ret = i915_vma_pin(vma,
796 entry->pad_to_size,
797 entry->alignment,
798 flags & ~PIN_MAPPABLE);
799 if (ret)
800 return ret;
801 1586
802 entry->flags |= __EXEC_OBJECT_HAS_PIN; 1587 err = check_relocations(&eb->exec[i]);
1588 if (err)
1589 goto err;
803 1590
804 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { 1591 urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
805 ret = i915_vma_get_fence(vma); 1592 size = nreloc * sizeof(*relocs);
806 if (ret)
807 return ret;
808 1593
809 if (i915_vma_pin_fence(vma)) 1594 relocs = kvmalloc_array(size, 1, GFP_TEMPORARY);
810 entry->flags |= __EXEC_OBJECT_HAS_FENCE; 1595 if (!relocs) {
811 } 1596 kvfree(relocs);
1597 err = -ENOMEM;
1598 goto err;
1599 }
812 1600
813 if (entry->offset != vma->node.start) { 1601 /* copy_from_user is limited to < 4GiB */
814 entry->offset = vma->node.start; 1602 copied = 0;
815 *need_reloc = true; 1603 do {
816 } 1604 unsigned int len =
1605 min_t(u64, BIT_ULL(31), size - copied);
1606
1607 if (__copy_from_user((char *)relocs + copied,
1608 (char *)urelocs + copied,
1609 len)) {
1610 kvfree(relocs);
1611 err = -EFAULT;
1612 goto err;
1613 }
1614
1615 copied += len;
1616 } while (copied < size);
817 1617
818 if (entry->flags & EXEC_OBJECT_WRITE) { 1618 /*
819 obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER; 1619 * As we do not update the known relocation offsets after
820 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER; 1620 * relocating (due to the complexities in lock handling),
1621 * we need to mark them as invalid now so that we force the
1622 * relocation processing next time. Just in case the target
1623 * object is evicted and then rebound into its old
1624 * presumed_offset before the next execbuffer - if that
1625 * happened we would make the mistake of assuming that the
1626 * relocations were valid.
1627 */
1628 user_access_begin();
1629 for (copied = 0; copied < nreloc; copied++)
1630 unsafe_put_user(-1,
1631 &urelocs[copied].presumed_offset,
1632 end_user);
1633end_user:
1634 user_access_end();
1635
1636 eb->exec[i].relocs_ptr = (uintptr_t)relocs;
821 } 1637 }
822 1638
823 return 0; 1639 return 0;
1640
1641err:
1642 while (i--) {
1643 struct drm_i915_gem_relocation_entry *relocs =
1644 u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
1645 if (eb->exec[i].relocation_count)
1646 kvfree(relocs);
1647 }
1648 return err;
824} 1649}
825 1650
826static bool 1651static int eb_prefault_relocations(const struct i915_execbuffer *eb)
827need_reloc_mappable(struct i915_vma *vma)
828{ 1652{
829 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; 1653 const unsigned int count = eb->buffer_count;
1654 unsigned int i;
830 1655
831 if (entry->relocation_count == 0) 1656 if (unlikely(i915.prefault_disable))
832 return false; 1657 return 0;
833
834 if (!i915_vma_is_ggtt(vma))
835 return false;
836 1658
837 /* See also use_cpu_reloc() */ 1659 for (i = 0; i < count; i++) {
838 if (HAS_LLC(to_i915(vma->obj->base.dev))) 1660 int err;
839 return false;
840 1661
841 if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU) 1662 err = check_relocations(&eb->exec[i]);
842 return false; 1663 if (err)
1664 return err;
1665 }
843 1666
844 return true; 1667 return 0;
845} 1668}
846 1669
847static bool 1670static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
848eb_vma_misplaced(struct i915_vma *vma)
849{ 1671{
850 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; 1672 struct drm_device *dev = &eb->i915->drm;
851 1673 bool have_copy = false;
852 WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && 1674 struct i915_vma *vma;
853 !i915_vma_is_ggtt(vma)); 1675 int err = 0;
854
855 if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
856 return true;
857
858 if (vma->node.size < entry->pad_to_size)
859 return true;
860
861 if (entry->flags & EXEC_OBJECT_PINNED &&
862 vma->node.start != entry->offset)
863 return true;
864
865 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
866 vma->node.start < BATCH_OFFSET_BIAS)
867 return true;
868
869 /* avoid costly ping-pong once a batch bo ended up non-mappable */
870 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
871 !i915_vma_is_map_and_fenceable(vma))
872 return !only_mappable_for_reloc(entry->flags);
873 1676
874 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 && 1677repeat:
875 (vma->node.start + vma->node.size - 1) >> 32) 1678 if (signal_pending(current)) {
876 return true; 1679 err = -ERESTARTSYS;
1680 goto out;
1681 }
877 1682
878 return false; 1683 /* We may process another execbuffer during the unlock... */
879} 1684 eb_reset_vmas(eb);
1685 mutex_unlock(&dev->struct_mutex);
880 1686
881static int 1687 /*
882i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, 1688 * We take 3 passes through the slowpatch.
883 struct list_head *vmas,
884 struct i915_gem_context *ctx,
885 bool *need_relocs)
886{
887 struct drm_i915_gem_object *obj;
888 struct i915_vma *vma;
889 struct i915_address_space *vm;
890 struct list_head ordered_vmas;
891 struct list_head pinned_vmas;
892 bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
893 bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
894 int retry;
895
896 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
897
898 INIT_LIST_HEAD(&ordered_vmas);
899 INIT_LIST_HEAD(&pinned_vmas);
900 while (!list_empty(vmas)) {
901 struct drm_i915_gem_exec_object2 *entry;
902 bool need_fence, need_mappable;
903
904 vma = list_first_entry(vmas, struct i915_vma, exec_list);
905 obj = vma->obj;
906 entry = vma->exec_entry;
907
908 if (ctx->flags & CONTEXT_NO_ZEROMAP)
909 entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
910
911 if (!has_fenced_gpu_access)
912 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
913 need_fence =
914 (entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
915 needs_unfenced_map) &&
916 i915_gem_object_is_tiled(obj);
917 need_mappable = need_fence || need_reloc_mappable(vma);
918
919 if (entry->flags & EXEC_OBJECT_PINNED)
920 list_move_tail(&vma->exec_list, &pinned_vmas);
921 else if (need_mappable) {
922 entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
923 list_move(&vma->exec_list, &ordered_vmas);
924 } else
925 list_move_tail(&vma->exec_list, &ordered_vmas);
926
927 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
928 obj->base.pending_write_domain = 0;
929 }
930 list_splice(&ordered_vmas, vmas);
931 list_splice(&pinned_vmas, vmas);
932
933 /* Attempt to pin all of the buffers into the GTT.
934 * This is done in 3 phases:
935 * 1689 *
936 * 1a. Unbind all objects that do not match the GTT constraints for 1690 * 1 - we try to just prefault all the user relocation entries and
937 * the execbuffer (fenceable, mappable, alignment etc). 1691 * then attempt to reuse the atomic pagefault disabled fast path again.
938 * 1b. Increment pin count for already bound objects.
939 * 2. Bind new objects.
940 * 3. Decrement pin count.
941 * 1692 *
942 * This avoid unnecessary unbinding of later objects in order to make 1693 * 2 - we copy the user entries to a local buffer here outside of the
943 * room for the earlier objects *unless* we need to defragment. 1694 * local and allow ourselves to wait upon any rendering before
1695 * relocations
1696 *
1697 * 3 - we already have a local copy of the relocation entries, but
1698 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
944 */ 1699 */
945 retry = 0; 1700 if (!err) {
946 do { 1701 err = eb_prefault_relocations(eb);
947 int ret = 0; 1702 } else if (!have_copy) {
1703 err = eb_copy_relocations(eb);
1704 have_copy = err == 0;
1705 } else {
1706 cond_resched();
1707 err = 0;
1708 }
1709 if (err) {
1710 mutex_lock(&dev->struct_mutex);
1711 goto out;
1712 }
948 1713
949 /* Unbind any ill-fitting objects or pin. */ 1714 /* A frequent cause for EAGAIN are currently unavailable client pages */
950 list_for_each_entry(vma, vmas, exec_list) { 1715 flush_workqueue(eb->i915->mm.userptr_wq);
951 if (!drm_mm_node_allocated(&vma->node))
952 continue;
953 1716
954 if (eb_vma_misplaced(vma)) 1717 err = i915_mutex_lock_interruptible(dev);
955 ret = i915_vma_unbind(vma); 1718 if (err) {
956 else 1719 mutex_lock(&dev->struct_mutex);
957 ret = i915_gem_execbuffer_reserve_vma(vma, 1720 goto out;
958 engine, 1721 }
959 need_relocs);
960 if (ret)
961 goto err;
962 }
963 1722
964 /* Bind fresh objects */ 1723 /* reacquire the objects */
965 list_for_each_entry(vma, vmas, exec_list) { 1724 err = eb_lookup_vmas(eb);
966 if (drm_mm_node_allocated(&vma->node)) 1725 if (err)
967 continue; 1726 goto err;
968 1727
969 ret = i915_gem_execbuffer_reserve_vma(vma, engine, 1728 list_for_each_entry(vma, &eb->relocs, reloc_link) {
970 need_relocs); 1729 if (!have_copy) {
971 if (ret) 1730 pagefault_disable();
1731 err = eb_relocate_vma(eb, vma);
1732 pagefault_enable();
1733 if (err)
1734 goto repeat;
1735 } else {
1736 err = eb_relocate_vma_slow(eb, vma);
1737 if (err)
972 goto err; 1738 goto err;
973 } 1739 }
1740 }
974 1741
975err: 1742 /*
976 if (ret != -ENOSPC || retry++) 1743 * Leave the user relocations as are, this is the painfully slow path,
977 return ret; 1744 * and we want to avoid the complication of dropping the lock whilst
978 1745 * having buffers reserved in the aperture and so causing spurious
979 /* Decrement pin count for bound objects */ 1746 * ENOSPC for random operations.
980 list_for_each_entry(vma, vmas, exec_list) 1747 */
981 i915_gem_execbuffer_unreserve_vma(vma);
982
983 ret = i915_gem_evict_vm(vm, true);
984 if (ret)
985 return ret;
986 } while (1);
987}
988
989static int
990i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
991 struct drm_i915_gem_execbuffer2 *args,
992 struct drm_file *file,
993 struct intel_engine_cs *engine,
994 struct eb_vmas *eb,
995 struct drm_i915_gem_exec_object2 *exec,
996 struct i915_gem_context *ctx)
997{
998 struct drm_i915_gem_relocation_entry *reloc;
999 struct i915_address_space *vm;
1000 struct i915_vma *vma;
1001 bool need_relocs;
1002 int *reloc_offset;
1003 int i, total, ret;
1004 unsigned count = args->buffer_count;
1005 1748
1006 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm; 1749err:
1750 if (err == -EAGAIN)
1751 goto repeat;
1007 1752
1008 /* We may process another execbuffer during the unlock... */ 1753out:
1009 while (!list_empty(&eb->vmas)) { 1754 if (have_copy) {
1010 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); 1755 const unsigned int count = eb->buffer_count;
1011 list_del_init(&vma->exec_list); 1756 unsigned int i;
1012 i915_gem_execbuffer_unreserve_vma(vma);
1013 i915_vma_put(vma);
1014 }
1015 1757
1016 mutex_unlock(&dev->struct_mutex); 1758 for (i = 0; i < count; i++) {
1759 const struct drm_i915_gem_exec_object2 *entry =
1760 &eb->exec[i];
1761 struct drm_i915_gem_relocation_entry *relocs;
1017 1762
1018 total = 0; 1763 if (!entry->relocation_count)
1019 for (i = 0; i < count; i++) 1764 continue;
1020 total += exec[i].relocation_count;
1021 1765
1022 reloc_offset = kvmalloc_array(count, sizeof(*reloc_offset), GFP_KERNEL); 1766 relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
1023 reloc = kvmalloc_array(total, sizeof(*reloc), GFP_KERNEL); 1767 kvfree(relocs);
1024 if (reloc == NULL || reloc_offset == NULL) { 1768 }
1025 kvfree(reloc);
1026 kvfree(reloc_offset);
1027 mutex_lock(&dev->struct_mutex);
1028 return -ENOMEM;
1029 } 1769 }
1030 1770
1031 total = 0; 1771 return err ?: have_copy;
1032 for (i = 0; i < count; i++) { 1772}
1033 struct drm_i915_gem_relocation_entry __user *user_relocs;
1034 u64 invalid_offset = (u64)-1;
1035 int j;
1036 1773
1037 user_relocs = u64_to_user_ptr(exec[i].relocs_ptr); 1774static int eb_relocate(struct i915_execbuffer *eb)
1775{
1776 if (eb_lookup_vmas(eb))
1777 goto slow;
1038 1778
1039 if (copy_from_user(reloc+total, user_relocs, 1779 /* The objects are in their final locations, apply the relocations. */
1040 exec[i].relocation_count * sizeof(*reloc))) { 1780 if (eb->args->flags & __EXEC_HAS_RELOC) {
1041 ret = -EFAULT; 1781 struct i915_vma *vma;
1042 mutex_lock(&dev->struct_mutex);
1043 goto err;
1044 }
1045 1782
1046 /* As we do not update the known relocation offsets after 1783 list_for_each_entry(vma, &eb->relocs, reloc_link) {
1047 * relocating (due to the complexities in lock handling), 1784 if (eb_relocate_vma(eb, vma))
1048 * we need to mark them as invalid now so that we force the 1785 goto slow;
1049 * relocation processing next time. Just in case the target
1050 * object is evicted and then rebound into its old
1051 * presumed_offset before the next execbuffer - if that
1052 * happened we would make the mistake of assuming that the
1053 * relocations were valid.
1054 */
1055 for (j = 0; j < exec[i].relocation_count; j++) {
1056 if (__copy_to_user(&user_relocs[j].presumed_offset,
1057 &invalid_offset,
1058 sizeof(invalid_offset))) {
1059 ret = -EFAULT;
1060 mutex_lock(&dev->struct_mutex);
1061 goto err;
1062 }
1063 } 1786 }
1064
1065 reloc_offset[i] = total;
1066 total += exec[i].relocation_count;
1067 }
1068
1069 ret = i915_mutex_lock_interruptible(dev);
1070 if (ret) {
1071 mutex_lock(&dev->struct_mutex);
1072 goto err;
1073 } 1787 }
1074 1788
1075 /* reacquire the objects */ 1789 return 0;
1076 eb_reset(eb);
1077 ret = eb_lookup_vmas(eb, exec, args, vm, file);
1078 if (ret)
1079 goto err;
1080 1790
1081 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 1791slow:
1082 ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx, 1792 return eb_relocate_slow(eb);
1083 &need_relocs); 1793}
1084 if (ret)
1085 goto err;
1086 1794
1087 list_for_each_entry(vma, &eb->vmas, exec_list) { 1795static void eb_export_fence(struct i915_vma *vma,
1088 int offset = vma->exec_entry - exec; 1796 struct drm_i915_gem_request *req,
1089 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb, 1797 unsigned int flags)
1090 reloc + reloc_offset[offset]); 1798{
1091 if (ret) 1799 struct reservation_object *resv = vma->resv;
1092 goto err;
1093 }
1094 1800
1095 /* Leave the user relocations as are, this is the painfully slow path, 1801 /*
1096 * and we want to avoid the complication of dropping the lock whilst 1802 * Ignore errors from failing to allocate the new fence, we can't
1097 * having buffers reserved in the aperture and so causing spurious 1803 * handle an error right now. Worst case should be missed
1098 * ENOSPC for random operations. 1804 * synchronisation leading to rendering corruption.
1099 */ 1805 */
1100 1806 reservation_object_lock(resv, NULL);
1101err: 1807 if (flags & EXEC_OBJECT_WRITE)
1102 kvfree(reloc); 1808 reservation_object_add_excl_fence(resv, &req->fence);
1103 kvfree(reloc_offset); 1809 else if (reservation_object_reserve_shared(resv) == 0)
1104 return ret; 1810 reservation_object_add_shared_fence(resv, &req->fence);
1811 reservation_object_unlock(resv);
1105} 1812}
1106 1813
1107static int 1814static int eb_move_to_gpu(struct i915_execbuffer *eb)
1108i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
1109 struct list_head *vmas)
1110{ 1815{
1111 struct i915_vma *vma; 1816 const unsigned int count = eb->buffer_count;
1112 int ret; 1817 unsigned int i;
1818 int err;
1113 1819
1114 list_for_each_entry(vma, vmas, exec_list) { 1820 for (i = 0; i < count; i++) {
1821 const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
1822 struct i915_vma *vma = exec_to_vma(entry);
1115 struct drm_i915_gem_object *obj = vma->obj; 1823 struct drm_i915_gem_object *obj = vma->obj;
1116 1824
1117 if (vma->exec_entry->flags & EXEC_OBJECT_CAPTURE) { 1825 if (entry->flags & EXEC_OBJECT_CAPTURE) {
1118 struct i915_gem_capture_list *capture; 1826 struct i915_gem_capture_list *capture;
1119 1827
1120 capture = kmalloc(sizeof(*capture), GFP_KERNEL); 1828 capture = kmalloc(sizeof(*capture), GFP_KERNEL);
1121 if (unlikely(!capture)) 1829 if (unlikely(!capture))
1122 return -ENOMEM; 1830 return -ENOMEM;
1123 1831
1124 capture->next = req->capture_list; 1832 capture->next = eb->request->capture_list;
1125 capture->vma = vma; 1833 capture->vma = vma;
1126 req->capture_list = capture; 1834 eb->request->capture_list = capture;
1127 } 1835 }
1128 1836
1129 if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC) 1837 if (entry->flags & EXEC_OBJECT_ASYNC)
1130 continue; 1838 goto skip_flushes;
1131 1839
1132 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) { 1840 if (unlikely(obj->cache_dirty && !obj->cache_coherent))
1133 i915_gem_clflush_object(obj, 0); 1841 i915_gem_clflush_object(obj, 0);
1134 obj->base.write_domain = 0;
1135 }
1136 1842
1137 ret = i915_gem_request_await_object 1843 err = i915_gem_request_await_object
1138 (req, obj, obj->base.pending_write_domain); 1844 (eb->request, obj, entry->flags & EXEC_OBJECT_WRITE);
1139 if (ret) 1845 if (err)
1140 return ret; 1846 return err;
1847
1848skip_flushes:
1849 i915_vma_move_to_active(vma, eb->request, entry->flags);
1850 __eb_unreserve_vma(vma, entry);
1851 vma->exec_entry = NULL;
1852 }
1853
1854 for (i = 0; i < count; i++) {
1855 const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
1856 struct i915_vma *vma = exec_to_vma(entry);
1857
1858 eb_export_fence(vma, eb->request, entry->flags);
1859 if (unlikely(entry->flags & __EXEC_OBJECT_HAS_REF))
1860 i915_vma_put(vma);
1141 } 1861 }
1862 eb->exec = NULL;
1142 1863
1143 /* Unconditionally flush any chipset caches (for streaming writes). */ 1864 /* Unconditionally flush any chipset caches (for streaming writes). */
1144 i915_gem_chipset_flush(req->engine->i915); 1865 i915_gem_chipset_flush(eb->i915);
1145 1866
1146 /* Unconditionally invalidate GPU caches and TLBs. */ 1867 /* Unconditionally invalidate GPU caches and TLBs. */
1147 return req->engine->emit_flush(req, EMIT_INVALIDATE); 1868 return eb->engine->emit_flush(eb->request, EMIT_INVALIDATE);
1148} 1869}
1149 1870
1150static bool 1871static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
1151i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
1152{ 1872{
1153 if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS) 1873 if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
1154 return false; 1874 return false;
1155 1875
1156 /* Kernel clipping was a DRI1 misfeature */ 1876 /* Kernel clipping was a DRI1 misfeature */
@@ -1170,107 +1890,6 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
1170 return true; 1890 return true;
1171} 1891}
1172 1892
1173static int
1174validate_exec_list(struct drm_device *dev,
1175 struct drm_i915_gem_exec_object2 *exec,
1176 int count)
1177{
1178 unsigned relocs_total = 0;
1179 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
1180 unsigned invalid_flags;
1181 int i;
1182
1183 /* INTERNAL flags must not overlap with external ones */
1184 BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);
1185
1186 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
1187 if (USES_FULL_PPGTT(dev))
1188 invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
1189
1190 for (i = 0; i < count; i++) {
1191 char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
1192 int length; /* limited by fault_in_pages_readable() */
1193
1194 if (exec[i].flags & invalid_flags)
1195 return -EINVAL;
1196
1197 /* Offset can be used as input (EXEC_OBJECT_PINNED), reject
1198 * any non-page-aligned or non-canonical addresses.
1199 */
1200 if (exec[i].flags & EXEC_OBJECT_PINNED) {
1201 if (exec[i].offset !=
1202 gen8_canonical_addr(exec[i].offset & PAGE_MASK))
1203 return -EINVAL;
1204 }
1205
1206 /* From drm_mm perspective address space is continuous,
1207 * so from this point we're always using non-canonical
1208 * form internally.
1209 */
1210 exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
1211
1212 if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
1213 return -EINVAL;
1214
1215 /* pad_to_size was once a reserved field, so sanitize it */
1216 if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
1217 if (offset_in_page(exec[i].pad_to_size))
1218 return -EINVAL;
1219 } else {
1220 exec[i].pad_to_size = 0;
1221 }
1222
1223 /* First check for malicious input causing overflow in
1224 * the worst case where we need to allocate the entire
1225 * relocation tree as a single array.
1226 */
1227 if (exec[i].relocation_count > relocs_max - relocs_total)
1228 return -EINVAL;
1229 relocs_total += exec[i].relocation_count;
1230
1231 length = exec[i].relocation_count *
1232 sizeof(struct drm_i915_gem_relocation_entry);
1233 /*
1234 * We must check that the entire relocation array is safe
1235 * to read, but since we may need to update the presumed
1236 * offsets during execution, check for full write access.
1237 */
1238 if (!access_ok(VERIFY_WRITE, ptr, length))
1239 return -EFAULT;
1240
1241 if (likely(!i915.prefault_disable)) {
1242 if (fault_in_pages_readable(ptr, length))
1243 return -EFAULT;
1244 }
1245 }
1246
1247 return 0;
1248}
1249
1250static struct i915_gem_context *
1251i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1252 struct intel_engine_cs *engine, const u32 ctx_id)
1253{
1254 struct i915_gem_context *ctx;
1255
1256 ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
1257 if (IS_ERR(ctx))
1258 return ctx;
1259
1260 if (i915_gem_context_is_banned(ctx)) {
1261 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
1262 return ERR_PTR(-EIO);
1263 }
1264
1265 return ctx;
1266}
1267
1268static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
1269{
1270 return !(obj->cache_level == I915_CACHE_NONE ||
1271 obj->cache_level == I915_CACHE_WT);
1272}
1273
1274void i915_vma_move_to_active(struct i915_vma *vma, 1893void i915_vma_move_to_active(struct i915_vma *vma,
1275 struct drm_i915_gem_request *req, 1894 struct drm_i915_gem_request *req,
1276 unsigned int flags) 1895 unsigned int flags)
@@ -1281,7 +1900,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
1281 lockdep_assert_held(&req->i915->drm.struct_mutex); 1900 lockdep_assert_held(&req->i915->drm.struct_mutex);
1282 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 1901 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1283 1902
1284 /* Add a reference if we're newly entering the active list. 1903 /*
1904 * Add a reference if we're newly entering the active list.
1285 * The order in which we add operations to the retirement queue is 1905 * The order in which we add operations to the retirement queue is
1286 * vital here: mark_active adds to the start of the callback list, 1906 * vital here: mark_active adds to the start of the callback list,
1287 * such that subsequent callbacks are called first. Therefore we 1907 * such that subsequent callbacks are called first. Therefore we
@@ -1294,61 +1914,22 @@ void i915_vma_move_to_active(struct i915_vma *vma,
1294 i915_gem_active_set(&vma->last_read[idx], req); 1914 i915_gem_active_set(&vma->last_read[idx], req);
1295 list_move_tail(&vma->vm_link, &vma->vm->active_list); 1915 list_move_tail(&vma->vm_link, &vma->vm->active_list);
1296 1916
1917 obj->base.write_domain = 0;
1297 if (flags & EXEC_OBJECT_WRITE) { 1918 if (flags & EXEC_OBJECT_WRITE) {
1919 obj->base.write_domain = I915_GEM_DOMAIN_RENDER;
1920
1298 if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) 1921 if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
1299 i915_gem_active_set(&obj->frontbuffer_write, req); 1922 i915_gem_active_set(&obj->frontbuffer_write, req);
1300 1923
1301 /* update for the implicit flush after a batch */ 1924 obj->base.read_domains = 0;
1302 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1303 if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
1304 obj->cache_dirty = true;
1305 } 1925 }
1926 obj->base.read_domains |= I915_GEM_GPU_DOMAINS;
1306 1927
1307 if (flags & EXEC_OBJECT_NEEDS_FENCE) 1928 if (flags & EXEC_OBJECT_NEEDS_FENCE)
1308 i915_gem_active_set(&vma->last_fence, req); 1929 i915_gem_active_set(&vma->last_fence, req);
1309} 1930}
1310 1931
1311static void eb_export_fence(struct drm_i915_gem_object *obj, 1932static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
1312 struct drm_i915_gem_request *req,
1313 unsigned int flags)
1314{
1315 struct reservation_object *resv = obj->resv;
1316
1317 /* Ignore errors from failing to allocate the new fence, we can't
1318 * handle an error right now. Worst case should be missed
1319 * synchronisation leading to rendering corruption.
1320 */
1321 reservation_object_lock(resv, NULL);
1322 if (flags & EXEC_OBJECT_WRITE)
1323 reservation_object_add_excl_fence(resv, &req->fence);
1324 else if (reservation_object_reserve_shared(resv) == 0)
1325 reservation_object_add_shared_fence(resv, &req->fence);
1326 reservation_object_unlock(resv);
1327}
1328
1329static void
1330i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1331 struct drm_i915_gem_request *req)
1332{
1333 struct i915_vma *vma;
1334
1335 list_for_each_entry(vma, vmas, exec_list) {
1336 struct drm_i915_gem_object *obj = vma->obj;
1337
1338 obj->base.write_domain = obj->base.pending_write_domain;
1339 if (obj->base.write_domain)
1340 vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
1341 else
1342 obj->base.pending_read_domains |= obj->base.read_domains;
1343 obj->base.read_domains = obj->base.pending_read_domains;
1344
1345 i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
1346 eb_export_fence(obj, req, vma->exec_entry->flags);
1347 }
1348}
1349
1350static int
1351i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
1352{ 1933{
1353 u32 *cs; 1934 u32 *cs;
1354 int i; 1935 int i;
@@ -1358,50 +1939,43 @@ i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
1358 return -EINVAL; 1939 return -EINVAL;
1359 } 1940 }
1360 1941
1361 cs = intel_ring_begin(req, 4 * 3); 1942 cs = intel_ring_begin(req, 4 * 2 + 2);
1362 if (IS_ERR(cs)) 1943 if (IS_ERR(cs))
1363 return PTR_ERR(cs); 1944 return PTR_ERR(cs);
1364 1945
1946 *cs++ = MI_LOAD_REGISTER_IMM(4);
1365 for (i = 0; i < 4; i++) { 1947 for (i = 0; i < 4; i++) {
1366 *cs++ = MI_LOAD_REGISTER_IMM(1);
1367 *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i)); 1948 *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
1368 *cs++ = 0; 1949 *cs++ = 0;
1369 } 1950 }
1370 1951 *cs++ = MI_NOOP;
1371 intel_ring_advance(req, cs); 1952 intel_ring_advance(req, cs);
1372 1953
1373 return 0; 1954 return 0;
1374} 1955}
1375 1956
1376static struct i915_vma * 1957static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
1377i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
1378 struct drm_i915_gem_exec_object2 *shadow_exec_entry,
1379 struct drm_i915_gem_object *batch_obj,
1380 struct eb_vmas *eb,
1381 u32 batch_start_offset,
1382 u32 batch_len,
1383 bool is_master)
1384{ 1958{
1385 struct drm_i915_gem_object *shadow_batch_obj; 1959 struct drm_i915_gem_object *shadow_batch_obj;
1386 struct i915_vma *vma; 1960 struct i915_vma *vma;
1387 int ret; 1961 int err;
1388 1962
1389 shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool, 1963 shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
1390 PAGE_ALIGN(batch_len)); 1964 PAGE_ALIGN(eb->batch_len));
1391 if (IS_ERR(shadow_batch_obj)) 1965 if (IS_ERR(shadow_batch_obj))
1392 return ERR_CAST(shadow_batch_obj); 1966 return ERR_CAST(shadow_batch_obj);
1393 1967
1394 ret = intel_engine_cmd_parser(engine, 1968 err = intel_engine_cmd_parser(eb->engine,
1395 batch_obj, 1969 eb->batch->obj,
1396 shadow_batch_obj, 1970 shadow_batch_obj,
1397 batch_start_offset, 1971 eb->batch_start_offset,
1398 batch_len, 1972 eb->batch_len,
1399 is_master); 1973 is_master);
1400 if (ret) { 1974 if (err) {
1401 if (ret == -EACCES) /* unhandled chained batch */ 1975 if (err == -EACCES) /* unhandled chained batch */
1402 vma = NULL; 1976 vma = NULL;
1403 else 1977 else
1404 vma = ERR_PTR(ret); 1978 vma = ERR_PTR(err);
1405 goto out; 1979 goto out;
1406 } 1980 }
1407 1981
@@ -1409,12 +1983,11 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
1409 if (IS_ERR(vma)) 1983 if (IS_ERR(vma))
1410 goto out; 1984 goto out;
1411 1985
1412 memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry)); 1986 vma->exec_entry =
1413 1987 memset(&eb->exec[eb->buffer_count++],
1414 vma->exec_entry = shadow_exec_entry; 1988 0, sizeof(*vma->exec_entry));
1415 vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN; 1989 vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
1416 i915_gem_object_get(shadow_batch_obj); 1990 __exec_to_vma(vma->exec_entry) = (uintptr_t)i915_vma_get(vma);
1417 list_add_tail(&vma->exec_list, &eb->vmas);
1418 1991
1419out: 1992out:
1420 i915_gem_object_unpin_pages(shadow_batch_obj); 1993 i915_gem_object_unpin_pages(shadow_batch_obj);
@@ -1422,54 +1995,37 @@ out:
1422} 1995}
1423 1996
1424static void 1997static void
1425add_to_client(struct drm_i915_gem_request *req, 1998add_to_client(struct drm_i915_gem_request *req, struct drm_file *file)
1426 struct drm_file *file)
1427{ 1999{
1428 req->file_priv = file->driver_priv; 2000 req->file_priv = file->driver_priv;
1429 list_add_tail(&req->client_link, &req->file_priv->mm.request_list); 2001 list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
1430} 2002}
1431 2003
1432static int 2004static int eb_submit(struct i915_execbuffer *eb)
1433execbuf_submit(struct i915_execbuffer_params *params,
1434 struct drm_i915_gem_execbuffer2 *args,
1435 struct list_head *vmas)
1436{ 2005{
1437 u64 exec_start, exec_len; 2006 int err;
1438 int ret;
1439 2007
1440 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas); 2008 err = eb_move_to_gpu(eb);
1441 if (ret) 2009 if (err)
1442 return ret; 2010 return err;
1443 2011
1444 ret = i915_switch_context(params->request); 2012 err = i915_switch_context(eb->request);
1445 if (ret) 2013 if (err)
1446 return ret; 2014 return err;
1447 2015
1448 if (args->flags & I915_EXEC_CONSTANTS_MASK) { 2016 if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
1449 DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n"); 2017 err = i915_reset_gen7_sol_offsets(eb->request);
1450 return -EINVAL; 2018 if (err)
1451 } 2019 return err;
1452
1453 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1454 ret = i915_reset_gen7_sol_offsets(params->request);
1455 if (ret)
1456 return ret;
1457 } 2020 }
1458 2021
1459 exec_len = args->batch_len; 2022 err = eb->engine->emit_bb_start(eb->request,
1460 exec_start = params->batch->node.start + 2023 eb->batch->node.start +
1461 params->args_batch_start_offset; 2024 eb->batch_start_offset,
1462 2025 eb->batch_len,
1463 if (exec_len == 0) 2026 eb->batch_flags);
1464 exec_len = params->batch->size - params->args_batch_start_offset; 2027 if (err)
1465 2028 return err;
1466 ret = params->engine->emit_bb_start(params->request,
1467 exec_start, exec_len,
1468 params->dispatch_flags);
1469 if (ret)
1470 return ret;
1471
1472 i915_gem_execbuffer_move_to_active(vmas, params->request);
1473 2029
1474 return 0; 2030 return 0;
1475} 2031}
@@ -1551,66 +2107,62 @@ eb_select_engine(struct drm_i915_private *dev_priv,
1551} 2107}
1552 2108
1553static int 2109static int
1554i915_gem_do_execbuffer(struct drm_device *dev, void *data, 2110i915_gem_do_execbuffer(struct drm_device *dev,
1555 struct drm_file *file, 2111 struct drm_file *file,
1556 struct drm_i915_gem_execbuffer2 *args, 2112 struct drm_i915_gem_execbuffer2 *args,
1557 struct drm_i915_gem_exec_object2 *exec) 2113 struct drm_i915_gem_exec_object2 *exec)
1558{ 2114{
1559 struct drm_i915_private *dev_priv = to_i915(dev); 2115 struct i915_execbuffer eb;
1560 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1561 struct eb_vmas *eb;
1562 struct drm_i915_gem_exec_object2 shadow_exec_entry;
1563 struct intel_engine_cs *engine;
1564 struct i915_gem_context *ctx;
1565 struct i915_address_space *vm;
1566 struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1567 struct i915_execbuffer_params *params = &params_master;
1568 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1569 u32 dispatch_flags;
1570 struct dma_fence *in_fence = NULL; 2116 struct dma_fence *in_fence = NULL;
1571 struct sync_file *out_fence = NULL; 2117 struct sync_file *out_fence = NULL;
1572 int out_fence_fd = -1; 2118 int out_fence_fd = -1;
1573 int ret; 2119 int err;
1574 bool need_relocs; 2120
1575 2121 BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
1576 if (!i915_gem_check_execbuffer(args)) 2122 ~__EXEC_OBJECT_UNKNOWN_FLAGS);
1577 return -EINVAL; 2123
1578 2124 eb.i915 = to_i915(dev);
1579 ret = validate_exec_list(dev, exec, args->buffer_count); 2125 eb.file = file;
1580 if (ret) 2126 eb.args = args;
1581 return ret; 2127 if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
1582 2128 args->flags |= __EXEC_HAS_RELOC;
1583 dispatch_flags = 0; 2129 eb.exec = exec;
2130 eb.ctx = NULL;
2131 eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
2132 if (USES_FULL_PPGTT(eb.i915))
2133 eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
2134 reloc_cache_init(&eb.reloc_cache, eb.i915);
2135
2136 eb.buffer_count = args->buffer_count;
2137 eb.batch_start_offset = args->batch_start_offset;
2138 eb.batch_len = args->batch_len;
2139
2140 eb.batch_flags = 0;
1584 if (args->flags & I915_EXEC_SECURE) { 2141 if (args->flags & I915_EXEC_SECURE) {
1585 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) 2142 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
1586 return -EPERM; 2143 return -EPERM;
1587 2144
1588 dispatch_flags |= I915_DISPATCH_SECURE; 2145 eb.batch_flags |= I915_DISPATCH_SECURE;
1589 } 2146 }
1590 if (args->flags & I915_EXEC_IS_PINNED) 2147 if (args->flags & I915_EXEC_IS_PINNED)
1591 dispatch_flags |= I915_DISPATCH_PINNED; 2148 eb.batch_flags |= I915_DISPATCH_PINNED;
1592 2149
1593 engine = eb_select_engine(dev_priv, file, args); 2150 eb.engine = eb_select_engine(eb.i915, file, args);
1594 if (!engine) 2151 if (!eb.engine)
1595 return -EINVAL; 2152 return -EINVAL;
1596 2153
1597 if (args->buffer_count < 1) {
1598 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1599 return -EINVAL;
1600 }
1601
1602 if (args->flags & I915_EXEC_RESOURCE_STREAMER) { 2154 if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
1603 if (!HAS_RESOURCE_STREAMER(dev_priv)) { 2155 if (!HAS_RESOURCE_STREAMER(eb.i915)) {
1604 DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n"); 2156 DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
1605 return -EINVAL; 2157 return -EINVAL;
1606 } 2158 }
1607 if (engine->id != RCS) { 2159 if (eb.engine->id != RCS) {
1608 DRM_DEBUG("RS is not available on %s\n", 2160 DRM_DEBUG("RS is not available on %s\n",
1609 engine->name); 2161 eb.engine->name);
1610 return -EINVAL; 2162 return -EINVAL;
1611 } 2163 }
1612 2164
1613 dispatch_flags |= I915_DISPATCH_RS; 2165 eb.batch_flags |= I915_DISPATCH_RS;
1614 } 2166 }
1615 2167
1616 if (args->flags & I915_EXEC_FENCE_IN) { 2168 if (args->flags & I915_EXEC_FENCE_IN) {
@@ -1622,102 +2174,62 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1622 if (args->flags & I915_EXEC_FENCE_OUT) { 2174 if (args->flags & I915_EXEC_FENCE_OUT) {
1623 out_fence_fd = get_unused_fd_flags(O_CLOEXEC); 2175 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
1624 if (out_fence_fd < 0) { 2176 if (out_fence_fd < 0) {
1625 ret = out_fence_fd; 2177 err = out_fence_fd;
1626 goto err_in_fence; 2178 goto err_in_fence;
1627 } 2179 }
1628 } 2180 }
1629 2181
1630 /* Take a local wakeref for preparing to dispatch the execbuf as 2182 if (eb_create(&eb))
2183 return -ENOMEM;
2184
2185 /*
2186 * Take a local wakeref for preparing to dispatch the execbuf as
1631 * we expect to access the hardware fairly frequently in the 2187 * we expect to access the hardware fairly frequently in the
1632 * process. Upon first dispatch, we acquire another prolonged 2188 * process. Upon first dispatch, we acquire another prolonged
1633 * wakeref that we hold until the GPU has been idle for at least 2189 * wakeref that we hold until the GPU has been idle for at least
1634 * 100ms. 2190 * 100ms.
1635 */ 2191 */
1636 intel_runtime_pm_get(dev_priv); 2192 intel_runtime_pm_get(eb.i915);
1637 2193 err = i915_mutex_lock_interruptible(dev);
1638 ret = i915_mutex_lock_interruptible(dev); 2194 if (err)
1639 if (ret) 2195 goto err_rpm;
1640 goto pre_mutex_err;
1641
1642 ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
1643 if (IS_ERR(ctx)) {
1644 mutex_unlock(&dev->struct_mutex);
1645 ret = PTR_ERR(ctx);
1646 goto pre_mutex_err;
1647 }
1648
1649 i915_gem_context_get(ctx);
1650
1651 if (ctx->ppgtt)
1652 vm = &ctx->ppgtt->base;
1653 else
1654 vm = &ggtt->base;
1655 2196
1656 memset(&params_master, 0x00, sizeof(params_master)); 2197 err = eb_select_context(&eb);
2198 if (unlikely(err))
2199 goto err_unlock;
1657 2200
1658 eb = eb_create(dev_priv, args); 2201 err = eb_relocate(&eb);
1659 if (eb == NULL) { 2202 if (err)
1660 i915_gem_context_put(ctx); 2203 /*
1661 mutex_unlock(&dev->struct_mutex); 2204 * If the user expects the execobject.offset and
1662 ret = -ENOMEM; 2205 * reloc.presumed_offset to be an exact match,
1663 goto pre_mutex_err; 2206 * as for using NO_RELOC, then we cannot update
1664 } 2207 * the execobject.offset until we have completed
1665 2208 * relocation.
1666 /* Look up object handles */ 2209 */
1667 ret = eb_lookup_vmas(eb, exec, args, vm, file); 2210 args->flags &= ~__EXEC_HAS_RELOC;
1668 if (ret) 2211 if (err < 0)
1669 goto err; 2212 goto err_vma;
1670
1671 /* take note of the batch buffer before we might reorder the lists */
1672 params->batch = eb_get_batch(eb);
1673
1674 /* Move the objects en-masse into the GTT, evicting if necessary. */
1675 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1676 ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
1677 &need_relocs);
1678 if (ret)
1679 goto err;
1680
1681 /* The objects are in their final locations, apply the relocations. */
1682 if (need_relocs)
1683 ret = i915_gem_execbuffer_relocate(eb);
1684 if (ret) {
1685 if (ret == -EFAULT) {
1686 ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
1687 engine,
1688 eb, exec, ctx);
1689 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1690 }
1691 if (ret)
1692 goto err;
1693 }
1694 2213
1695 /* Set the pending read domains for the batch buffer to COMMAND */ 2214 if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) {
1696 if (params->batch->obj->base.pending_write_domain) {
1697 DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); 2215 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1698 ret = -EINVAL; 2216 err = -EINVAL;
1699 goto err; 2217 goto err_vma;
1700 } 2218 }
1701 if (args->batch_start_offset > params->batch->size || 2219 if (eb.batch_start_offset > eb.batch->size ||
1702 args->batch_len > params->batch->size - args->batch_start_offset) { 2220 eb.batch_len > eb.batch->size - eb.batch_start_offset) {
1703 DRM_DEBUG("Attempting to use out-of-bounds batch\n"); 2221 DRM_DEBUG("Attempting to use out-of-bounds batch\n");
1704 ret = -EINVAL; 2222 err = -EINVAL;
1705 goto err; 2223 goto err_vma;
1706 } 2224 }
1707 2225
1708 params->args_batch_start_offset = args->batch_start_offset; 2226 if (eb.engine->needs_cmd_parser && eb.batch_len) {
1709 if (engine->needs_cmd_parser && args->batch_len) {
1710 struct i915_vma *vma; 2227 struct i915_vma *vma;
1711 2228
1712 vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry, 2229 vma = eb_parse(&eb, drm_is_current_master(file));
1713 params->batch->obj,
1714 eb,
1715 args->batch_start_offset,
1716 args->batch_len,
1717 drm_is_current_master(file));
1718 if (IS_ERR(vma)) { 2230 if (IS_ERR(vma)) {
1719 ret = PTR_ERR(vma); 2231 err = PTR_ERR(vma);
1720 goto err; 2232 goto err_vma;
1721 } 2233 }
1722 2234
1723 if (vma) { 2235 if (vma) {
@@ -1730,19 +2242,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1730 * specifically don't want that set on batches the 2242 * specifically don't want that set on batches the
1731 * command parser has accepted. 2243 * command parser has accepted.
1732 */ 2244 */
1733 dispatch_flags |= I915_DISPATCH_SECURE; 2245 eb.batch_flags |= I915_DISPATCH_SECURE;
1734 params->args_batch_start_offset = 0; 2246 eb.batch_start_offset = 0;
1735 params->batch = vma; 2247 eb.batch = vma;
1736 } 2248 }
1737 } 2249 }
1738 2250
1739 params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; 2251 if (eb.batch_len == 0)
2252 eb.batch_len = eb.batch->size - eb.batch_start_offset;
1740 2253
1741 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure 2254 /*
2255 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1742 * batch" bit. Hence we need to pin secure batches into the global gtt. 2256 * batch" bit. Hence we need to pin secure batches into the global gtt.
1743 * hsw should have this fixed, but bdw mucks it up again. */ 2257 * hsw should have this fixed, but bdw mucks it up again. */
1744 if (dispatch_flags & I915_DISPATCH_SECURE) { 2258 if (eb.batch_flags & I915_DISPATCH_SECURE) {
1745 struct drm_i915_gem_object *obj = params->batch->obj;
1746 struct i915_vma *vma; 2259 struct i915_vma *vma;
1747 2260
1748 /* 2261 /*
@@ -1755,66 +2268,56 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1755 * fitting due to fragmentation. 2268 * fitting due to fragmentation.
1756 * So this is actually safe. 2269 * So this is actually safe.
1757 */ 2270 */
1758 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); 2271 vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
1759 if (IS_ERR(vma)) { 2272 if (IS_ERR(vma)) {
1760 ret = PTR_ERR(vma); 2273 err = PTR_ERR(vma);
1761 goto err; 2274 goto err_vma;
1762 } 2275 }
1763 2276
1764 params->batch = vma; 2277 eb.batch = vma;
1765 } 2278 }
1766 2279
2280 /* All GPU relocation batches must be submitted prior to the user rq */
2281 GEM_BUG_ON(eb.reloc_cache.rq);
2282
1767 /* Allocate a request for this batch buffer nice and early. */ 2283 /* Allocate a request for this batch buffer nice and early. */
1768 params->request = i915_gem_request_alloc(engine, ctx); 2284 eb.request = i915_gem_request_alloc(eb.engine, eb.ctx);
1769 if (IS_ERR(params->request)) { 2285 if (IS_ERR(eb.request)) {
1770 ret = PTR_ERR(params->request); 2286 err = PTR_ERR(eb.request);
1771 goto err_batch_unpin; 2287 goto err_batch_unpin;
1772 } 2288 }
1773 2289
1774 if (in_fence) { 2290 if (in_fence) {
1775 ret = i915_gem_request_await_dma_fence(params->request, 2291 err = i915_gem_request_await_dma_fence(eb.request, in_fence);
1776 in_fence); 2292 if (err < 0)
1777 if (ret < 0)
1778 goto err_request; 2293 goto err_request;
1779 } 2294 }
1780 2295
1781 if (out_fence_fd != -1) { 2296 if (out_fence_fd != -1) {
1782 out_fence = sync_file_create(&params->request->fence); 2297 out_fence = sync_file_create(&eb.request->fence);
1783 if (!out_fence) { 2298 if (!out_fence) {
1784 ret = -ENOMEM; 2299 err = -ENOMEM;
1785 goto err_request; 2300 goto err_request;
1786 } 2301 }
1787 } 2302 }
1788 2303
1789 /* Whilst this request exists, batch_obj will be on the 2304 /*
2305 * Whilst this request exists, batch_obj will be on the
1790 * active_list, and so will hold the active reference. Only when this 2306 * active_list, and so will hold the active reference. Only when this
1791 * request is retired will the the batch_obj be moved onto the 2307 * request is retired will the the batch_obj be moved onto the
1792 * inactive_list and lose its active reference. Hence we do not need 2308 * inactive_list and lose its active reference. Hence we do not need
1793 * to explicitly hold another reference here. 2309 * to explicitly hold another reference here.
1794 */ 2310 */
1795 params->request->batch = params->batch; 2311 eb.request->batch = eb.batch;
1796
1797 /*
1798 * Save assorted stuff away to pass through to *_submission().
1799 * NB: This data should be 'persistent' and not local as it will
1800 * kept around beyond the duration of the IOCTL once the GPU
1801 * scheduler arrives.
1802 */
1803 params->dev = dev;
1804 params->file = file;
1805 params->engine = engine;
1806 params->dispatch_flags = dispatch_flags;
1807 params->ctx = ctx;
1808
1809 trace_i915_gem_request_queue(params->request, dispatch_flags);
1810 2312
1811 ret = execbuf_submit(params, args, &eb->vmas); 2313 trace_i915_gem_request_queue(eb.request, eb.batch_flags);
2314 err = eb_submit(&eb);
1812err_request: 2315err_request:
1813 __i915_add_request(params->request, ret == 0); 2316 __i915_add_request(eb.request, err == 0);
1814 add_to_client(params->request, file); 2317 add_to_client(eb.request, file);
1815 2318
1816 if (out_fence) { 2319 if (out_fence) {
1817 if (ret == 0) { 2320 if (err == 0) {
1818 fd_install(out_fence_fd, out_fence->file); 2321 fd_install(out_fence_fd, out_fence->file);
1819 args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */ 2322 args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
1820 args->rsvd2 |= (u64)out_fence_fd << 32; 2323 args->rsvd2 |= (u64)out_fence_fd << 32;
@@ -1825,30 +2328,22 @@ err_request:
1825 } 2328 }
1826 2329
1827err_batch_unpin: 2330err_batch_unpin:
1828 /* 2331 if (eb.batch_flags & I915_DISPATCH_SECURE)
1829 * FIXME: We crucially rely upon the active tracking for the (ppgtt) 2332 i915_vma_unpin(eb.batch);
1830 * batch vma for correctness. For less ugly and less fragility this 2333err_vma:
1831 * needs to be adjusted to also track the ggtt batch vma properly as 2334 if (eb.exec)
1832 * active. 2335 eb_release_vmas(&eb);
1833 */ 2336 i915_gem_context_put(eb.ctx);
1834 if (dispatch_flags & I915_DISPATCH_SECURE) 2337err_unlock:
1835 i915_vma_unpin(params->batch);
1836err:
1837 /* the request owns the ref now */
1838 i915_gem_context_put(ctx);
1839 eb_destroy(eb);
1840
1841 mutex_unlock(&dev->struct_mutex); 2338 mutex_unlock(&dev->struct_mutex);
1842 2339err_rpm:
1843pre_mutex_err: 2340 intel_runtime_pm_put(eb.i915);
1844 /* intel_gpu_busy should also get a ref, so it will free when the device 2341 eb_destroy(&eb);
1845 * is really idle. */
1846 intel_runtime_pm_put(dev_priv);
1847 if (out_fence_fd != -1) 2342 if (out_fence_fd != -1)
1848 put_unused_fd(out_fence_fd); 2343 put_unused_fd(out_fence_fd);
1849err_in_fence: 2344err_in_fence:
1850 dma_fence_put(in_fence); 2345 dma_fence_put(in_fence);
1851 return ret; 2346 return err;
1852} 2347}
1853 2348
1854/* 2349/*
@@ -1859,20 +2354,38 @@ int
1859i915_gem_execbuffer(struct drm_device *dev, void *data, 2354i915_gem_execbuffer(struct drm_device *dev, void *data,
1860 struct drm_file *file) 2355 struct drm_file *file)
1861{ 2356{
2357 const size_t sz = sizeof(struct drm_i915_gem_exec_object2);
1862 struct drm_i915_gem_execbuffer *args = data; 2358 struct drm_i915_gem_execbuffer *args = data;
1863 struct drm_i915_gem_execbuffer2 exec2; 2359 struct drm_i915_gem_execbuffer2 exec2;
1864 struct drm_i915_gem_exec_object *exec_list = NULL; 2360 struct drm_i915_gem_exec_object *exec_list = NULL;
1865 struct drm_i915_gem_exec_object2 *exec2_list = NULL; 2361 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1866 int ret, i; 2362 unsigned int i;
2363 int err;
1867 2364
1868 if (args->buffer_count < 1) { 2365 if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
1869 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); 2366 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1870 return -EINVAL; 2367 return -EINVAL;
1871 } 2368 }
1872 2369
2370 exec2.buffers_ptr = args->buffers_ptr;
2371 exec2.buffer_count = args->buffer_count;
2372 exec2.batch_start_offset = args->batch_start_offset;
2373 exec2.batch_len = args->batch_len;
2374 exec2.DR1 = args->DR1;
2375 exec2.DR4 = args->DR4;
2376 exec2.num_cliprects = args->num_cliprects;
2377 exec2.cliprects_ptr = args->cliprects_ptr;
2378 exec2.flags = I915_EXEC_RENDER;
2379 i915_execbuffer2_set_context_id(exec2, 0);
2380
2381 if (!i915_gem_check_execbuffer(&exec2))
2382 return -EINVAL;
2383
1873 /* Copy in the exec list from userland */ 2384 /* Copy in the exec list from userland */
1874 exec_list = kvmalloc_array(sizeof(*exec_list), args->buffer_count, GFP_KERNEL); 2385 exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list),
1875 exec2_list = kvmalloc_array(sizeof(*exec2_list), args->buffer_count, GFP_KERNEL); 2386 __GFP_NOWARN | GFP_TEMPORARY);
2387 exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
2388 __GFP_NOWARN | GFP_TEMPORARY);
1876 if (exec_list == NULL || exec2_list == NULL) { 2389 if (exec_list == NULL || exec2_list == NULL) {
1877 DRM_DEBUG("Failed to allocate exec list for %d buffers\n", 2390 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1878 args->buffer_count); 2391 args->buffer_count);
@@ -1880,12 +2393,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1880 kvfree(exec2_list); 2393 kvfree(exec2_list);
1881 return -ENOMEM; 2394 return -ENOMEM;
1882 } 2395 }
1883 ret = copy_from_user(exec_list, 2396 err = copy_from_user(exec_list,
1884 u64_to_user_ptr(args->buffers_ptr), 2397 u64_to_user_ptr(args->buffers_ptr),
1885 sizeof(*exec_list) * args->buffer_count); 2398 sizeof(*exec_list) * args->buffer_count);
1886 if (ret != 0) { 2399 if (err) {
1887 DRM_DEBUG("copy %d exec entries failed %d\n", 2400 DRM_DEBUG("copy %d exec entries failed %d\n",
1888 args->buffer_count, ret); 2401 args->buffer_count, err);
1889 kvfree(exec_list); 2402 kvfree(exec_list);
1890 kvfree(exec2_list); 2403 kvfree(exec2_list);
1891 return -EFAULT; 2404 return -EFAULT;
@@ -1903,99 +2416,94 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1903 exec2_list[i].flags = 0; 2416 exec2_list[i].flags = 0;
1904 } 2417 }
1905 2418
1906 exec2.buffers_ptr = args->buffers_ptr; 2419 err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list);
1907 exec2.buffer_count = args->buffer_count; 2420 if (exec2.flags & __EXEC_HAS_RELOC) {
1908 exec2.batch_start_offset = args->batch_start_offset;
1909 exec2.batch_len = args->batch_len;
1910 exec2.DR1 = args->DR1;
1911 exec2.DR4 = args->DR4;
1912 exec2.num_cliprects = args->num_cliprects;
1913 exec2.cliprects_ptr = args->cliprects_ptr;
1914 exec2.flags = I915_EXEC_RENDER;
1915 i915_execbuffer2_set_context_id(exec2, 0);
1916
1917 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1918 if (!ret) {
1919 struct drm_i915_gem_exec_object __user *user_exec_list = 2421 struct drm_i915_gem_exec_object __user *user_exec_list =
1920 u64_to_user_ptr(args->buffers_ptr); 2422 u64_to_user_ptr(args->buffers_ptr);
1921 2423
1922 /* Copy the new buffer offsets back to the user's exec list. */ 2424 /* Copy the new buffer offsets back to the user's exec list. */
1923 for (i = 0; i < args->buffer_count; i++) { 2425 for (i = 0; i < args->buffer_count; i++) {
2426 if (!(exec2_list[i].offset & UPDATE))
2427 continue;
2428
1924 exec2_list[i].offset = 2429 exec2_list[i].offset =
1925 gen8_canonical_addr(exec2_list[i].offset); 2430 gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
1926 ret = __copy_to_user(&user_exec_list[i].offset, 2431 exec2_list[i].offset &= PIN_OFFSET_MASK;
1927 &exec2_list[i].offset, 2432 if (__copy_to_user(&user_exec_list[i].offset,
1928 sizeof(user_exec_list[i].offset)); 2433 &exec2_list[i].offset,
1929 if (ret) { 2434 sizeof(user_exec_list[i].offset)))
1930 ret = -EFAULT;
1931 DRM_DEBUG("failed to copy %d exec entries "
1932 "back to user (%d)\n",
1933 args->buffer_count, ret);
1934 break; 2435 break;
1935 }
1936 } 2436 }
1937 } 2437 }
1938 2438
1939 kvfree(exec_list); 2439 kvfree(exec_list);
1940 kvfree(exec2_list); 2440 kvfree(exec2_list);
1941 return ret; 2441 return err;
1942} 2442}
1943 2443
1944int 2444int
1945i915_gem_execbuffer2(struct drm_device *dev, void *data, 2445i915_gem_execbuffer2(struct drm_device *dev, void *data,
1946 struct drm_file *file) 2446 struct drm_file *file)
1947{ 2447{
2448 const size_t sz = sizeof(struct drm_i915_gem_exec_object2);
1948 struct drm_i915_gem_execbuffer2 *args = data; 2449 struct drm_i915_gem_execbuffer2 *args = data;
1949 struct drm_i915_gem_exec_object2 *exec2_list = NULL; 2450 struct drm_i915_gem_exec_object2 *exec2_list;
1950 int ret; 2451 int err;
1951 2452
1952 if (args->buffer_count < 1 || 2453 if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
1953 args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1954 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); 2454 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1955 return -EINVAL; 2455 return -EINVAL;
1956 } 2456 }
1957 2457
1958 exec2_list = kvmalloc_array(args->buffer_count, 2458 if (!i915_gem_check_execbuffer(args))
1959 sizeof(*exec2_list), 2459 return -EINVAL;
1960 GFP_TEMPORARY); 2460
2461 /* Allocate an extra slot for use by the command parser */
2462 exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
2463 __GFP_NOWARN | GFP_TEMPORARY);
1961 if (exec2_list == NULL) { 2464 if (exec2_list == NULL) {
1962 DRM_DEBUG("Failed to allocate exec list for %d buffers\n", 2465 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1963 args->buffer_count); 2466 args->buffer_count);
1964 return -ENOMEM; 2467 return -ENOMEM;
1965 } 2468 }
1966 ret = copy_from_user(exec2_list, 2469 if (copy_from_user(exec2_list,
1967 u64_to_user_ptr(args->buffers_ptr), 2470 u64_to_user_ptr(args->buffers_ptr),
1968 sizeof(*exec2_list) * args->buffer_count); 2471 sizeof(*exec2_list) * args->buffer_count)) {
1969 if (ret != 0) { 2472 DRM_DEBUG("copy %d exec entries failed\n", args->buffer_count);
1970 DRM_DEBUG("copy %d exec entries failed %d\n",
1971 args->buffer_count, ret);
1972 kvfree(exec2_list); 2473 kvfree(exec2_list);
1973 return -EFAULT; 2474 return -EFAULT;
1974 } 2475 }
1975 2476
1976 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); 2477 err = i915_gem_do_execbuffer(dev, file, args, exec2_list);
1977 if (!ret) { 2478
1978 /* Copy the new buffer offsets back to the user's exec list. */ 2479 /*
2480 * Now that we have begun execution of the batchbuffer, we ignore
2481 * any new error after this point. Also given that we have already
2482 * updated the associated relocations, we try to write out the current
2483 * object locations irrespective of any error.
2484 */
2485 if (args->flags & __EXEC_HAS_RELOC) {
1979 struct drm_i915_gem_exec_object2 __user *user_exec_list = 2486 struct drm_i915_gem_exec_object2 __user *user_exec_list =
1980 u64_to_user_ptr(args->buffers_ptr); 2487 u64_to_user_ptr(args->buffers_ptr);
1981 int i; 2488 unsigned int i;
1982 2489
2490 /* Copy the new buffer offsets back to the user's exec list. */
2491 user_access_begin();
1983 for (i = 0; i < args->buffer_count; i++) { 2492 for (i = 0; i < args->buffer_count; i++) {
2493 if (!(exec2_list[i].offset & UPDATE))
2494 continue;
2495
1984 exec2_list[i].offset = 2496 exec2_list[i].offset =
1985 gen8_canonical_addr(exec2_list[i].offset); 2497 gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
1986 ret = __copy_to_user(&user_exec_list[i].offset, 2498 unsafe_put_user(exec2_list[i].offset,
1987 &exec2_list[i].offset, 2499 &user_exec_list[i].offset,
1988 sizeof(user_exec_list[i].offset)); 2500 end_user);
1989 if (ret) {
1990 ret = -EFAULT;
1991 DRM_DEBUG("failed to copy %d exec entries "
1992 "back to user\n",
1993 args->buffer_count);
1994 break;
1995 }
1996 } 2501 }
2502end_user:
2503 user_access_end();
1997 } 2504 }
1998 2505
2506 args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
1999 kvfree(exec2_list); 2507 kvfree(exec2_list);
2000 return ret; 2508 return err;
2001} 2509}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1489c3af7145..61fc7e90a7da 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1884,7 +1884,7 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
1884 * called on driver load and after a GPU reset, so you can place 1884 * called on driver load and after a GPU reset, so you can place
1885 * workarounds here even if they get overwritten by GPU reset. 1885 * workarounds here even if they get overwritten by GPU reset.
1886 */ 1886 */
1887 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk */ 1887 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */
1888 if (IS_BROADWELL(dev_priv)) 1888 if (IS_BROADWELL(dev_priv))
1889 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); 1889 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
1890 else if (IS_CHERRYVIEW(dev_priv)) 1890 else if (IS_CHERRYVIEW(dev_priv))
@@ -3095,13 +3095,17 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3095 3095
3096void i915_ggtt_enable_guc(struct drm_i915_private *i915) 3096void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3097{ 3097{
3098 GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3099
3098 i915->ggtt.invalidate = guc_ggtt_invalidate; 3100 i915->ggtt.invalidate = guc_ggtt_invalidate;
3099} 3101}
3100 3102
3101void i915_ggtt_disable_guc(struct drm_i915_private *i915) 3103void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3102{ 3104{
3103 if (i915->ggtt.invalidate == guc_ggtt_invalidate) 3105 /* We should only be called after i915_ggtt_enable_guc() */
3104 i915->ggtt.invalidate = gen6_ggtt_invalidate; 3106 GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3107
3108 i915->ggtt.invalidate = gen6_ggtt_invalidate;
3105} 3109}
3106 3110
3107void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) 3111void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
@@ -3398,6 +3402,9 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
3398 if (err != -ENOSPC) 3402 if (err != -ENOSPC)
3399 return err; 3403 return err;
3400 3404
3405 if (flags & PIN_NOEVICT)
3406 return -ENOSPC;
3407
3401 err = i915_gem_evict_for_node(vm, node, flags); 3408 err = i915_gem_evict_for_node(vm, node, flags);
3402 if (err == 0) 3409 if (err == 0)
3403 err = drm_mm_reserve_node(&vm->mm, node); 3410 err = drm_mm_reserve_node(&vm->mm, node);
@@ -3512,6 +3519,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
3512 if (err != -ENOSPC) 3519 if (err != -ENOSPC)
3513 return err; 3520 return err;
3514 3521
3522 if (flags & PIN_NOEVICT)
3523 return -ENOSPC;
3524
3515 /* No free space, pick a slot at random. 3525 /* No free space, pick a slot at random.
3516 * 3526 *
3517 * There is a pathological case here using a GTT shared between 3527 * There is a pathological case here using a GTT shared between
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index fb15684c1d83..1b2a56c3e5d3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -255,6 +255,7 @@ struct i915_address_space {
255 struct drm_i915_file_private *file; 255 struct drm_i915_file_private *file;
256 struct list_head global_link; 256 struct list_head global_link;
257 u64 total; /* size addr space maps (ex. 2GB for ggtt) */ 257 u64 total; /* size addr space maps (ex. 2GB for ggtt) */
258 u64 reserved; /* size addr space reserved */
258 259
259 bool closed; 260 bool closed;
260 261
@@ -588,6 +589,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
588#define PIN_MAPPABLE BIT(1) 589#define PIN_MAPPABLE BIT(1)
589#define PIN_ZONE_4G BIT(2) 590#define PIN_ZONE_4G BIT(2)
590#define PIN_NONFAULT BIT(3) 591#define PIN_NONFAULT BIT(3)
592#define PIN_NOEVICT BIT(4)
591 593
592#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */ 594#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */
593#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */ 595#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index fc950abbe400..568bf83af1f5 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -188,9 +188,11 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
188 drm_gem_private_object_init(&i915->drm, &obj->base, size); 188 drm_gem_private_object_init(&i915->drm, &obj->base, size);
189 i915_gem_object_init(obj, &i915_gem_object_internal_ops); 189 i915_gem_object_init(obj, &i915_gem_object_internal_ops);
190 190
191 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
192 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 191 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
192 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
193 obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE; 193 obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
194 obj->cache_coherent = i915_gem_object_is_coherent(obj);
195 obj->cache_dirty = !obj->cache_coherent;
194 196
195 return obj; 197 return obj;
196} 198}
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 35e1a27729dc..5b19a4916a4d 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -68,9 +68,25 @@ struct drm_i915_gem_object {
68 68
69 const struct drm_i915_gem_object_ops *ops; 69 const struct drm_i915_gem_object_ops *ops;
70 70
71 /** List of VMAs backed by this object */ 71 /**
72 * @vma_list: List of VMAs backed by this object
73 *
74 * The VMA on this list are ordered by type, all GGTT vma are placed
75 * at the head and all ppGTT vma are placed at the tail. The different
76 * types of GGTT vma are unordered between themselves, use the
77 * @vma_tree (which has a defined order between all VMA) to find an
78 * exact match.
79 */
72 struct list_head vma_list; 80 struct list_head vma_list;
81 /**
82 * @vma_tree: Ordered tree of VMAs backed by this object
83 *
84 * All VMA created for this object are placed in the @vma_tree for
85 * fast retrieval via a binary search in i915_vma_instance().
86 * They are also added to @vma_list for easy iteration.
87 */
73 struct rb_root vma_tree; 88 struct rb_root vma_tree;
89 struct i915_vma *vma_hashed;
74 90
75 /** Stolen memory for this object, instead of being backed by shmem. */ 91 /** Stolen memory for this object, instead of being backed by shmem. */
76 struct drm_mm_node *stolen; 92 struct drm_mm_node *stolen;
@@ -85,9 +101,6 @@ struct drm_i915_gem_object {
85 */ 101 */
86 struct list_head userfault_link; 102 struct list_head userfault_link;
87 103
88 /** Used in execbuf to temporarily hold a ref */
89 struct list_head obj_exec_link;
90
91 struct list_head batch_pool_link; 104 struct list_head batch_pool_link;
92 I915_SELFTEST_DECLARE(struct list_head st_link); 105 I915_SELFTEST_DECLARE(struct list_head st_link);
93 106
@@ -106,6 +119,7 @@ struct drm_i915_gem_object {
106 unsigned long gt_ro:1; 119 unsigned long gt_ro:1;
107 unsigned int cache_level:3; 120 unsigned int cache_level:3;
108 unsigned int cache_dirty:1; 121 unsigned int cache_dirty:1;
122 unsigned int cache_coherent:1;
109 123
110 atomic_t frontbuffer_bits; 124 atomic_t frontbuffer_bits;
111 unsigned int frontbuffer_ggtt_origin; /* write once */ 125 unsigned int frontbuffer_ggtt_origin; /* write once */
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 0d1e0d8873ef..8c59c79cbd8b 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -62,7 +62,7 @@ static bool i915_fence_enable_signaling(struct dma_fence *fence)
62 return false; 62 return false;
63 63
64 intel_engine_enable_signaling(to_request(fence), true); 64 intel_engine_enable_signaling(to_request(fence), true);
65 return true; 65 return !i915_fence_signaled(fence);
66} 66}
67 67
68static signed long i915_fence_wait(struct dma_fence *fence, 68static signed long i915_fence_wait(struct dma_fence *fence,
@@ -683,7 +683,6 @@ static int
683i915_gem_request_await_request(struct drm_i915_gem_request *to, 683i915_gem_request_await_request(struct drm_i915_gem_request *to,
684 struct drm_i915_gem_request *from) 684 struct drm_i915_gem_request *from)
685{ 685{
686 u32 seqno;
687 int ret; 686 int ret;
688 687
689 GEM_BUG_ON(to == from); 688 GEM_BUG_ON(to == from);
@@ -707,18 +706,14 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
707 return ret < 0 ? ret : 0; 706 return ret < 0 ? ret : 0;
708 } 707 }
709 708
710 seqno = i915_gem_request_global_seqno(from); 709 if (to->engine->semaphore.sync_to) {
711 if (!seqno) 710 u32 seqno;
712 goto await_dma_fence;
713 711
714 if (!to->engine->semaphore.sync_to) { 712 GEM_BUG_ON(!from->engine->semaphore.signal);
715 if (!__i915_gem_request_started(from, seqno))
716 goto await_dma_fence;
717 713
718 if (!__i915_spin_request(from, seqno, TASK_INTERRUPTIBLE, 2)) 714 seqno = i915_gem_request_global_seqno(from);
715 if (!seqno)
719 goto await_dma_fence; 716 goto await_dma_fence;
720 } else {
721 GEM_BUG_ON(!from->engine->semaphore.signal);
722 717
723 if (seqno <= to->timeline->global_sync[from->engine->id]) 718 if (seqno <= to->timeline->global_sync[from->engine->id])
724 return 0; 719 return 0;
@@ -729,10 +724,9 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
729 return ret; 724 return ret;
730 725
731 to->timeline->global_sync[from->engine->id] = seqno; 726 to->timeline->global_sync[from->engine->id] = seqno;
727 return 0;
732 } 728 }
733 729
734 return 0;
735
736await_dma_fence: 730await_dma_fence:
737 ret = i915_sw_fence_await_dma_fence(&to->submit, 731 ret = i915_sw_fence_await_dma_fence(&to->submit,
738 &from->fence, 0, 732 &from->fence, 0,
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 0fd2b58ce475..1032f98add11 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -38,16 +38,21 @@
38static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock) 38static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
39{ 39{
40 switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) { 40 switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
41 case MUTEX_TRYLOCK_FAILED:
42 return false;
43
44 case MUTEX_TRYLOCK_SUCCESS:
45 *unlock = true;
46 return true;
47
48 case MUTEX_TRYLOCK_RECURSIVE: 41 case MUTEX_TRYLOCK_RECURSIVE:
49 *unlock = false; 42 *unlock = false;
50 return true; 43 return true;
44
45 case MUTEX_TRYLOCK_FAILED:
46 do {
47 cpu_relax();
48 if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
49 case MUTEX_TRYLOCK_SUCCESS:
50 *unlock = true;
51 return true;
52 }
53 } while (!need_resched());
54
55 return false;
51 } 56 }
52 57
53 BUG(); 58 BUG();
@@ -332,6 +337,15 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
332 sc->nr_to_scan - freed, 337 sc->nr_to_scan - freed,
333 I915_SHRINK_BOUND | 338 I915_SHRINK_BOUND |
334 I915_SHRINK_UNBOUND); 339 I915_SHRINK_UNBOUND);
340 if (freed < sc->nr_to_scan && current_is_kswapd()) {
341 intel_runtime_pm_get(dev_priv);
342 freed += i915_gem_shrink(dev_priv,
343 sc->nr_to_scan - freed,
344 I915_SHRINK_ACTIVE |
345 I915_SHRINK_BOUND |
346 I915_SHRINK_UNBOUND);
347 intel_runtime_pm_put(dev_priv);
348 }
335 349
336 shrinker_unlock(dev_priv, unlock); 350 shrinker_unlock(dev_priv, unlock);
337 351
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 681db6083f4d..a817b3e0b17e 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -590,6 +590,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
590 obj->stolen = stolen; 590 obj->stolen = stolen;
591 obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; 591 obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
592 obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE; 592 obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
593 obj->cache_coherent = true; /* assumptions! more like cache_oblivious */
593 594
594 if (i915_gem_object_pin_pages(obj)) 595 if (i915_gem_object_pin_pages(obj))
595 goto cleanup; 596 goto cleanup;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 1a0ce1dc68f5..ccd09e8419f5 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -378,7 +378,7 @@ __i915_mm_struct_free(struct kref *kref)
378 mutex_unlock(&mm->i915->mm_lock); 378 mutex_unlock(&mm->i915->mm_lock);
379 379
380 INIT_WORK(&mm->work, __i915_mm_struct_free__worker); 380 INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
381 schedule_work(&mm->work); 381 queue_work(mm->i915->mm.userptr_wq, &mm->work);
382} 382}
383 383
384static void 384static void
@@ -598,7 +598,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
598 get_task_struct(work->task); 598 get_task_struct(work->task);
599 599
600 INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker); 600 INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
601 schedule_work(&work->work); 601 queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);
602 602
603 return ERR_PTR(-EAGAIN); 603 return ERR_PTR(-EAGAIN);
604} 604}
@@ -802,9 +802,11 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
802 802
803 drm_gem_private_object_init(dev, &obj->base, args->user_size); 803 drm_gem_private_object_init(dev, &obj->base, args->user_size);
804 i915_gem_object_init(obj, &i915_gem_userptr_ops); 804 i915_gem_object_init(obj, &i915_gem_userptr_ops);
805 obj->cache_level = I915_CACHE_LLC;
806 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
807 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 805 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
806 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
807 obj->cache_level = I915_CACHE_LLC;
808 obj->cache_coherent = i915_gem_object_is_coherent(obj);
809 obj->cache_dirty = !obj->cache_coherent;
808 810
809 obj->userptr.ptr = args->user_ptr; 811 obj->userptr.ptr = args->user_ptr;
810 obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY); 812 obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
@@ -828,8 +830,20 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
828 return 0; 830 return 0;
829} 831}
830 832
831void i915_gem_init_userptr(struct drm_i915_private *dev_priv) 833int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
832{ 834{
833 mutex_init(&dev_priv->mm_lock); 835 mutex_init(&dev_priv->mm_lock);
834 hash_init(dev_priv->mm_structs); 836 hash_init(dev_priv->mm_structs);
837
838 dev_priv->mm.userptr_wq =
839 alloc_workqueue("i915-userptr-acquire", WQ_HIGHPRI, 0);
840 if (!dev_priv->mm.userptr_wq)
841 return -ENOMEM;
842
843 return 0;
844}
845
846void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
847{
848 destroy_workqueue(dev_priv->mm.userptr_wq);
835} 849}
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index e6e0c6ef1084..48a1e9349a2c 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -105,7 +105,7 @@ static int __reserve_doorbell(struct i915_guc_client *client)
105 end += offset; 105 end += offset;
106 } 106 }
107 107
108 id = find_next_zero_bit(client->guc->doorbell_bitmap, offset, end); 108 id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
109 if (id == end) 109 if (id == end)
110 return -ENOSPC; 110 return -ENOSPC;
111 111
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7b7f55a28eec..4cd9ee1ba332 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2548,7 +2548,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2548 I915_WRITE(SDEIIR, iir); 2548 I915_WRITE(SDEIIR, iir);
2549 ret = IRQ_HANDLED; 2549 ret = IRQ_HANDLED;
2550 2550
2551 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) 2551 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
2552 HAS_PCH_CNP(dev_priv))
2552 spt_irq_handler(dev_priv, iir); 2553 spt_irq_handler(dev_priv, iir);
2553 else 2554 else
2554 cpt_irq_handler(dev_priv, iir); 2555 cpt_irq_handler(dev_priv, iir);
@@ -4289,7 +4290,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4289 dev->driver->disable_vblank = gen8_disable_vblank; 4290 dev->driver->disable_vblank = gen8_disable_vblank;
4290 if (IS_GEN9_LP(dev_priv)) 4291 if (IS_GEN9_LP(dev_priv))
4291 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4292 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4292 else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) 4293 else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
4294 HAS_PCH_CNP(dev_priv))
4293 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4295 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4294 else 4296 else
4295 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4297 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/i915_oa_bdw.c
new file mode 100644
index 000000000000..d4462c2aaaee
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.c
@@ -0,0 +1,5376 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#include <linux/sysfs.h>
30
31#include "i915_drv.h"
32#include "i915_oa_bdw.h"
33
34enum metric_set_id {
35 METRIC_SET_ID_RENDER_BASIC = 1,
36 METRIC_SET_ID_COMPUTE_BASIC,
37 METRIC_SET_ID_RENDER_PIPE_PROFILE,
38 METRIC_SET_ID_MEMORY_READS,
39 METRIC_SET_ID_MEMORY_WRITES,
40 METRIC_SET_ID_COMPUTE_EXTENDED,
41 METRIC_SET_ID_COMPUTE_L3_CACHE,
42 METRIC_SET_ID_DATA_PORT_READS_COALESCING,
43 METRIC_SET_ID_DATA_PORT_WRITES_COALESCING,
44 METRIC_SET_ID_HDC_AND_SF,
45 METRIC_SET_ID_L3_1,
46 METRIC_SET_ID_L3_2,
47 METRIC_SET_ID_L3_3,
48 METRIC_SET_ID_L3_4,
49 METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
50 METRIC_SET_ID_SAMPLER_1,
51 METRIC_SET_ID_SAMPLER_2,
52 METRIC_SET_ID_TDL_1,
53 METRIC_SET_ID_TDL_2,
54 METRIC_SET_ID_COMPUTE_EXTRA,
55 METRIC_SET_ID_VME_PIPE,
56 METRIC_SET_ID_TEST_OA,
57};
58
59int i915_oa_n_builtin_metric_sets_bdw = 22;
60
61static const struct i915_oa_reg b_counter_config_render_basic[] = {
62 { _MMIO(0x2710), 0x00000000 },
63 { _MMIO(0x2714), 0x00800000 },
64 { _MMIO(0x2720), 0x00000000 },
65 { _MMIO(0x2724), 0x00800000 },
66 { _MMIO(0x2740), 0x00000000 },
67};
68
69static const struct i915_oa_reg flex_eu_config_render_basic[] = {
70 { _MMIO(0xe458), 0x00005004 },
71 { _MMIO(0xe558), 0x00010003 },
72 { _MMIO(0xe658), 0x00012011 },
73 { _MMIO(0xe758), 0x00015014 },
74 { _MMIO(0xe45c), 0x00051050 },
75 { _MMIO(0xe55c), 0x00053052 },
76 { _MMIO(0xe65c), 0x00055054 },
77};
78
79static const struct i915_oa_reg mux_config_render_basic_0_slices_0x01[] = {
80 { _MMIO(0x9888), 0x143f000f },
81 { _MMIO(0x9888), 0x14110014 },
82 { _MMIO(0x9888), 0x14310014 },
83 { _MMIO(0x9888), 0x14bf000f },
84 { _MMIO(0x9888), 0x118a0317 },
85 { _MMIO(0x9888), 0x13837be0 },
86 { _MMIO(0x9888), 0x3b800060 },
87 { _MMIO(0x9888), 0x3d800005 },
88 { _MMIO(0x9888), 0x005c4000 },
89 { _MMIO(0x9888), 0x065c8000 },
90 { _MMIO(0x9888), 0x085cc000 },
91 { _MMIO(0x9888), 0x003d8000 },
92 { _MMIO(0x9888), 0x183d0800 },
93 { _MMIO(0x9888), 0x0a3f0023 },
94 { _MMIO(0x9888), 0x103f0000 },
95 { _MMIO(0x9888), 0x00584000 },
96 { _MMIO(0x9888), 0x08584000 },
97 { _MMIO(0x9888), 0x0a5a4000 },
98 { _MMIO(0x9888), 0x005b4000 },
99 { _MMIO(0x9888), 0x0e5b8000 },
100 { _MMIO(0x9888), 0x185b2400 },
101 { _MMIO(0x9888), 0x0a1d4000 },
102 { _MMIO(0x9888), 0x0c1f0800 },
103 { _MMIO(0x9888), 0x0e1faa00 },
104 { _MMIO(0x9888), 0x00384000 },
105 { _MMIO(0x9888), 0x0e384000 },
106 { _MMIO(0x9888), 0x16384000 },
107 { _MMIO(0x9888), 0x18380001 },
108 { _MMIO(0x9888), 0x00392000 },
109 { _MMIO(0x9888), 0x06398000 },
110 { _MMIO(0x9888), 0x0839a000 },
111 { _MMIO(0x9888), 0x0a391000 },
112 { _MMIO(0x9888), 0x00104000 },
113 { _MMIO(0x9888), 0x08104000 },
114 { _MMIO(0x9888), 0x00110030 },
115 { _MMIO(0x9888), 0x08110031 },
116 { _MMIO(0x9888), 0x10110000 },
117 { _MMIO(0x9888), 0x00134000 },
118 { _MMIO(0x9888), 0x16130020 },
119 { _MMIO(0x9888), 0x06308000 },
120 { _MMIO(0x9888), 0x08308000 },
121 { _MMIO(0x9888), 0x06311800 },
122 { _MMIO(0x9888), 0x08311880 },
123 { _MMIO(0x9888), 0x10310000 },
124 { _MMIO(0x9888), 0x0e334000 },
125 { _MMIO(0x9888), 0x16330080 },
126 { _MMIO(0x9888), 0x0abf1180 },
127 { _MMIO(0x9888), 0x10bf0000 },
128 { _MMIO(0x9888), 0x0ada8000 },
129 { _MMIO(0x9888), 0x0a9d8000 },
130 { _MMIO(0x9888), 0x109f0002 },
131 { _MMIO(0x9888), 0x0ab94000 },
132 { _MMIO(0x9888), 0x0d888000 },
133 { _MMIO(0x9888), 0x038a0380 },
134 { _MMIO(0x9888), 0x058a000e },
135 { _MMIO(0x9888), 0x018a8000 },
136 { _MMIO(0x9888), 0x0f8a8000 },
137 { _MMIO(0x9888), 0x198a8000 },
138 { _MMIO(0x9888), 0x1b8a00a0 },
139 { _MMIO(0x9888), 0x078a0000 },
140 { _MMIO(0x9888), 0x098a0000 },
141 { _MMIO(0x9888), 0x238b2820 },
142 { _MMIO(0x9888), 0x258b2550 },
143 { _MMIO(0x9888), 0x198c1000 },
144 { _MMIO(0x9888), 0x0b8d8000 },
145 { _MMIO(0x9888), 0x1f85aa80 },
146 { _MMIO(0x9888), 0x2185aaa0 },
147 { _MMIO(0x9888), 0x2385002a },
148 { _MMIO(0x9888), 0x0d831021 },
149 { _MMIO(0x9888), 0x0f83572f },
150 { _MMIO(0x9888), 0x01835680 },
151 { _MMIO(0x9888), 0x0383002c },
152 { _MMIO(0x9888), 0x11830000 },
153 { _MMIO(0x9888), 0x19835400 },
154 { _MMIO(0x9888), 0x1b830001 },
155 { _MMIO(0x9888), 0x05830000 },
156 { _MMIO(0x9888), 0x07834000 },
157 { _MMIO(0x9888), 0x09834000 },
158 { _MMIO(0x9888), 0x0184c000 },
159 { _MMIO(0x9888), 0x07848000 },
160 { _MMIO(0x9888), 0x0984c000 },
161 { _MMIO(0x9888), 0x0b84c000 },
162 { _MMIO(0x9888), 0x0d84c000 },
163 { _MMIO(0x9888), 0x0f84c000 },
164 { _MMIO(0x9888), 0x0384c000 },
165 { _MMIO(0x9888), 0x05844000 },
166 { _MMIO(0x9888), 0x1b80c137 },
167 { _MMIO(0x9888), 0x1d80c147 },
168 { _MMIO(0x9888), 0x21800000 },
169 { _MMIO(0x9888), 0x1180c000 },
170 { _MMIO(0x9888), 0x17808000 },
171 { _MMIO(0x9888), 0x1980c000 },
172 { _MMIO(0x9888), 0x1f80c000 },
173 { _MMIO(0x9888), 0x1380c000 },
174 { _MMIO(0x9888), 0x15804000 },
175 { _MMIO(0x9888), 0x4d801110 },
176 { _MMIO(0x9888), 0x4f800331 },
177 { _MMIO(0x9888), 0x43800802 },
178 { _MMIO(0x9888), 0x51800000 },
179 { _MMIO(0x9888), 0x45801465 },
180 { _MMIO(0x9888), 0x53801111 },
181 { _MMIO(0x9888), 0x478014a5 },
182 { _MMIO(0x9888), 0x31800000 },
183 { _MMIO(0x9888), 0x3f800ca5 },
184 { _MMIO(0x9888), 0x41800003 },
185};
186
187static const struct i915_oa_reg mux_config_render_basic_1_slices_0x02[] = {
188 { _MMIO(0x9888), 0x143f000f },
189 { _MMIO(0x9888), 0x14bf000f },
190 { _MMIO(0x9888), 0x14910014 },
191 { _MMIO(0x9888), 0x14b10014 },
192 { _MMIO(0x9888), 0x118a0317 },
193 { _MMIO(0x9888), 0x13837be0 },
194 { _MMIO(0x9888), 0x3b800060 },
195 { _MMIO(0x9888), 0x3d800005 },
196 { _MMIO(0x9888), 0x0a3f0023 },
197 { _MMIO(0x9888), 0x103f0000 },
198 { _MMIO(0x9888), 0x0a5a4000 },
199 { _MMIO(0x9888), 0x0a1d4000 },
200 { _MMIO(0x9888), 0x0e1f8000 },
201 { _MMIO(0x9888), 0x0a391000 },
202 { _MMIO(0x9888), 0x00dc4000 },
203 { _MMIO(0x9888), 0x06dc8000 },
204 { _MMIO(0x9888), 0x08dcc000 },
205 { _MMIO(0x9888), 0x00bd8000 },
206 { _MMIO(0x9888), 0x18bd0800 },
207 { _MMIO(0x9888), 0x0abf1180 },
208 { _MMIO(0x9888), 0x10bf0000 },
209 { _MMIO(0x9888), 0x00d84000 },
210 { _MMIO(0x9888), 0x08d84000 },
211 { _MMIO(0x9888), 0x0ada8000 },
212 { _MMIO(0x9888), 0x00db4000 },
213 { _MMIO(0x9888), 0x0edb8000 },
214 { _MMIO(0x9888), 0x18db2400 },
215 { _MMIO(0x9888), 0x0a9d8000 },
216 { _MMIO(0x9888), 0x0c9f0800 },
217 { _MMIO(0x9888), 0x0e9f2a00 },
218 { _MMIO(0x9888), 0x109f0002 },
219 { _MMIO(0x9888), 0x00b84000 },
220 { _MMIO(0x9888), 0x0eb84000 },
221 { _MMIO(0x9888), 0x16b84000 },
222 { _MMIO(0x9888), 0x18b80001 },
223 { _MMIO(0x9888), 0x00b92000 },
224 { _MMIO(0x9888), 0x06b98000 },
225 { _MMIO(0x9888), 0x08b9a000 },
226 { _MMIO(0x9888), 0x0ab94000 },
227 { _MMIO(0x9888), 0x00904000 },
228 { _MMIO(0x9888), 0x08904000 },
229 { _MMIO(0x9888), 0x00910030 },
230 { _MMIO(0x9888), 0x08910031 },
231 { _MMIO(0x9888), 0x10910000 },
232 { _MMIO(0x9888), 0x00934000 },
233 { _MMIO(0x9888), 0x16930020 },
234 { _MMIO(0x9888), 0x06b08000 },
235 { _MMIO(0x9888), 0x08b08000 },
236 { _MMIO(0x9888), 0x06b11800 },
237 { _MMIO(0x9888), 0x08b11880 },
238 { _MMIO(0x9888), 0x10b10000 },
239 { _MMIO(0x9888), 0x0eb34000 },
240 { _MMIO(0x9888), 0x16b30080 },
241 { _MMIO(0x9888), 0x01888000 },
242 { _MMIO(0x9888), 0x0d88b800 },
243 { _MMIO(0x9888), 0x038a0380 },
244 { _MMIO(0x9888), 0x058a000e },
245 { _MMIO(0x9888), 0x1b8a0080 },
246 { _MMIO(0x9888), 0x078a0000 },
247 { _MMIO(0x9888), 0x098a0000 },
248 { _MMIO(0x9888), 0x238b2840 },
249 { _MMIO(0x9888), 0x258b26a0 },
250 { _MMIO(0x9888), 0x018c4000 },
251 { _MMIO(0x9888), 0x0f8c4000 },
252 { _MMIO(0x9888), 0x178c2000 },
253 { _MMIO(0x9888), 0x198c1100 },
254 { _MMIO(0x9888), 0x018d2000 },
255 { _MMIO(0x9888), 0x078d8000 },
256 { _MMIO(0x9888), 0x098da000 },
257 { _MMIO(0x9888), 0x0b8d8000 },
258 { _MMIO(0x9888), 0x1f85aa80 },
259 { _MMIO(0x9888), 0x2185aaa0 },
260 { _MMIO(0x9888), 0x2385002a },
261 { _MMIO(0x9888), 0x0d831021 },
262 { _MMIO(0x9888), 0x0f83572f },
263 { _MMIO(0x9888), 0x01835680 },
264 { _MMIO(0x9888), 0x0383002c },
265 { _MMIO(0x9888), 0x11830000 },
266 { _MMIO(0x9888), 0x19835400 },
267 { _MMIO(0x9888), 0x1b830001 },
268 { _MMIO(0x9888), 0x05830000 },
269 { _MMIO(0x9888), 0x07834000 },
270 { _MMIO(0x9888), 0x09834000 },
271 { _MMIO(0x9888), 0x0184c000 },
272 { _MMIO(0x9888), 0x07848000 },
273 { _MMIO(0x9888), 0x0984c000 },
274 { _MMIO(0x9888), 0x0b84c000 },
275 { _MMIO(0x9888), 0x0d84c000 },
276 { _MMIO(0x9888), 0x0f84c000 },
277 { _MMIO(0x9888), 0x0384c000 },
278 { _MMIO(0x9888), 0x05844000 },
279 { _MMIO(0x9888), 0x1b80c137 },
280 { _MMIO(0x9888), 0x1d80c147 },
281 { _MMIO(0x9888), 0x21800000 },
282 { _MMIO(0x9888), 0x1180c000 },
283 { _MMIO(0x9888), 0x17808000 },
284 { _MMIO(0x9888), 0x1980c000 },
285 { _MMIO(0x9888), 0x1f80c000 },
286 { _MMIO(0x9888), 0x1380c000 },
287 { _MMIO(0x9888), 0x15804000 },
288 { _MMIO(0x9888), 0x4d801550 },
289 { _MMIO(0x9888), 0x4f800331 },
290 { _MMIO(0x9888), 0x43800802 },
291 { _MMIO(0x9888), 0x51800400 },
292 { _MMIO(0x9888), 0x458004a1 },
293 { _MMIO(0x9888), 0x53805555 },
294 { _MMIO(0x9888), 0x47800421 },
295 { _MMIO(0x9888), 0x31800000 },
296 { _MMIO(0x9888), 0x3f801421 },
297 { _MMIO(0x9888), 0x41800845 },
298};
299
300static int
301get_render_basic_mux_config(struct drm_i915_private *dev_priv,
302 const struct i915_oa_reg **regs,
303 int *lens)
304{
305 int n = 0;
306
307 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 2);
308 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 2);
309
310 if (INTEL_INFO(dev_priv)->sseu.slice_mask & 0x01) {
311 regs[n] = mux_config_render_basic_0_slices_0x01;
312 lens[n] = ARRAY_SIZE(mux_config_render_basic_0_slices_0x01);
313 n++;
314 }
315 if (INTEL_INFO(dev_priv)->sseu.slice_mask & 0x02) {
316 regs[n] = mux_config_render_basic_1_slices_0x02;
317 lens[n] = ARRAY_SIZE(mux_config_render_basic_1_slices_0x02);
318 n++;
319 }
320
321 return n;
322}
323
324static const struct i915_oa_reg b_counter_config_compute_basic[] = {
325 { _MMIO(0x2710), 0x00000000 },
326 { _MMIO(0x2714), 0x00800000 },
327 { _MMIO(0x2720), 0x00000000 },
328 { _MMIO(0x2724), 0x00800000 },
329 { _MMIO(0x2740), 0x00000000 },
330};
331
332static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
333 { _MMIO(0xe458), 0x00005004 },
334 { _MMIO(0xe558), 0x00000003 },
335 { _MMIO(0xe658), 0x00002001 },
336 { _MMIO(0xe758), 0x00778008 },
337 { _MMIO(0xe45c), 0x00088078 },
338 { _MMIO(0xe55c), 0x00808708 },
339 { _MMIO(0xe65c), 0x00a08908 },
340};
341
342static const struct i915_oa_reg mux_config_compute_basic_0_slices_0x01[] = {
343 { _MMIO(0x9888), 0x105c00e0 },
344 { _MMIO(0x9888), 0x105800e0 },
345 { _MMIO(0x9888), 0x103800e0 },
346 { _MMIO(0x9888), 0x3580001a },
347 { _MMIO(0x9888), 0x3b800060 },
348 { _MMIO(0x9888), 0x3d800005 },
349 { _MMIO(0x9888), 0x065c2100 },
350 { _MMIO(0x9888), 0x0a5c0041 },
351 { _MMIO(0x9888), 0x0c5c6600 },
352 { _MMIO(0x9888), 0x005c6580 },
353 { _MMIO(0x9888), 0x085c8000 },
354 { _MMIO(0x9888), 0x0e5c8000 },
355 { _MMIO(0x9888), 0x00580042 },
356 { _MMIO(0x9888), 0x08582080 },
357 { _MMIO(0x9888), 0x0c58004c },
358 { _MMIO(0x9888), 0x0e582580 },
359 { _MMIO(0x9888), 0x005b4000 },
360 { _MMIO(0x9888), 0x185b1000 },
361 { _MMIO(0x9888), 0x1a5b0104 },
362 { _MMIO(0x9888), 0x0c1fa800 },
363 { _MMIO(0x9888), 0x0e1faa00 },
364 { _MMIO(0x9888), 0x101f02aa },
365 { _MMIO(0x9888), 0x08380042 },
366 { _MMIO(0x9888), 0x0a382080 },
367 { _MMIO(0x9888), 0x0e38404c },
368 { _MMIO(0x9888), 0x0238404b },
369 { _MMIO(0x9888), 0x00384000 },
370 { _MMIO(0x9888), 0x16380000 },
371 { _MMIO(0x9888), 0x18381145 },
372 { _MMIO(0x9888), 0x04380000 },
373 { _MMIO(0x9888), 0x0039a000 },
374 { _MMIO(0x9888), 0x06398000 },
375 { _MMIO(0x9888), 0x0839a000 },
376 { _MMIO(0x9888), 0x0a39a000 },
377 { _MMIO(0x9888), 0x0c39a000 },
378 { _MMIO(0x9888), 0x0e39a000 },
379 { _MMIO(0x9888), 0x02392000 },
380 { _MMIO(0x9888), 0x018a8000 },
381 { _MMIO(0x9888), 0x0f8a8000 },
382 { _MMIO(0x9888), 0x198a8000 },
383 { _MMIO(0x9888), 0x1b8aaaa0 },
384 { _MMIO(0x9888), 0x1d8a0002 },
385 { _MMIO(0x9888), 0x038a8000 },
386 { _MMIO(0x9888), 0x058a8000 },
387 { _MMIO(0x9888), 0x238b02a0 },
388 { _MMIO(0x9888), 0x258b5550 },
389 { _MMIO(0x9888), 0x278b0015 },
390 { _MMIO(0x9888), 0x1f850a80 },
391 { _MMIO(0x9888), 0x2185aaa0 },
392 { _MMIO(0x9888), 0x2385002a },
393 { _MMIO(0x9888), 0x01834000 },
394 { _MMIO(0x9888), 0x0f834000 },
395 { _MMIO(0x9888), 0x19835400 },
396 { _MMIO(0x9888), 0x1b830155 },
397 { _MMIO(0x9888), 0x03834000 },
398 { _MMIO(0x9888), 0x05834000 },
399 { _MMIO(0x9888), 0x0184c000 },
400 { _MMIO(0x9888), 0x07848000 },
401 { _MMIO(0x9888), 0x0984c000 },
402 { _MMIO(0x9888), 0x0b84c000 },
403 { _MMIO(0x9888), 0x0d84c000 },
404 { _MMIO(0x9888), 0x0f84c000 },
405 { _MMIO(0x9888), 0x03844000 },
406 { _MMIO(0x9888), 0x17808137 },
407 { _MMIO(0x9888), 0x1980c147 },
408 { _MMIO(0x9888), 0x1b80c0e5 },
409 { _MMIO(0x9888), 0x1d80c0e3 },
410 { _MMIO(0x9888), 0x21800000 },
411 { _MMIO(0x9888), 0x1180c000 },
412 { _MMIO(0x9888), 0x1f80c000 },
413 { _MMIO(0x9888), 0x13804000 },
414 { _MMIO(0x9888), 0x15800000 },
415 { _MMIO(0xd24), 0x00000000 },
416 { _MMIO(0x9888), 0x4d801000 },
417 { _MMIO(0x9888), 0x4f800111 },
418 { _MMIO(0x9888), 0x43800062 },
419 { _MMIO(0x9888), 0x51800000 },
420 { _MMIO(0x9888), 0x45800062 },
421 { _MMIO(0x9888), 0x53800000 },
422 { _MMIO(0x9888), 0x47800062 },
423 { _MMIO(0x9888), 0x31800000 },
424 { _MMIO(0x9888), 0x3f801062 },
425 { _MMIO(0x9888), 0x41801084 },
426};
427
428static const struct i915_oa_reg mux_config_compute_basic_2_slices_0x02[] = {
429 { _MMIO(0x9888), 0x10dc00e0 },
430 { _MMIO(0x9888), 0x10d800e0 },
431 { _MMIO(0x9888), 0x10b800e0 },
432 { _MMIO(0x9888), 0x3580001a },
433 { _MMIO(0x9888), 0x3b800060 },
434 { _MMIO(0x9888), 0x3d800005 },
435 { _MMIO(0x9888), 0x06dc2100 },
436 { _MMIO(0x9888), 0x0adc0041 },
437 { _MMIO(0x9888), 0x0cdc6600 },
438 { _MMIO(0x9888), 0x00dc6580 },
439 { _MMIO(0x9888), 0x08dc8000 },
440 { _MMIO(0x9888), 0x0edc8000 },
441 { _MMIO(0x9888), 0x00d80042 },
442 { _MMIO(0x9888), 0x08d82080 },
443 { _MMIO(0x9888), 0x0cd8004c },
444 { _MMIO(0x9888), 0x0ed82580 },
445 { _MMIO(0x9888), 0x00db4000 },
446 { _MMIO(0x9888), 0x18db1000 },
447 { _MMIO(0x9888), 0x1adb0104 },
448 { _MMIO(0x9888), 0x0c9fa800 },
449 { _MMIO(0x9888), 0x0e9faa00 },
450 { _MMIO(0x9888), 0x109f02aa },
451 { _MMIO(0x9888), 0x08b80042 },
452 { _MMIO(0x9888), 0x0ab82080 },
453 { _MMIO(0x9888), 0x0eb8404c },
454 { _MMIO(0x9888), 0x02b8404b },
455 { _MMIO(0x9888), 0x00b84000 },
456 { _MMIO(0x9888), 0x16b80000 },
457 { _MMIO(0x9888), 0x18b81145 },
458 { _MMIO(0x9888), 0x04b80000 },
459 { _MMIO(0x9888), 0x00b9a000 },
460 { _MMIO(0x9888), 0x06b98000 },
461 { _MMIO(0x9888), 0x08b9a000 },
462 { _MMIO(0x9888), 0x0ab9a000 },
463 { _MMIO(0x9888), 0x0cb9a000 },
464 { _MMIO(0x9888), 0x0eb9a000 },
465 { _MMIO(0x9888), 0x02b92000 },
466 { _MMIO(0x9888), 0x01888000 },
467 { _MMIO(0x9888), 0x0d88f800 },
468 { _MMIO(0x9888), 0x0f88000f },
469 { _MMIO(0x9888), 0x03888000 },
470 { _MMIO(0x9888), 0x05888000 },
471 { _MMIO(0x9888), 0x238b0540 },
472 { _MMIO(0x9888), 0x258baaa0 },
473 { _MMIO(0x9888), 0x278b002a },
474 { _MMIO(0x9888), 0x018c4000 },
475 { _MMIO(0x9888), 0x0f8c4000 },
476 { _MMIO(0x9888), 0x178c2000 },
477 { _MMIO(0x9888), 0x198c5500 },
478 { _MMIO(0x9888), 0x1b8c0015 },
479 { _MMIO(0x9888), 0x038c4000 },
480 { _MMIO(0x9888), 0x058c4000 },
481 { _MMIO(0x9888), 0x018da000 },
482 { _MMIO(0x9888), 0x078d8000 },
483 { _MMIO(0x9888), 0x098da000 },
484 { _MMIO(0x9888), 0x0b8da000 },
485 { _MMIO(0x9888), 0x0d8da000 },
486 { _MMIO(0x9888), 0x0f8da000 },
487 { _MMIO(0x9888), 0x038d2000 },
488 { _MMIO(0x9888), 0x1f850a80 },
489 { _MMIO(0x9888), 0x2185aaa0 },
490 { _MMIO(0x9888), 0x2385002a },
491 { _MMIO(0x9888), 0x01834000 },
492 { _MMIO(0x9888), 0x0f834000 },
493 { _MMIO(0x9888), 0x19835400 },
494 { _MMIO(0x9888), 0x1b830155 },
495 { _MMIO(0x9888), 0x03834000 },
496 { _MMIO(0x9888), 0x05834000 },
497 { _MMIO(0x9888), 0x0184c000 },
498 { _MMIO(0x9888), 0x07848000 },
499 { _MMIO(0x9888), 0x0984c000 },
500 { _MMIO(0x9888), 0x0b84c000 },
501 { _MMIO(0x9888), 0x0d84c000 },
502 { _MMIO(0x9888), 0x0f84c000 },
503 { _MMIO(0x9888), 0x03844000 },
504 { _MMIO(0x9888), 0x17808137 },
505 { _MMIO(0x9888), 0x1980c147 },
506 { _MMIO(0x9888), 0x1b80c0e5 },
507 { _MMIO(0x9888), 0x1d80c0e3 },
508 { _MMIO(0x9888), 0x21800000 },
509 { _MMIO(0x9888), 0x1180c000 },
510 { _MMIO(0x9888), 0x1f80c000 },
511 { _MMIO(0x9888), 0x13804000 },
512 { _MMIO(0x9888), 0x15800000 },
513 { _MMIO(0xd24), 0x00000000 },
514 { _MMIO(0x9888), 0x4d805000 },
515 { _MMIO(0x9888), 0x4f800555 },
516 { _MMIO(0x9888), 0x43800062 },
517 { _MMIO(0x9888), 0x51800000 },
518 { _MMIO(0x9888), 0x45800062 },
519 { _MMIO(0x9888), 0x53800000 },
520 { _MMIO(0x9888), 0x47800062 },
521 { _MMIO(0x9888), 0x31800000 },
522 { _MMIO(0x9888), 0x3f800062 },
523 { _MMIO(0x9888), 0x41800000 },
524};
525
526static int
527get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
528 const struct i915_oa_reg **regs,
529 int *lens)
530{
531 int n = 0;
532
533 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 2);
534 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 2);
535
536 if (INTEL_INFO(dev_priv)->sseu.slice_mask & 0x01) {
537 regs[n] = mux_config_compute_basic_0_slices_0x01;
538 lens[n] = ARRAY_SIZE(mux_config_compute_basic_0_slices_0x01);
539 n++;
540 }
541 if (INTEL_INFO(dev_priv)->sseu.slice_mask & 0x02) {
542 regs[n] = mux_config_compute_basic_2_slices_0x02;
543 lens[n] = ARRAY_SIZE(mux_config_compute_basic_2_slices_0x02);
544 n++;
545 }
546
547 return n;
548}
549
/*
 * Boolean-counter (0x27xx OA) register/value pairs for the
 * render_pipe_profile metric set. NOTE(review): values appear
 * machine-generated; verify against the upstream metric-set
 * definition before hand-editing.
 */
static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007ffea },
	{ _MMIO(0x2774), 0x00007ffc },
	{ _MMIO(0x2778), 0x0007affa },
	{ _MMIO(0x277c), 0x0000f5fd },
	{ _MMIO(0x2780), 0x00079ffa },
	{ _MMIO(0x2784), 0x0000f3fb },
	{ _MMIO(0x2788), 0x0007bf7a },
	{ _MMIO(0x278c), 0x0000f7e7 },
	{ _MMIO(0x2790), 0x0007fefa },
	{ _MMIO(0x2794), 0x0000f7cf },
	{ _MMIO(0x2798), 0x00077ffa },
	{ _MMIO(0x279c), 0x0000efdf },
	{ _MMIO(0x27a0), 0x0006fffa },
	{ _MMIO(0x27a4), 0x0000cfbf },
	{ _MMIO(0x27a8), 0x0003fffa },
	{ _MMIO(0x27ac), 0x00005f7f },
};
573
/* Flexible-EU counter programming (0xe4xx/0xe5xx/0xe6xx) for render_pipe_profile. */
static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
583
/*
 * MUX select programming for the render_pipe_profile metric set,
 * written via register 0x9888 (plus 0xd24). Values are opaque
 * hardware programming — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
	{ _MMIO(0x9888), 0x0a1e0000 },
	{ _MMIO(0x9888), 0x0c1f000f },
	{ _MMIO(0x9888), 0x10176800 },
	{ _MMIO(0x9888), 0x1191001f },
	{ _MMIO(0x9888), 0x0b880320 },
	{ _MMIO(0x9888), 0x01890c40 },
	{ _MMIO(0x9888), 0x118a1c00 },
	{ _MMIO(0x9888), 0x118d7c00 },
	{ _MMIO(0x9888), 0x118e0020 },
	{ _MMIO(0x9888), 0x118f4c00 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x13900001 },
	{ _MMIO(0x9888), 0x065c4000 },
	{ _MMIO(0x9888), 0x0c3d8000 },
	{ _MMIO(0x9888), 0x06584000 },
	{ _MMIO(0x9888), 0x0c5b4000 },
	{ _MMIO(0x9888), 0x081e0040 },
	{ _MMIO(0x9888), 0x0e1e0000 },
	{ _MMIO(0x9888), 0x021f5400 },
	{ _MMIO(0x9888), 0x001f0000 },
	{ _MMIO(0x9888), 0x101f0010 },
	{ _MMIO(0x9888), 0x0e1f0080 },
	{ _MMIO(0x9888), 0x0c384000 },
	{ _MMIO(0x9888), 0x06392000 },
	{ _MMIO(0x9888), 0x0c13c000 },
	{ _MMIO(0x9888), 0x06164000 },
	{ _MMIO(0x9888), 0x06170012 },
	{ _MMIO(0x9888), 0x00170000 },
	{ _MMIO(0x9888), 0x01910005 },
	{ _MMIO(0x9888), 0x07880002 },
	{ _MMIO(0x9888), 0x01880c00 },
	{ _MMIO(0x9888), 0x0f880000 },
	{ _MMIO(0x9888), 0x0d880000 },
	{ _MMIO(0x9888), 0x05880000 },
	{ _MMIO(0x9888), 0x09890032 },
	{ _MMIO(0x9888), 0x078a0800 },
	{ _MMIO(0x9888), 0x0f8a0a00 },
	{ _MMIO(0x9888), 0x198a4000 },
	{ _MMIO(0x9888), 0x1b8a2000 },
	{ _MMIO(0x9888), 0x1d8a0000 },
	{ _MMIO(0x9888), 0x038a4000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x0d8a8000 },
	{ _MMIO(0x9888), 0x238b54c0 },
	{ _MMIO(0x9888), 0x258baa55 },
	{ _MMIO(0x9888), 0x278b0019 },
	{ _MMIO(0x9888), 0x198c0100 },
	{ _MMIO(0x9888), 0x058c4000 },
	{ _MMIO(0x9888), 0x0f8d0015 },
	{ _MMIO(0x9888), 0x018d1000 },
	{ _MMIO(0x9888), 0x098d8000 },
	{ _MMIO(0x9888), 0x0b8df000 },
	{ _MMIO(0x9888), 0x0d8d3000 },
	{ _MMIO(0x9888), 0x038de000 },
	{ _MMIO(0x9888), 0x058d3000 },
	{ _MMIO(0x9888), 0x0d8e0004 },
	{ _MMIO(0x9888), 0x058e000c },
	{ _MMIO(0x9888), 0x098e0000 },
	{ _MMIO(0x9888), 0x078e0000 },
	{ _MMIO(0x9888), 0x038e0000 },
	{ _MMIO(0x9888), 0x0b8f0020 },
	{ _MMIO(0x9888), 0x198f0c00 },
	{ _MMIO(0x9888), 0x078f8000 },
	{ _MMIO(0x9888), 0x098f4000 },
	{ _MMIO(0x9888), 0x0b900980 },
	{ _MMIO(0x9888), 0x03900d80 },
	{ _MMIO(0x9888), 0x01900000 },
	{ _MMIO(0x9888), 0x1f85aa80 },
	{ _MMIO(0x9888), 0x2185aaaa },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x01834000 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0184c000 },
	{ _MMIO(0x9888), 0x0784c000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x1180c000 },
	{ _MMIO(0x9888), 0x1780c000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0xd24), 0x00000000 },
	{ _MMIO(0x9888), 0x4d801111 },
	{ _MMIO(0x9888), 0x3d800800 },
	{ _MMIO(0x9888), 0x4f801011 },
	{ _MMIO(0x9888), 0x43800443 },
	{ _MMIO(0x9888), 0x51801111 },
	{ _MMIO(0x9888), 0x45800422 },
	{ _MMIO(0x9888), 0x53801111 },
	{ _MMIO(0x9888), 0x47800c60 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x3f800422 },
	{ _MMIO(0x9888), 0x41800021 },
};
695
696static int
697get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
698 const struct i915_oa_reg **regs,
699 int *lens)
700{
701 int n = 0;
702
703 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
704 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
705
706 regs[n] = mux_config_render_pipe_profile;
707 lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
708 n++;
709
710 return n;
711}
712
/*
 * Boolean-counter (0x27xx OA) register/value pairs for the memory_reads
 * metric set. NOTE(review): values appear machine-generated; verify
 * against the upstream metric-set definition before hand-editing.
 */
static const struct i915_oa_reg b_counter_config_memory_reads[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x86543210 },
	{ _MMIO(0x2748), 0x86543210 },
	{ _MMIO(0x2744), 0x00006667 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x86543210 },
	{ _MMIO(0x2758), 0x86543210 },
	{ _MMIO(0x2754), 0x00006465 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fe00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fe00 },
	{ _MMIO(0x2780), 0x0007f872 },
	{ _MMIO(0x2784), 0x0000fe00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fe00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fe00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fe00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fe00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fe00 },
};
743
/* Flexible-EU counter programming (0xe4xx/0xe5xx/0xe6xx) for memory_reads. */
static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
753
/*
 * MUX select programming for the memory_reads metric set, written via
 * register 0x9888 (plus 0xd24). Values are opaque hardware programming —
 * do not hand-edit.
 */
static const struct i915_oa_reg mux_config_memory_reads[] = {
	{ _MMIO(0x9888), 0x198b0343 },
	{ _MMIO(0x9888), 0x13845800 },
	{ _MMIO(0x9888), 0x15840018 },
	{ _MMIO(0x9888), 0x3580001a },
	{ _MMIO(0x9888), 0x038b6300 },
	{ _MMIO(0x9888), 0x058b6b62 },
	{ _MMIO(0x9888), 0x078b006a },
	{ _MMIO(0x9888), 0x118b0000 },
	{ _MMIO(0x9888), 0x238b0000 },
	{ _MMIO(0x9888), 0x258b0000 },
	{ _MMIO(0x9888), 0x1f85a080 },
	{ _MMIO(0x9888), 0x2185aaaa },
	{ _MMIO(0x9888), 0x2385000a },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x01840018 },
	{ _MMIO(0x9888), 0x07844c80 },
	{ _MMIO(0x9888), 0x09840d9a },
	{ _MMIO(0x9888), 0x0b840e9c },
	{ _MMIO(0x9888), 0x0d840f9e },
	{ _MMIO(0x9888), 0x0f840010 },
	{ _MMIO(0x9888), 0x11840000 },
	{ _MMIO(0x9888), 0x03848000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x2f8000e5 },
	{ _MMIO(0x9888), 0x138080e3 },
	{ _MMIO(0x9888), 0x1580c0e1 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x11804000 },
	{ _MMIO(0x9888), 0x1780c000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f804000 },
	{ _MMIO(0xd24), 0x00000000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3d800800 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x43800842 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800842 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47801042 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x3f800084 },
	{ _MMIO(0x9888), 0x41800000 },
};
804
805static int
806get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
807 const struct i915_oa_reg **regs,
808 int *lens)
809{
810 int n = 0;
811
812 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
813 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
814
815 regs[n] = mux_config_memory_reads;
816 lens[n] = ARRAY_SIZE(mux_config_memory_reads);
817 n++;
818
819 return n;
820}
821
/*
 * Boolean-counter (0x27xx OA) register/value pairs for the memory_writes
 * metric set. Differs from the memory_reads table only at 0x2780.
 * NOTE(review): values appear machine-generated; verify against the
 * upstream metric-set definition before hand-editing.
 */
static const struct i915_oa_reg b_counter_config_memory_writes[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x86543210 },
	{ _MMIO(0x2748), 0x86543210 },
	{ _MMIO(0x2744), 0x00006667 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x86543210 },
	{ _MMIO(0x2758), 0x86543210 },
	{ _MMIO(0x2754), 0x00006465 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fe00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fe00 },
	{ _MMIO(0x2780), 0x0007f822 },
	{ _MMIO(0x2784), 0x0000fe00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fe00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fe00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fe00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fe00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fe00 },
};
852
/* Flexible-EU counter programming (0xe4xx/0xe5xx/0xe6xx) for memory_writes. */
static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
862
/*
 * MUX select programming for the memory_writes metric set, written via
 * register 0x9888 (plus 0xd24). Values are opaque hardware programming —
 * do not hand-edit.
 */
static const struct i915_oa_reg mux_config_memory_writes[] = {
	{ _MMIO(0x9888), 0x198b0343 },
	{ _MMIO(0x9888), 0x13845400 },
	{ _MMIO(0x9888), 0x3580001a },
	{ _MMIO(0x9888), 0x3d800805 },
	{ _MMIO(0x9888), 0x038b6300 },
	{ _MMIO(0x9888), 0x058b6b62 },
	{ _MMIO(0x9888), 0x078b006a },
	{ _MMIO(0x9888), 0x118b0000 },
	{ _MMIO(0x9888), 0x238b0000 },
	{ _MMIO(0x9888), 0x258b0000 },
	{ _MMIO(0x9888), 0x1f85a080 },
	{ _MMIO(0x9888), 0x2185aaaa },
	{ _MMIO(0x9888), 0x23850002 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x01840010 },
	{ _MMIO(0x9888), 0x07844880 },
	{ _MMIO(0x9888), 0x09840992 },
	{ _MMIO(0x9888), 0x0b840a94 },
	{ _MMIO(0x9888), 0x0d840b96 },
	{ _MMIO(0x9888), 0x11840000 },
	{ _MMIO(0x9888), 0x03848000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x2d800147 },
	{ _MMIO(0x9888), 0x2f8000e5 },
	{ _MMIO(0x9888), 0x138080e3 },
	{ _MMIO(0x9888), 0x1580c0e1 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x11804000 },
	{ _MMIO(0x9888), 0x1780c000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f800000 },
	{ _MMIO(0xd24), 0x00000000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x43800842 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800842 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47801082 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x3f800084 },
	{ _MMIO(0x9888), 0x41800000 },
};
912
913static int
914get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
915 const struct i915_oa_reg **regs,
916 int *lens)
917{
918 int n = 0;
919
920 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
921 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
922
923 regs[n] = mux_config_memory_writes;
924 lens[n] = ARRAY_SIZE(mux_config_memory_writes);
925 n++;
926
927 return n;
928}
929
/*
 * Boolean-counter (0x27xx OA) register/value pairs for the
 * compute_extended metric set. NOTE(review): values appear
 * machine-generated; verify against the upstream metric-set
 * definition before hand-editing.
 */
static const struct i915_oa_reg b_counter_config_compute_extended[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fc2a },
	{ _MMIO(0x2774), 0x0000bf00 },
	{ _MMIO(0x2778), 0x0007fc6a },
	{ _MMIO(0x277c), 0x0000bf00 },
	{ _MMIO(0x2780), 0x0007fc92 },
	{ _MMIO(0x2784), 0x0000bf00 },
	{ _MMIO(0x2788), 0x0007fca2 },
	{ _MMIO(0x278c), 0x0000bf00 },
	{ _MMIO(0x2790), 0x0007fc32 },
	{ _MMIO(0x2794), 0x0000bf00 },
	{ _MMIO(0x2798), 0x0007fc9a },
	{ _MMIO(0x279c), 0x0000bf00 },
	{ _MMIO(0x27a0), 0x0007fe6a },
	{ _MMIO(0x27a4), 0x0000bf00 },
	{ _MMIO(0x27a8), 0x0007fe7a },
	{ _MMIO(0x27ac), 0x0000bf00 },
};
953
/* Flexible-EU counter programming (0xe4xx/0xe5xx/0xe6xx) for compute_extended. */
static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};
963
/*
 * MUX select programming for compute_extended, variant selected when
 * subslice mask bit 0x01 is set (naming convention — confirm against the
 * selector function). Written via register 0x9888 (plus 0xd24); values
 * are opaque hardware programming — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_extended_0_subslices_0x01[] = {
	{ _MMIO(0x9888), 0x143d0160 },
	{ _MMIO(0x9888), 0x163d2800 },
	{ _MMIO(0x9888), 0x183d0120 },
	{ _MMIO(0x9888), 0x105800e0 },
	{ _MMIO(0x9888), 0x005cc000 },
	{ _MMIO(0x9888), 0x065c8000 },
	{ _MMIO(0x9888), 0x085cc000 },
	{ _MMIO(0x9888), 0x0a5cc000 },
	{ _MMIO(0x9888), 0x0c5cc000 },
	{ _MMIO(0x9888), 0x0e5cc000 },
	{ _MMIO(0x9888), 0x025cc000 },
	{ _MMIO(0x9888), 0x045cc000 },
	{ _MMIO(0x9888), 0x003d0011 },
	{ _MMIO(0x9888), 0x063d0900 },
	{ _MMIO(0x9888), 0x083d0a13 },
	{ _MMIO(0x9888), 0x0a3d0b15 },
	{ _MMIO(0x9888), 0x0c3d2317 },
	{ _MMIO(0x9888), 0x043d21b7 },
	{ _MMIO(0x9888), 0x103d0000 },
	{ _MMIO(0x9888), 0x0e3d0000 },
	{ _MMIO(0x9888), 0x1a3d0000 },
	{ _MMIO(0x9888), 0x0e5825c1 },
	{ _MMIO(0x9888), 0x00586100 },
	{ _MMIO(0x9888), 0x0258204c },
	{ _MMIO(0x9888), 0x06588000 },
	{ _MMIO(0x9888), 0x0858c000 },
	{ _MMIO(0x9888), 0x0a58c000 },
	{ _MMIO(0x9888), 0x0c58c000 },
	{ _MMIO(0x9888), 0x0458c000 },
	{ _MMIO(0x9888), 0x005b4000 },
	{ _MMIO(0x9888), 0x0e5b4000 },
	{ _MMIO(0x9888), 0x185b5400 },
	{ _MMIO(0x9888), 0x1a5b0155 },
	{ _MMIO(0x9888), 0x025b4000 },
	{ _MMIO(0x9888), 0x045b4000 },
	{ _MMIO(0x9888), 0x065b4000 },
	{ _MMIO(0x9888), 0x085b4000 },
	{ _MMIO(0x9888), 0x0a5b4000 },
	{ _MMIO(0x9888), 0x0c1fa800 },
	{ _MMIO(0x9888), 0x0e1faa2a },
	{ _MMIO(0x9888), 0x101f02aa },
	{ _MMIO(0x9888), 0x00384000 },
	{ _MMIO(0x9888), 0x0e384000 },
	{ _MMIO(0x9888), 0x16384000 },
	{ _MMIO(0x9888), 0x18381555 },
	{ _MMIO(0x9888), 0x02384000 },
	{ _MMIO(0x9888), 0x04384000 },
	{ _MMIO(0x9888), 0x06384000 },
	{ _MMIO(0x9888), 0x08384000 },
	{ _MMIO(0x9888), 0x0a384000 },
	{ _MMIO(0x9888), 0x0039a000 },
	{ _MMIO(0x9888), 0x06398000 },
	{ _MMIO(0x9888), 0x0839a000 },
	{ _MMIO(0x9888), 0x0a39a000 },
	{ _MMIO(0x9888), 0x0c39a000 },
	{ _MMIO(0x9888), 0x0e39a000 },
	{ _MMIO(0x9888), 0x0239a000 },
	{ _MMIO(0x9888), 0x0439a000 },
	{ _MMIO(0x9888), 0x018a8000 },
	{ _MMIO(0x9888), 0x0f8a8000 },
	{ _MMIO(0x9888), 0x198a8000 },
	{ _MMIO(0x9888), 0x1b8aaaa0 },
	{ _MMIO(0x9888), 0x1d8a0002 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x078a8000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x238b2aa0 },
	{ _MMIO(0x9888), 0x258b5551 },
	{ _MMIO(0x9888), 0x278b0015 },
	{ _MMIO(0x9888), 0x1f85aa80 },
	{ _MMIO(0x9888), 0x2185aaa2 },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x01834000 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0184c000 },
	{ _MMIO(0x9888), 0x07848000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x1180c000 },
	{ _MMIO(0x9888), 0x17808000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0xd24), 0x00000000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3d800000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x43800000 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47800420 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x3f800421 },
	{ _MMIO(0x9888), 0x41800000 },
};
1078
/*
 * MUX select programming for compute_extended, variant selected when
 * subslice mask bit 0x02 is set (naming convention — confirm against the
 * selector function). Written via register 0x9888 (plus 0xd24); values
 * are opaque hardware programming — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_extended_2_subslices_0x02[] = {
	{ _MMIO(0x9888), 0x105c00e0 },
	{ _MMIO(0x9888), 0x145b0160 },
	{ _MMIO(0x9888), 0x165b2800 },
	{ _MMIO(0x9888), 0x185b0120 },
	{ _MMIO(0x9888), 0x0e5c25c1 },
	{ _MMIO(0x9888), 0x005c6100 },
	{ _MMIO(0x9888), 0x025c204c },
	{ _MMIO(0x9888), 0x065c8000 },
	{ _MMIO(0x9888), 0x085cc000 },
	{ _MMIO(0x9888), 0x0a5cc000 },
	{ _MMIO(0x9888), 0x0c5cc000 },
	{ _MMIO(0x9888), 0x045cc000 },
	{ _MMIO(0x9888), 0x005b0011 },
	{ _MMIO(0x9888), 0x065b0900 },
	{ _MMIO(0x9888), 0x085b0a13 },
	{ _MMIO(0x9888), 0x0a5b0b15 },
	{ _MMIO(0x9888), 0x0c5b2317 },
	{ _MMIO(0x9888), 0x045b21b7 },
	{ _MMIO(0x9888), 0x105b0000 },
	{ _MMIO(0x9888), 0x0e5b0000 },
	{ _MMIO(0x9888), 0x1a5b0000 },
	{ _MMIO(0x9888), 0x0c1fa800 },
	{ _MMIO(0x9888), 0x0e1faa2a },
	{ _MMIO(0x9888), 0x101f02aa },
	{ _MMIO(0x9888), 0x00384000 },
	{ _MMIO(0x9888), 0x0e384000 },
	{ _MMIO(0x9888), 0x16384000 },
	{ _MMIO(0x9888), 0x18381555 },
	{ _MMIO(0x9888), 0x02384000 },
	{ _MMIO(0x9888), 0x04384000 },
	{ _MMIO(0x9888), 0x06384000 },
	{ _MMIO(0x9888), 0x08384000 },
	{ _MMIO(0x9888), 0x0a384000 },
	{ _MMIO(0x9888), 0x0039a000 },
	{ _MMIO(0x9888), 0x06398000 },
	{ _MMIO(0x9888), 0x0839a000 },
	{ _MMIO(0x9888), 0x0a39a000 },
	{ _MMIO(0x9888), 0x0c39a000 },
	{ _MMIO(0x9888), 0x0e39a000 },
	{ _MMIO(0x9888), 0x0239a000 },
	{ _MMIO(0x9888), 0x0439a000 },
	{ _MMIO(0x9888), 0x018a8000 },
	{ _MMIO(0x9888), 0x0f8a8000 },
	{ _MMIO(0x9888), 0x198a8000 },
	{ _MMIO(0x9888), 0x1b8aaaa0 },
	{ _MMIO(0x9888), 0x1d8a0002 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x078a8000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x238b2aa0 },
	{ _MMIO(0x9888), 0x258b5551 },
	{ _MMIO(0x9888), 0x278b0015 },
	{ _MMIO(0x9888), 0x1f85aa80 },
	{ _MMIO(0x9888), 0x2185aaa2 },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x01834000 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0184c000 },
	{ _MMIO(0x9888), 0x07848000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x1180c000 },
	{ _MMIO(0x9888), 0x17808000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0xd24), 0x00000000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3d800000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x43800000 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47800420 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x3f800421 },
	{ _MMIO(0x9888), 0x41800000 },
};
1176
/*
 * MUX select programming for compute_extended, variant selected when
 * subslice mask bit 0x04 is set (naming convention — confirm against the
 * selector function). Written via register 0x9888 (plus 0xd24); values
 * are opaque hardware programming — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_extended_4_subslices_0x04[] = {
	{ _MMIO(0x9888), 0x103800e0 },
	{ _MMIO(0x9888), 0x143a0160 },
	{ _MMIO(0x9888), 0x163a2800 },
	{ _MMIO(0x9888), 0x183a0120 },
	{ _MMIO(0x9888), 0x0c1fa800 },
	{ _MMIO(0x9888), 0x0e1faa2a },
	{ _MMIO(0x9888), 0x101f02aa },
	{ _MMIO(0x9888), 0x0e38a5c1 },
	{ _MMIO(0x9888), 0x0038a100 },
	{ _MMIO(0x9888), 0x0238204c },
	{ _MMIO(0x9888), 0x16388000 },
	{ _MMIO(0x9888), 0x183802aa },
	{ _MMIO(0x9888), 0x04380000 },
	{ _MMIO(0x9888), 0x06380000 },
	{ _MMIO(0x9888), 0x08388000 },
	{ _MMIO(0x9888), 0x0a388000 },
	{ _MMIO(0x9888), 0x0039a000 },
	{ _MMIO(0x9888), 0x06398000 },
	{ _MMIO(0x9888), 0x0839a000 },
	{ _MMIO(0x9888), 0x0a39a000 },
	{ _MMIO(0x9888), 0x0c39a000 },
	{ _MMIO(0x9888), 0x0e39a000 },
	{ _MMIO(0x9888), 0x0239a000 },
	{ _MMIO(0x9888), 0x0439a000 },
	{ _MMIO(0x9888), 0x003a0011 },
	{ _MMIO(0x9888), 0x063a0900 },
	{ _MMIO(0x9888), 0x083a0a13 },
	{ _MMIO(0x9888), 0x0a3a0b15 },
	{ _MMIO(0x9888), 0x0c3a2317 },
	{ _MMIO(0x9888), 0x043a21b7 },
	{ _MMIO(0x9888), 0x103a0000 },
	{ _MMIO(0x9888), 0x0e3a0000 },
	{ _MMIO(0x9888), 0x1a3a0000 },
	{ _MMIO(0x9888), 0x018a8000 },
	{ _MMIO(0x9888), 0x0f8a8000 },
	{ _MMIO(0x9888), 0x198a8000 },
	{ _MMIO(0x9888), 0x1b8aaaa0 },
	{ _MMIO(0x9888), 0x1d8a0002 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x078a8000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x238b2aa0 },
	{ _MMIO(0x9888), 0x258b5551 },
	{ _MMIO(0x9888), 0x278b0015 },
	{ _MMIO(0x9888), 0x1f85aa80 },
	{ _MMIO(0x9888), 0x2185aaa2 },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x01834000 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0184c000 },
	{ _MMIO(0x9888), 0x07848000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x1180c000 },
	{ _MMIO(0x9888), 0x17808000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0xd24), 0x00000000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3d800000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x43800000 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47800420 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x3f800421 },
	{ _MMIO(0x9888), 0x41800000 },
};
1266
/*
 * MUX select programming for compute_extended, variant selected when
 * subslice mask bit 0x08 is set (naming convention — confirm against the
 * selector function). Written via register 0x9888 (plus 0xd24); values
 * are opaque hardware programming — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_extended_1_subslices_0x08[] = {
	{ _MMIO(0x9888), 0x14bd0160 },
	{ _MMIO(0x9888), 0x16bd2800 },
	{ _MMIO(0x9888), 0x18bd0120 },
	{ _MMIO(0x9888), 0x10d800e0 },
	{ _MMIO(0x9888), 0x00dcc000 },
	{ _MMIO(0x9888), 0x06dc8000 },
	{ _MMIO(0x9888), 0x08dcc000 },
	{ _MMIO(0x9888), 0x0adcc000 },
	{ _MMIO(0x9888), 0x0cdcc000 },
	{ _MMIO(0x9888), 0x0edcc000 },
	{ _MMIO(0x9888), 0x02dcc000 },
	{ _MMIO(0x9888), 0x04dcc000 },
	{ _MMIO(0x9888), 0x00bd0011 },
	{ _MMIO(0x9888), 0x06bd0900 },
	{ _MMIO(0x9888), 0x08bd0a13 },
	{ _MMIO(0x9888), 0x0abd0b15 },
	{ _MMIO(0x9888), 0x0cbd2317 },
	{ _MMIO(0x9888), 0x04bd21b7 },
	{ _MMIO(0x9888), 0x10bd0000 },
	{ _MMIO(0x9888), 0x0ebd0000 },
	{ _MMIO(0x9888), 0x1abd0000 },
	{ _MMIO(0x9888), 0x0ed825c1 },
	{ _MMIO(0x9888), 0x00d86100 },
	{ _MMIO(0x9888), 0x02d8204c },
	{ _MMIO(0x9888), 0x06d88000 },
	{ _MMIO(0x9888), 0x08d8c000 },
	{ _MMIO(0x9888), 0x0ad8c000 },
	{ _MMIO(0x9888), 0x0cd8c000 },
	{ _MMIO(0x9888), 0x04d8c000 },
	{ _MMIO(0x9888), 0x00db4000 },
	{ _MMIO(0x9888), 0x0edb4000 },
	{ _MMIO(0x9888), 0x18db5400 },
	{ _MMIO(0x9888), 0x1adb0155 },
	{ _MMIO(0x9888), 0x02db4000 },
	{ _MMIO(0x9888), 0x04db4000 },
	{ _MMIO(0x9888), 0x06db4000 },
	{ _MMIO(0x9888), 0x08db4000 },
	{ _MMIO(0x9888), 0x0adb4000 },
	{ _MMIO(0x9888), 0x0c9fa800 },
	{ _MMIO(0x9888), 0x0e9faa2a },
	{ _MMIO(0x9888), 0x109f02aa },
	{ _MMIO(0x9888), 0x00b84000 },
	{ _MMIO(0x9888), 0x0eb84000 },
	{ _MMIO(0x9888), 0x16b84000 },
	{ _MMIO(0x9888), 0x18b81555 },
	{ _MMIO(0x9888), 0x02b84000 },
	{ _MMIO(0x9888), 0x04b84000 },
	{ _MMIO(0x9888), 0x06b84000 },
	{ _MMIO(0x9888), 0x08b84000 },
	{ _MMIO(0x9888), 0x0ab84000 },
	{ _MMIO(0x9888), 0x00b9a000 },
	{ _MMIO(0x9888), 0x06b98000 },
	{ _MMIO(0x9888), 0x08b9a000 },
	{ _MMIO(0x9888), 0x0ab9a000 },
	{ _MMIO(0x9888), 0x0cb9a000 },
	{ _MMIO(0x9888), 0x0eb9a000 },
	{ _MMIO(0x9888), 0x02b9a000 },
	{ _MMIO(0x9888), 0x04b9a000 },
	{ _MMIO(0x9888), 0x01888000 },
	{ _MMIO(0x9888), 0x0d88f800 },
	{ _MMIO(0x9888), 0x0f88000f },
	{ _MMIO(0x9888), 0x03888000 },
	{ _MMIO(0x9888), 0x05888000 },
	{ _MMIO(0x9888), 0x07888000 },
	{ _MMIO(0x9888), 0x09888000 },
	{ _MMIO(0x9888), 0x0b888000 },
	{ _MMIO(0x9888), 0x238b5540 },
	{ _MMIO(0x9888), 0x258baaa2 },
	{ _MMIO(0x9888), 0x278b002a },
	{ _MMIO(0x9888), 0x018c4000 },
	{ _MMIO(0x9888), 0x0f8c4000 },
	{ _MMIO(0x9888), 0x178c2000 },
	{ _MMIO(0x9888), 0x198c5500 },
	{ _MMIO(0x9888), 0x1b8c0015 },
	{ _MMIO(0x9888), 0x038c4000 },
	{ _MMIO(0x9888), 0x058c4000 },
	{ _MMIO(0x9888), 0x078c4000 },
	{ _MMIO(0x9888), 0x098c4000 },
	{ _MMIO(0x9888), 0x0b8c4000 },
	{ _MMIO(0x9888), 0x018da000 },
	{ _MMIO(0x9888), 0x078d8000 },
	{ _MMIO(0x9888), 0x098da000 },
	{ _MMIO(0x9888), 0x0b8da000 },
	{ _MMIO(0x9888), 0x0d8da000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x038da000 },
	{ _MMIO(0x9888), 0x058da000 },
	{ _MMIO(0x9888), 0x1f85aa80 },
	{ _MMIO(0x9888), 0x2185aaa2 },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x01834000 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0184c000 },
	{ _MMIO(0x9888), 0x07848000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x1180c000 },
	{ _MMIO(0x9888), 0x17808000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0xd24), 0x00000000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3d800000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x43800000 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47800420 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x3f800421 },
	{ _MMIO(0x9888), 0x41800000 },
};
1397
/*
 * MUX configuration for the "compute_extended" metric set, variant 3 —
 * selected when subslice mask bit 0x10 is present (see
 * get_compute_extended_mux_config()). Mostly address/value writes to
 * register 0x9888, plus one write to 0xd24.
 */
static const struct i915_oa_reg mux_config_compute_extended_3_subslices_0x10[] = {
	{ _MMIO(0x9888), 0x10dc00e0 },
	{ _MMIO(0x9888), 0x14db0160 },
	{ _MMIO(0x9888), 0x16db2800 },
	{ _MMIO(0x9888), 0x18db0120 },
	{ _MMIO(0x9888), 0x0edc25c1 },
	{ _MMIO(0x9888), 0x00dc6100 },
	{ _MMIO(0x9888), 0x02dc204c },
	{ _MMIO(0x9888), 0x06dc8000 },
	{ _MMIO(0x9888), 0x08dcc000 },
	{ _MMIO(0x9888), 0x0adcc000 },
	{ _MMIO(0x9888), 0x0cdcc000 },
	{ _MMIO(0x9888), 0x04dcc000 },
	{ _MMIO(0x9888), 0x00db0011 },
	{ _MMIO(0x9888), 0x06db0900 },
	{ _MMIO(0x9888), 0x08db0a13 },
	{ _MMIO(0x9888), 0x0adb0b15 },
	{ _MMIO(0x9888), 0x0cdb2317 },
	{ _MMIO(0x9888), 0x04db21b7 },
	{ _MMIO(0x9888), 0x10db0000 },
	{ _MMIO(0x9888), 0x0edb0000 },
	{ _MMIO(0x9888), 0x1adb0000 },
	{ _MMIO(0x9888), 0x0c9fa800 },
	{ _MMIO(0x9888), 0x0e9faa2a },
	{ _MMIO(0x9888), 0x109f02aa },
	{ _MMIO(0x9888), 0x00b84000 },
	{ _MMIO(0x9888), 0x0eb84000 },
	{ _MMIO(0x9888), 0x16b84000 },
	{ _MMIO(0x9888), 0x18b81555 },
	{ _MMIO(0x9888), 0x02b84000 },
	{ _MMIO(0x9888), 0x04b84000 },
	{ _MMIO(0x9888), 0x06b84000 },
	{ _MMIO(0x9888), 0x08b84000 },
	{ _MMIO(0x9888), 0x0ab84000 },
	{ _MMIO(0x9888), 0x00b9a000 },
	{ _MMIO(0x9888), 0x06b98000 },
	{ _MMIO(0x9888), 0x08b9a000 },
	{ _MMIO(0x9888), 0x0ab9a000 },
	{ _MMIO(0x9888), 0x0cb9a000 },
	{ _MMIO(0x9888), 0x0eb9a000 },
	{ _MMIO(0x9888), 0x02b9a000 },
	{ _MMIO(0x9888), 0x04b9a000 },
	{ _MMIO(0x9888), 0x01888000 },
	{ _MMIO(0x9888), 0x0d88f800 },
	{ _MMIO(0x9888), 0x0f88000f },
	{ _MMIO(0x9888), 0x03888000 },
	{ _MMIO(0x9888), 0x05888000 },
	{ _MMIO(0x9888), 0x07888000 },
	{ _MMIO(0x9888), 0x09888000 },
	{ _MMIO(0x9888), 0x0b888000 },
	{ _MMIO(0x9888), 0x238b5540 },
	{ _MMIO(0x9888), 0x258baaa2 },
	{ _MMIO(0x9888), 0x278b002a },
	{ _MMIO(0x9888), 0x018c4000 },
	{ _MMIO(0x9888), 0x0f8c4000 },
	{ _MMIO(0x9888), 0x178c2000 },
	{ _MMIO(0x9888), 0x198c5500 },
	{ _MMIO(0x9888), 0x1b8c0015 },
	{ _MMIO(0x9888), 0x038c4000 },
	{ _MMIO(0x9888), 0x058c4000 },
	{ _MMIO(0x9888), 0x078c4000 },
	{ _MMIO(0x9888), 0x098c4000 },
	{ _MMIO(0x9888), 0x0b8c4000 },
	{ _MMIO(0x9888), 0x018da000 },
	{ _MMIO(0x9888), 0x078d8000 },
	{ _MMIO(0x9888), 0x098da000 },
	{ _MMIO(0x9888), 0x0b8da000 },
	{ _MMIO(0x9888), 0x0d8da000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x038da000 },
	{ _MMIO(0x9888), 0x058da000 },
	{ _MMIO(0x9888), 0x1f85aa80 },
	{ _MMIO(0x9888), 0x2185aaa2 },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x01834000 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0184c000 },
	{ _MMIO(0x9888), 0x07848000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x1180c000 },
	{ _MMIO(0x9888), 0x17808000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0xd24), 0x00000000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3d800000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x43800000 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47800420 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x3f800421 },
	{ _MMIO(0x9888), 0x41800000 },
};
1511
/*
 * MUX configuration for the "compute_extended" metric set, variant 5 —
 * selected when subslice mask bit 0x20 is present (see
 * get_compute_extended_mux_config()).
 */
static const struct i915_oa_reg mux_config_compute_extended_5_subslices_0x20[] = {
	{ _MMIO(0x9888), 0x10b800e0 },
	{ _MMIO(0x9888), 0x14ba0160 },
	{ _MMIO(0x9888), 0x16ba2800 },
	{ _MMIO(0x9888), 0x18ba0120 },
	{ _MMIO(0x9888), 0x0c9fa800 },
	{ _MMIO(0x9888), 0x0e9faa2a },
	{ _MMIO(0x9888), 0x109f02aa },
	{ _MMIO(0x9888), 0x0eb8a5c1 },
	{ _MMIO(0x9888), 0x00b8a100 },
	{ _MMIO(0x9888), 0x02b8204c },
	{ _MMIO(0x9888), 0x16b88000 },
	{ _MMIO(0x9888), 0x18b802aa },
	{ _MMIO(0x9888), 0x04b80000 },
	{ _MMIO(0x9888), 0x06b80000 },
	{ _MMIO(0x9888), 0x08b88000 },
	{ _MMIO(0x9888), 0x0ab88000 },
	{ _MMIO(0x9888), 0x00b9a000 },
	{ _MMIO(0x9888), 0x06b98000 },
	{ _MMIO(0x9888), 0x08b9a000 },
	{ _MMIO(0x9888), 0x0ab9a000 },
	{ _MMIO(0x9888), 0x0cb9a000 },
	{ _MMIO(0x9888), 0x0eb9a000 },
	{ _MMIO(0x9888), 0x02b9a000 },
	{ _MMIO(0x9888), 0x04b9a000 },
	{ _MMIO(0x9888), 0x00ba0011 },
	{ _MMIO(0x9888), 0x06ba0900 },
	{ _MMIO(0x9888), 0x08ba0a13 },
	{ _MMIO(0x9888), 0x0aba0b15 },
	{ _MMIO(0x9888), 0x0cba2317 },
	{ _MMIO(0x9888), 0x04ba21b7 },
	{ _MMIO(0x9888), 0x10ba0000 },
	{ _MMIO(0x9888), 0x0eba0000 },
	{ _MMIO(0x9888), 0x1aba0000 },
	{ _MMIO(0x9888), 0x01888000 },
	{ _MMIO(0x9888), 0x0d88f800 },
	{ _MMIO(0x9888), 0x0f88000f },
	{ _MMIO(0x9888), 0x03888000 },
	{ _MMIO(0x9888), 0x05888000 },
	{ _MMIO(0x9888), 0x07888000 },
	{ _MMIO(0x9888), 0x09888000 },
	{ _MMIO(0x9888), 0x0b888000 },
	{ _MMIO(0x9888), 0x238b5540 },
	{ _MMIO(0x9888), 0x258baaa2 },
	{ _MMIO(0x9888), 0x278b002a },
	{ _MMIO(0x9888), 0x018c4000 },
	{ _MMIO(0x9888), 0x0f8c4000 },
	{ _MMIO(0x9888), 0x178c2000 },
	{ _MMIO(0x9888), 0x198c5500 },
	{ _MMIO(0x9888), 0x1b8c0015 },
	{ _MMIO(0x9888), 0x038c4000 },
	{ _MMIO(0x9888), 0x058c4000 },
	{ _MMIO(0x9888), 0x078c4000 },
	{ _MMIO(0x9888), 0x098c4000 },
	{ _MMIO(0x9888), 0x0b8c4000 },
	{ _MMIO(0x9888), 0x018da000 },
	{ _MMIO(0x9888), 0x078d8000 },
	{ _MMIO(0x9888), 0x098da000 },
	{ _MMIO(0x9888), 0x0b8da000 },
	{ _MMIO(0x9888), 0x0d8da000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x038da000 },
	{ _MMIO(0x9888), 0x058da000 },
	{ _MMIO(0x9888), 0x1f85aa80 },
	{ _MMIO(0x9888), 0x2185aaa2 },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x01834000 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0184c000 },
	{ _MMIO(0x9888), 0x07848000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x1180c000 },
	{ _MMIO(0x9888), 0x17808000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0xd24), 0x00000000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3d800000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x43800000 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47800420 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x3f800421 },
	{ _MMIO(0x9888), 0x41800000 },
};
1617
1618static int
1619get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
1620 const struct i915_oa_reg **regs,
1621 int *lens)
1622{
1623 int n = 0;
1624
1625 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 6);
1626 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 6);
1627
1628 if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x01) {
1629 regs[n] = mux_config_compute_extended_0_subslices_0x01;
1630 lens[n] = ARRAY_SIZE(mux_config_compute_extended_0_subslices_0x01);
1631 n++;
1632 }
1633 if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x08) {
1634 regs[n] = mux_config_compute_extended_1_subslices_0x08;
1635 lens[n] = ARRAY_SIZE(mux_config_compute_extended_1_subslices_0x08);
1636 n++;
1637 }
1638 if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x02) {
1639 regs[n] = mux_config_compute_extended_2_subslices_0x02;
1640 lens[n] = ARRAY_SIZE(mux_config_compute_extended_2_subslices_0x02);
1641 n++;
1642 }
1643 if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x10) {
1644 regs[n] = mux_config_compute_extended_3_subslices_0x10;
1645 lens[n] = ARRAY_SIZE(mux_config_compute_extended_3_subslices_0x10);
1646 n++;
1647 }
1648 if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x04) {
1649 regs[n] = mux_config_compute_extended_4_subslices_0x04;
1650 lens[n] = ARRAY_SIZE(mux_config_compute_extended_4_subslices_0x04);
1651 n++;
1652 }
1653 if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x20) {
1654 regs[n] = mux_config_compute_extended_5_subslices_0x20;
1655 lens[n] = ARRAY_SIZE(mux_config_compute_extended_5_subslices_0x20);
1656 n++;
1657 }
1658
1659 return n;
1660}
1661
/*
 * Boolean ("B") counter configuration for the "compute_l3_cache" metric
 * set: address/value writes in the 0x27xx register range.
 */
static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x30800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fffa },
	{ _MMIO(0x2774), 0x0000fefe },
	{ _MMIO(0x2778), 0x0007fffa },
	{ _MMIO(0x277c), 0x0000fefd },
	{ _MMIO(0x2790), 0x0007fffa },
	{ _MMIO(0x2794), 0x0000fbef },
	{ _MMIO(0x2798), 0x0007fffa },
	{ _MMIO(0x279c), 0x0000fbdf },
};

/*
 * Flexible EU counter configuration for the "compute_l3_cache" metric
 * set: writes to the 0xe4xx/0xe5xx/0xe6xx register range.
 */
static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00101100 },
	{ _MMIO(0xe45c), 0x00201200 },
	{ _MMIO(0xe55c), 0x00301300 },
	{ _MMIO(0xe65c), 0x00401400 },
};
1687
/*
 * MUX configuration for the "compute_l3_cache" metric set; applied
 * unconditionally for all devices (see
 * get_compute_l3_cache_mux_config()).
 */
static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
	{ _MMIO(0x9888), 0x143f00b3 },
	{ _MMIO(0x9888), 0x14bf00b3 },
	{ _MMIO(0x9888), 0x138303c0 },
	{ _MMIO(0x9888), 0x3b800060 },
	{ _MMIO(0x9888), 0x3d800805 },
	{ _MMIO(0x9888), 0x003f0029 },
	{ _MMIO(0x9888), 0x063f1400 },
	{ _MMIO(0x9888), 0x083f1225 },
	{ _MMIO(0x9888), 0x0e3f1327 },
	{ _MMIO(0x9888), 0x103f0000 },
	{ _MMIO(0x9888), 0x005a4000 },
	{ _MMIO(0x9888), 0x065a8000 },
	{ _MMIO(0x9888), 0x085ac000 },
	{ _MMIO(0x9888), 0x0e5ac000 },
	{ _MMIO(0x9888), 0x001d4000 },
	{ _MMIO(0x9888), 0x061d8000 },
	{ _MMIO(0x9888), 0x081dc000 },
	{ _MMIO(0x9888), 0x0e1dc000 },
	{ _MMIO(0x9888), 0x0c1f0800 },
	{ _MMIO(0x9888), 0x0e1f2a00 },
	{ _MMIO(0x9888), 0x101f0280 },
	{ _MMIO(0x9888), 0x00391000 },
	{ _MMIO(0x9888), 0x06394000 },
	{ _MMIO(0x9888), 0x08395000 },
	{ _MMIO(0x9888), 0x0e395000 },
	{ _MMIO(0x9888), 0x0abf1429 },
	{ _MMIO(0x9888), 0x0cbf1225 },
	{ _MMIO(0x9888), 0x00bf1380 },
	{ _MMIO(0x9888), 0x02bf0026 },
	{ _MMIO(0x9888), 0x10bf0000 },
	{ _MMIO(0x9888), 0x0adac000 },
	{ _MMIO(0x9888), 0x0cdac000 },
	{ _MMIO(0x9888), 0x00da8000 },
	{ _MMIO(0x9888), 0x02da4000 },
	{ _MMIO(0x9888), 0x0a9dc000 },
	{ _MMIO(0x9888), 0x0c9dc000 },
	{ _MMIO(0x9888), 0x009d8000 },
	{ _MMIO(0x9888), 0x029d4000 },
	{ _MMIO(0x9888), 0x0e9f8000 },
	{ _MMIO(0x9888), 0x109f002a },
	{ _MMIO(0x9888), 0x0c9fa000 },
	{ _MMIO(0x9888), 0x0ab95000 },
	{ _MMIO(0x9888), 0x0cb95000 },
	{ _MMIO(0x9888), 0x00b94000 },
	{ _MMIO(0x9888), 0x02b91000 },
	{ _MMIO(0x9888), 0x0d88c000 },
	{ _MMIO(0x9888), 0x0f880003 },
	{ _MMIO(0x9888), 0x03888000 },
	{ _MMIO(0x9888), 0x05888000 },
	{ _MMIO(0x9888), 0x018a8000 },
	{ _MMIO(0x9888), 0x0f8a8000 },
	{ _MMIO(0x9888), 0x198a8000 },
	{ _MMIO(0x9888), 0x1b8a8020 },
	{ _MMIO(0x9888), 0x1d8a0002 },
	{ _MMIO(0x9888), 0x238b0520 },
	{ _MMIO(0x9888), 0x258ba950 },
	{ _MMIO(0x9888), 0x278b0016 },
	{ _MMIO(0x9888), 0x198c5400 },
	{ _MMIO(0x9888), 0x1b8c0001 },
	{ _MMIO(0x9888), 0x038c4000 },
	{ _MMIO(0x9888), 0x058c4000 },
	{ _MMIO(0x9888), 0x0b8da000 },
	{ _MMIO(0x9888), 0x0d8da000 },
	{ _MMIO(0x9888), 0x018d8000 },
	{ _MMIO(0x9888), 0x038d2000 },
	{ _MMIO(0x9888), 0x1f85aa80 },
	{ _MMIO(0x9888), 0x2185aaa0 },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x03835180 },
	{ _MMIO(0x9888), 0x05834022 },
	{ _MMIO(0x9888), 0x11830000 },
	{ _MMIO(0x9888), 0x01834000 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x07830000 },
	{ _MMIO(0x9888), 0x09830000 },
	{ _MMIO(0x9888), 0x0184c000 },
	{ _MMIO(0x9888), 0x07848000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x05844000 },
	{ _MMIO(0x9888), 0x1b80c137 },
	{ _MMIO(0x9888), 0x1d80c147 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x1180c000 },
	{ _MMIO(0x9888), 0x17808000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x15804000 },
	{ _MMIO(0xd24), 0x00000000 },
	{ _MMIO(0x9888), 0x4d801000 },
	{ _MMIO(0x9888), 0x4f800111 },
	{ _MMIO(0x9888), 0x43800842 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47800840 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x3f800800 },
	{ _MMIO(0x9888), 0x418014a2 },
};
1795
1796static int
1797get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
1798 const struct i915_oa_reg **regs,
1799 int *lens)
1800{
1801 int n = 0;
1802
1803 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1804 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1805
1806 regs[n] = mux_config_compute_l3_cache;
1807 lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
1808 n++;
1809
1810 return n;
1811}
1812
/*
 * Boolean ("B") counter configuration for the
 * "data_port_reads_coalescing" metric set (0x27xx register range).
 */
static const struct i915_oa_reg b_counter_config_data_port_reads_coalescing[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0xba98ba98 },
	{ _MMIO(0x2748), 0xba98ba98 },
	{ _MMIO(0x2744), 0x00003377 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fff2 },
	{ _MMIO(0x2774), 0x00007ff0 },
	{ _MMIO(0x2778), 0x0007ffe2 },
	{ _MMIO(0x277c), 0x00007ff0 },
	{ _MMIO(0x2780), 0x0007ffc2 },
	{ _MMIO(0x2784), 0x00007ff0 },
	{ _MMIO(0x2788), 0x0007ff82 },
	{ _MMIO(0x278c), 0x00007ff0 },
	{ _MMIO(0x2790), 0x0007fffa },
	{ _MMIO(0x2794), 0x0000bfef },
	{ _MMIO(0x2798), 0x0007fffa },
	{ _MMIO(0x279c), 0x0000bfdf },
	{ _MMIO(0x27a0), 0x0007fffa },
	{ _MMIO(0x27a4), 0x0000bfbf },
	{ _MMIO(0x27a8), 0x0007fffa },
	{ _MMIO(0x27ac), 0x0000bf7f },
};

/*
 * Flexible EU counter configuration for the
 * "data_port_reads_coalescing" metric set.
 */
static const struct i915_oa_reg flex_eu_config_data_port_reads_coalescing[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};
1849
/*
 * MUX configuration for the "data_port_reads_coalescing" metric set;
 * only applicable when subslice mask bit 0x01 is present (see
 * get_data_port_reads_coalescing_mux_config()).
 */
static const struct i915_oa_reg mux_config_data_port_reads_coalescing_0_subslices_0x01[] = {
	{ _MMIO(0x9888), 0x103d0005 },
	{ _MMIO(0x9888), 0x163d240b },
	{ _MMIO(0x9888), 0x1058022f },
	{ _MMIO(0x9888), 0x185b5520 },
	{ _MMIO(0x9888), 0x198b0003 },
	{ _MMIO(0x9888), 0x005cc000 },
	{ _MMIO(0x9888), 0x065cc000 },
	{ _MMIO(0x9888), 0x085cc000 },
	{ _MMIO(0x9888), 0x0a5cc000 },
	{ _MMIO(0x9888), 0x0c5cc000 },
	{ _MMIO(0x9888), 0x0e5cc000 },
	{ _MMIO(0x9888), 0x025c4000 },
	{ _MMIO(0x9888), 0x045c8000 },
	{ _MMIO(0x9888), 0x003d0000 },
	{ _MMIO(0x9888), 0x063d00b0 },
	{ _MMIO(0x9888), 0x083d0182 },
	{ _MMIO(0x9888), 0x0a3d10a0 },
	{ _MMIO(0x9888), 0x0c3d11a2 },
	{ _MMIO(0x9888), 0x0e3d0000 },
	{ _MMIO(0x9888), 0x183d0000 },
	{ _MMIO(0x9888), 0x1a3d0000 },
	{ _MMIO(0x9888), 0x0e582242 },
	{ _MMIO(0x9888), 0x00586700 },
	{ _MMIO(0x9888), 0x0258004f },
	{ _MMIO(0x9888), 0x0658c000 },
	{ _MMIO(0x9888), 0x0858c000 },
	{ _MMIO(0x9888), 0x0a58c000 },
	{ _MMIO(0x9888), 0x0c58c000 },
	{ _MMIO(0x9888), 0x045b6300 },
	{ _MMIO(0x9888), 0x105b0000 },
	{ _MMIO(0x9888), 0x005b4000 },
	{ _MMIO(0x9888), 0x0e5b4000 },
	{ _MMIO(0x9888), 0x1a5b0155 },
	{ _MMIO(0x9888), 0x025b4000 },
	{ _MMIO(0x9888), 0x0a5b0000 },
	{ _MMIO(0x9888), 0x0c5b4000 },
	{ _MMIO(0x9888), 0x0c1fa800 },
	{ _MMIO(0x9888), 0x0e1faaa0 },
	{ _MMIO(0x9888), 0x101f02aa },
	{ _MMIO(0x9888), 0x00384000 },
	{ _MMIO(0x9888), 0x0e384000 },
	{ _MMIO(0x9888), 0x16384000 },
	{ _MMIO(0x9888), 0x18381555 },
	{ _MMIO(0x9888), 0x02384000 },
	{ _MMIO(0x9888), 0x04384000 },
	{ _MMIO(0x9888), 0x0a384000 },
	{ _MMIO(0x9888), 0x0c384000 },
	{ _MMIO(0x9888), 0x0039a000 },
	{ _MMIO(0x9888), 0x0639a000 },
	{ _MMIO(0x9888), 0x0839a000 },
	{ _MMIO(0x9888), 0x0a39a000 },
	{ _MMIO(0x9888), 0x0c39a000 },
	{ _MMIO(0x9888), 0x0e39a000 },
	{ _MMIO(0x9888), 0x02392000 },
	{ _MMIO(0x9888), 0x04398000 },
	{ _MMIO(0x9888), 0x018a8000 },
	{ _MMIO(0x9888), 0x0f8a8000 },
	{ _MMIO(0x9888), 0x198a8000 },
	{ _MMIO(0x9888), 0x1b8aaaa0 },
	{ _MMIO(0x9888), 0x1d8a0002 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x0d8a8000 },
	{ _MMIO(0x9888), 0x038b6300 },
	{ _MMIO(0x9888), 0x058b0062 },
	{ _MMIO(0x9888), 0x118b0000 },
	{ _MMIO(0x9888), 0x238b02a0 },
	{ _MMIO(0x9888), 0x258b5555 },
	{ _MMIO(0x9888), 0x278b0015 },
	{ _MMIO(0x9888), 0x1f85aa80 },
	{ _MMIO(0x9888), 0x2185aaaa },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x01834000 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0184c000 },
	{ _MMIO(0x9888), 0x0784c000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x1180c000 },
	{ _MMIO(0x9888), 0x1780c000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0xd24), 0x00000000 },
	{ _MMIO(0x9888), 0x4d801000 },
	{ _MMIO(0x9888), 0x3d800000 },
	{ _MMIO(0x9888), 0x4f800001 },
	{ _MMIO(0x9888), 0x43800000 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47800420 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x3f800421 },
	{ _MMIO(0x9888), 0x41800041 },
};
1964
1965static int
1966get_data_port_reads_coalescing_mux_config(struct drm_i915_private *dev_priv,
1967 const struct i915_oa_reg **regs,
1968 int *lens)
1969{
1970 int n = 0;
1971
1972 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1973 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1974
1975 if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x01) {
1976 regs[n] = mux_config_data_port_reads_coalescing_0_subslices_0x01;
1977 lens[n] = ARRAY_SIZE(mux_config_data_port_reads_coalescing_0_subslices_0x01);
1978 n++;
1979 }
1980
1981 return n;
1982}
1983
/*
 * Boolean ("B") counter configuration for the
 * "data_port_writes_coalescing" metric set (0x27xx register range).
 */
static const struct i915_oa_reg b_counter_config_data_port_writes_coalescing[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0xba98ba98 },
	{ _MMIO(0x2748), 0xba98ba98 },
	{ _MMIO(0x2744), 0x00003377 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007ff72 },
	{ _MMIO(0x2774), 0x0000bfd0 },
	{ _MMIO(0x2778), 0x0007ff62 },
	{ _MMIO(0x277c), 0x0000bfd0 },
	{ _MMIO(0x2780), 0x0007ff42 },
	{ _MMIO(0x2784), 0x0000bfd0 },
	{ _MMIO(0x2788), 0x0007ff02 },
	{ _MMIO(0x278c), 0x0000bfd0 },
	{ _MMIO(0x2790), 0x0005fff2 },
	{ _MMIO(0x2794), 0x0000bfd0 },
	{ _MMIO(0x2798), 0x0005ffe2 },
	{ _MMIO(0x279c), 0x0000bfd0 },
	{ _MMIO(0x27a0), 0x0005ffc2 },
	{ _MMIO(0x27a4), 0x0000bfd0 },
	{ _MMIO(0x27a8), 0x0005ff82 },
	{ _MMIO(0x27ac), 0x0000bfd0 },
};

/*
 * Flexible EU counter configuration for the
 * "data_port_writes_coalescing" metric set.
 */
static const struct i915_oa_reg flex_eu_config_data_port_writes_coalescing[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};
2020
/*
 * MUX configuration for the "data_port_writes_coalescing" metric set;
 * only applicable when subslice mask bit 0x01 is present (see
 * get_data_port_writes_coalescing_mux_config()).
 */
static const struct i915_oa_reg mux_config_data_port_writes_coalescing_0_subslices_0x01[] = {
	{ _MMIO(0x9888), 0x103d0005 },
	{ _MMIO(0x9888), 0x143d0120 },
	{ _MMIO(0x9888), 0x163d2400 },
	{ _MMIO(0x9888), 0x1058022f },
	{ _MMIO(0x9888), 0x105b0000 },
	{ _MMIO(0x9888), 0x198b0003 },
	{ _MMIO(0x9888), 0x005cc000 },
	{ _MMIO(0x9888), 0x065cc000 },
	{ _MMIO(0x9888), 0x085cc000 },
	{ _MMIO(0x9888), 0x0a5cc000 },
	{ _MMIO(0x9888), 0x0e5cc000 },
	{ _MMIO(0x9888), 0x025c4000 },
	{ _MMIO(0x9888), 0x045c8000 },
	{ _MMIO(0x9888), 0x003d0000 },
	{ _MMIO(0x9888), 0x063d0094 },
	{ _MMIO(0x9888), 0x083d0182 },
	{ _MMIO(0x9888), 0x0a3d1814 },
	{ _MMIO(0x9888), 0x0e3d0000 },
	{ _MMIO(0x9888), 0x183d0000 },
	{ _MMIO(0x9888), 0x1a3d0000 },
	{ _MMIO(0x9888), 0x0c3d0000 },
	{ _MMIO(0x9888), 0x0e582242 },
	{ _MMIO(0x9888), 0x00586700 },
	{ _MMIO(0x9888), 0x0258004f },
	{ _MMIO(0x9888), 0x0658c000 },
	{ _MMIO(0x9888), 0x0858c000 },
	{ _MMIO(0x9888), 0x0a58c000 },
	{ _MMIO(0x9888), 0x045b6a80 },
	{ _MMIO(0x9888), 0x005b4000 },
	{ _MMIO(0x9888), 0x0e5b4000 },
	{ _MMIO(0x9888), 0x185b5400 },
	{ _MMIO(0x9888), 0x1a5b0141 },
	{ _MMIO(0x9888), 0x025b4000 },
	{ _MMIO(0x9888), 0x0a5b0000 },
	{ _MMIO(0x9888), 0x0c5b4000 },
	{ _MMIO(0x9888), 0x0c1fa800 },
	{ _MMIO(0x9888), 0x0e1faaa0 },
	{ _MMIO(0x9888), 0x101f0282 },
	{ _MMIO(0x9888), 0x00384000 },
	{ _MMIO(0x9888), 0x0e384000 },
	{ _MMIO(0x9888), 0x16384000 },
	{ _MMIO(0x9888), 0x18381415 },
	{ _MMIO(0x9888), 0x02384000 },
	{ _MMIO(0x9888), 0x04384000 },
	{ _MMIO(0x9888), 0x0a384000 },
	{ _MMIO(0x9888), 0x0c384000 },
	{ _MMIO(0x9888), 0x0039a000 },
	{ _MMIO(0x9888), 0x0639a000 },
	{ _MMIO(0x9888), 0x0839a000 },
	{ _MMIO(0x9888), 0x0a39a000 },
	{ _MMIO(0x9888), 0x0e39a000 },
	{ _MMIO(0x9888), 0x02392000 },
	{ _MMIO(0x9888), 0x04398000 },
	{ _MMIO(0x9888), 0x018a8000 },
	{ _MMIO(0x9888), 0x0f8a8000 },
	{ _MMIO(0x9888), 0x198a8000 },
	{ _MMIO(0x9888), 0x1b8a82a0 },
	{ _MMIO(0x9888), 0x1d8a0002 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x0d8a8000 },
	{ _MMIO(0x9888), 0x038b6300 },
	{ _MMIO(0x9888), 0x058b0062 },
	{ _MMIO(0x9888), 0x118b0000 },
	{ _MMIO(0x9888), 0x238b02a0 },
	{ _MMIO(0x9888), 0x258b1555 },
	{ _MMIO(0x9888), 0x278b0014 },
	{ _MMIO(0x9888), 0x1f85aa80 },
	{ _MMIO(0x9888), 0x21852aaa },
	{ _MMIO(0x9888), 0x23850028 },
	{ _MMIO(0x9888), 0x01834000 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830141 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0184c000 },
	{ _MMIO(0x9888), 0x0784c000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x1180c000 },
	{ _MMIO(0x9888), 0x1780c000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0xd24), 0x00000000 },
	{ _MMIO(0x9888), 0x4d801000 },
	{ _MMIO(0x9888), 0x3d800000 },
	{ _MMIO(0x9888), 0x4f800001 },
	{ _MMIO(0x9888), 0x43800000 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47800420 },
	{ _MMIO(0x9888), 0x3f800421 },
	{ _MMIO(0x9888), 0x41800041 },
};
2131
2132static int
2133get_data_port_writes_coalescing_mux_config(struct drm_i915_private *dev_priv,
2134 const struct i915_oa_reg **regs,
2135 int *lens)
2136{
2137 int n = 0;
2138
2139 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
2140 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
2141
2142 if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x01) {
2143 regs[n] = mux_config_data_port_writes_coalescing_0_subslices_0x01;
2144 lens[n] = ARRAY_SIZE(mux_config_data_port_writes_coalescing_0_subslices_0x01);
2145 n++;
2146 }
2147
2148 return n;
2149}
2150
/*
 * Boolean ("B") counter configuration for the "hdc_and_sf" metric set
 * (0x27xx register range).
 */
static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x10800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000fff7 },
};

/*
 * Flexible EU counter configuration for the "hdc_and_sf" metric set.
 */
static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
2171
/*
 * MUX configuration for the "hdc_and_sf" metric set; applied
 * unconditionally for all devices (see get_hdc_and_sf_mux_config()).
 */
static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
	{ _MMIO(0x9888), 0x105c0232 },
	{ _MMIO(0x9888), 0x10580232 },
	{ _MMIO(0x9888), 0x10380232 },
	{ _MMIO(0x9888), 0x10dc0232 },
	{ _MMIO(0x9888), 0x10d80232 },
	{ _MMIO(0x9888), 0x10b80232 },
	{ _MMIO(0x9888), 0x118e4400 },
	{ _MMIO(0x9888), 0x025c6080 },
	{ _MMIO(0x9888), 0x045c004b },
	{ _MMIO(0x9888), 0x005c8000 },
	{ _MMIO(0x9888), 0x00582080 },
	{ _MMIO(0x9888), 0x0258004b },
	{ _MMIO(0x9888), 0x025b4000 },
	{ _MMIO(0x9888), 0x045b4000 },
	{ _MMIO(0x9888), 0x0c1fa000 },
	{ _MMIO(0x9888), 0x0e1f00aa },
	{ _MMIO(0x9888), 0x04386080 },
	{ _MMIO(0x9888), 0x0638404b },
	{ _MMIO(0x9888), 0x02384000 },
	{ _MMIO(0x9888), 0x08384000 },
	{ _MMIO(0x9888), 0x0a380000 },
	{ _MMIO(0x9888), 0x0c380000 },
	{ _MMIO(0x9888), 0x00398000 },
	{ _MMIO(0x9888), 0x0239a000 },
	{ _MMIO(0x9888), 0x0439a000 },
	{ _MMIO(0x9888), 0x06392000 },
	{ _MMIO(0x9888), 0x0cdc25c1 },
	{ _MMIO(0x9888), 0x0adcc000 },
	{ _MMIO(0x9888), 0x0ad825c1 },
	{ _MMIO(0x9888), 0x18db4000 },
	{ _MMIO(0x9888), 0x1adb0001 },
	{ _MMIO(0x9888), 0x0e9f8000 },
	{ _MMIO(0x9888), 0x109f02aa },
	{ _MMIO(0x9888), 0x0eb825c1 },
	{ _MMIO(0x9888), 0x18b80154 },
	{ _MMIO(0x9888), 0x0ab9a000 },
	{ _MMIO(0x9888), 0x0cb9a000 },
	{ _MMIO(0x9888), 0x0eb9a000 },
	{ _MMIO(0x9888), 0x0d88c000 },
	{ _MMIO(0x9888), 0x0f88000f },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x078a8000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x0d8a8000 },
	{ _MMIO(0x9888), 0x258baa05 },
	{ _MMIO(0x9888), 0x278b002a },
	{ _MMIO(0x9888), 0x238b2a80 },
	{ _MMIO(0x9888), 0x198c5400 },
	{ _MMIO(0x9888), 0x1b8c0015 },
	{ _MMIO(0x9888), 0x098dc000 },
	{ _MMIO(0x9888), 0x0b8da000 },
	{ _MMIO(0x9888), 0x0d8da000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x098e05c0 },
	{ _MMIO(0x9888), 0x058e0000 },
	{ _MMIO(0x9888), 0x198f0020 },
	{ _MMIO(0x9888), 0x2185aa0a },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x19835000 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x09848000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x19808000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x51800040 },
	{ _MMIO(0x9888), 0x43800400 },
	{ _MMIO(0x9888), 0x45800800 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47800c62 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f801042 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x418014a4 },
};
2270
2271static int
2272get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
2273 const struct i915_oa_reg **regs,
2274 int *lens)
2275{
2276 int n = 0;
2277
2278 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
2279 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
2280
2281 regs[n] = mux_config_hdc_and_sf;
2282 lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
2283 n++;
2284
2285 return n;
2286}
2287
/*
 * Boolean ("B") counter configuration for the "l3_1" metric set
 * (0x27xx register range).
 */
static const struct i915_oa_reg b_counter_config_l3_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};

/*
 * Flexible EU counter configuration for the "l3_1" metric set.
 */
static const struct i915_oa_reg flex_eu_config_l3_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
2322
/*
 * MUX programming for the "L3_1" metric set: every entry is a write to the
 * single 0x9888 selector register, with the payload choosing which internal
 * signals feed the OA counters (NOA routing — exact signal meanings are not
 * documented here; values are generated configuration, do not hand-edit).
 */
static const struct i915_oa_reg mux_config_l3_1[] = {
	{ _MMIO(0x9888), 0x10bf03da },
	{ _MMIO(0x9888), 0x14bf0001 },
	{ _MMIO(0x9888), 0x12980340 },
	{ _MMIO(0x9888), 0x12990340 },
	{ _MMIO(0x9888), 0x0cbf1187 },
	{ _MMIO(0x9888), 0x0ebf1205 },
	{ _MMIO(0x9888), 0x00bf0500 },
	{ _MMIO(0x9888), 0x02bf042b },
	{ _MMIO(0x9888), 0x04bf002c },
	{ _MMIO(0x9888), 0x0cdac000 },
	{ _MMIO(0x9888), 0x0edac000 },
	{ _MMIO(0x9888), 0x00da8000 },
	{ _MMIO(0x9888), 0x02dac000 },
	{ _MMIO(0x9888), 0x04da4000 },
	{ _MMIO(0x9888), 0x04983400 },
	{ _MMIO(0x9888), 0x10980000 },
	{ _MMIO(0x9888), 0x06990034 },
	{ _MMIO(0x9888), 0x10990000 },
	{ _MMIO(0x9888), 0x0c9dc000 },
	{ _MMIO(0x9888), 0x0e9dc000 },
	{ _MMIO(0x9888), 0x009d8000 },
	{ _MMIO(0x9888), 0x029dc000 },
	{ _MMIO(0x9888), 0x049d4000 },
	{ _MMIO(0x9888), 0x109f02a8 },
	{ _MMIO(0x9888), 0x0c9fa000 },
	{ _MMIO(0x9888), 0x0e9f00ba },
	{ _MMIO(0x9888), 0x0cb88000 },
	{ _MMIO(0x9888), 0x0cb95000 },
	{ _MMIO(0x9888), 0x0eb95000 },
	{ _MMIO(0x9888), 0x00b94000 },
	{ _MMIO(0x9888), 0x02b95000 },
	{ _MMIO(0x9888), 0x04b91000 },
	{ _MMIO(0x9888), 0x06b92000 },
	{ _MMIO(0x9888), 0x0cba4000 },
	{ _MMIO(0x9888), 0x0f88000f },
	{ _MMIO(0x9888), 0x03888000 },
	{ _MMIO(0x9888), 0x05888000 },
	{ _MMIO(0x9888), 0x07888000 },
	{ _MMIO(0x9888), 0x09888000 },
	{ _MMIO(0x9888), 0x0b888000 },
	{ _MMIO(0x9888), 0x0d880400 },
	{ _MMIO(0x9888), 0x258b800a },
	{ _MMIO(0x9888), 0x278b002a },
	{ _MMIO(0x9888), 0x238b5500 },
	{ _MMIO(0x9888), 0x198c4000 },
	{ _MMIO(0x9888), 0x1b8c0015 },
	{ _MMIO(0x9888), 0x038c4000 },
	{ _MMIO(0x9888), 0x058c4000 },
	{ _MMIO(0x9888), 0x078c4000 },
	{ _MMIO(0x9888), 0x098c4000 },
	{ _MMIO(0x9888), 0x0b8c4000 },
	{ _MMIO(0x9888), 0x0d8c4000 },
	{ _MMIO(0x9888), 0x0d8da000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x018d8000 },
	{ _MMIO(0x9888), 0x038da000 },
	{ _MMIO(0x9888), 0x058da000 },
	{ _MMIO(0x9888), 0x078d2000 },
	{ _MMIO(0x9888), 0x2185800a },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x1b830154 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x47800000 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f800000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x41800060 },
};
2414
2415static int
2416get_l3_1_mux_config(struct drm_i915_private *dev_priv,
2417 const struct i915_oa_reg **regs,
2418 int *lens)
2419{
2420 int n = 0;
2421
2422 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
2423 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
2424
2425 regs[n] = mux_config_l3_1;
2426 lens[n] = ARRAY_SIZE(mux_config_l3_1);
2427 n++;
2428
2429 return n;
2430}
2431
/*
 * Boolean ("B") counter configuration for the "L3_2" metric set
 * (0x2700-range OA report/trigger registers).  Identical programming to
 * b_counter_config_l3_1; opaque generated values — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_l3_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};
2456
/* EU flex counter configuration for "L3_2" (same values as every set here). */
static const struct i915_oa_reg flex_eu_config_l3_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
2466
/*
 * MUX (0x9888 selector) programming for the "L3_2" metric set.  Opaque
 * generated signal-routing values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_l3_2[] = {
	{ _MMIO(0x9888), 0x103f03da },
	{ _MMIO(0x9888), 0x143f0001 },
	{ _MMIO(0x9888), 0x12180340 },
	{ _MMIO(0x9888), 0x12190340 },
	{ _MMIO(0x9888), 0x0c3f1187 },
	{ _MMIO(0x9888), 0x0e3f1205 },
	{ _MMIO(0x9888), 0x003f0500 },
	{ _MMIO(0x9888), 0x023f042b },
	{ _MMIO(0x9888), 0x043f002c },
	{ _MMIO(0x9888), 0x0c5ac000 },
	{ _MMIO(0x9888), 0x0e5ac000 },
	{ _MMIO(0x9888), 0x005a8000 },
	{ _MMIO(0x9888), 0x025ac000 },
	{ _MMIO(0x9888), 0x045a4000 },
	{ _MMIO(0x9888), 0x04183400 },
	{ _MMIO(0x9888), 0x10180000 },
	{ _MMIO(0x9888), 0x06190034 },
	{ _MMIO(0x9888), 0x10190000 },
	{ _MMIO(0x9888), 0x0c1dc000 },
	{ _MMIO(0x9888), 0x0e1dc000 },
	{ _MMIO(0x9888), 0x001d8000 },
	{ _MMIO(0x9888), 0x021dc000 },
	{ _MMIO(0x9888), 0x041d4000 },
	{ _MMIO(0x9888), 0x101f02a8 },
	{ _MMIO(0x9888), 0x0c1fa000 },
	{ _MMIO(0x9888), 0x0e1f00ba },
	{ _MMIO(0x9888), 0x0c388000 },
	{ _MMIO(0x9888), 0x0c395000 },
	{ _MMIO(0x9888), 0x0e395000 },
	{ _MMIO(0x9888), 0x00394000 },
	{ _MMIO(0x9888), 0x02395000 },
	{ _MMIO(0x9888), 0x04391000 },
	{ _MMIO(0x9888), 0x06392000 },
	{ _MMIO(0x9888), 0x0c3a4000 },
	{ _MMIO(0x9888), 0x1b8aa800 },
	{ _MMIO(0x9888), 0x1d8a0002 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x078a8000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x0d8a8000 },
	{ _MMIO(0x9888), 0x258b4005 },
	{ _MMIO(0x9888), 0x278b0015 },
	{ _MMIO(0x9888), 0x238b2a80 },
	{ _MMIO(0x9888), 0x2185800a },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x1b830154 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x47800000 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f800000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x41800060 },
};
2545
2546static int
2547get_l3_2_mux_config(struct drm_i915_private *dev_priv,
2548 const struct i915_oa_reg **regs,
2549 int *lens)
2550{
2551 int n = 0;
2552
2553 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
2554 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
2555
2556 regs[n] = mux_config_l3_2;
2557 lens[n] = ARRAY_SIZE(mux_config_l3_2);
2558 n++;
2559
2560 return n;
2561}
2562
/*
 * Boolean ("B") counter configuration for the "L3_3" metric set
 * (0x2700-range OA report/trigger registers); opaque generated values.
 */
static const struct i915_oa_reg b_counter_config_l3_3[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};
2587
/* EU flex counter configuration for "L3_3" (same values as every set here). */
static const struct i915_oa_reg flex_eu_config_l3_3[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
2597
/*
 * MUX (0x9888 selector) programming for the "L3_3" metric set.  Opaque
 * generated signal-routing values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_l3_3[] = {
	{ _MMIO(0x9888), 0x121b0340 },
	{ _MMIO(0x9888), 0x103f0274 },
	{ _MMIO(0x9888), 0x123f0000 },
	{ _MMIO(0x9888), 0x129b0340 },
	{ _MMIO(0x9888), 0x10bf0274 },
	{ _MMIO(0x9888), 0x12bf0000 },
	{ _MMIO(0x9888), 0x041b3400 },
	{ _MMIO(0x9888), 0x101b0000 },
	{ _MMIO(0x9888), 0x045c8000 },
	{ _MMIO(0x9888), 0x0a3d4000 },
	{ _MMIO(0x9888), 0x003f0080 },
	{ _MMIO(0x9888), 0x023f0793 },
	{ _MMIO(0x9888), 0x043f0014 },
	{ _MMIO(0x9888), 0x04588000 },
	{ _MMIO(0x9888), 0x005a8000 },
	{ _MMIO(0x9888), 0x025ac000 },
	{ _MMIO(0x9888), 0x045a4000 },
	{ _MMIO(0x9888), 0x0a5b4000 },
	{ _MMIO(0x9888), 0x001d8000 },
	{ _MMIO(0x9888), 0x021dc000 },
	{ _MMIO(0x9888), 0x041d4000 },
	{ _MMIO(0x9888), 0x0c1fa000 },
	{ _MMIO(0x9888), 0x0e1f002a },
	{ _MMIO(0x9888), 0x0a384000 },
	{ _MMIO(0x9888), 0x00394000 },
	{ _MMIO(0x9888), 0x02395000 },
	{ _MMIO(0x9888), 0x04399000 },
	{ _MMIO(0x9888), 0x069b0034 },
	{ _MMIO(0x9888), 0x109b0000 },
	{ _MMIO(0x9888), 0x06dc4000 },
	{ _MMIO(0x9888), 0x0cbd4000 },
	{ _MMIO(0x9888), 0x0cbf0981 },
	{ _MMIO(0x9888), 0x0ebf0a0f },
	{ _MMIO(0x9888), 0x06d84000 },
	{ _MMIO(0x9888), 0x0cdac000 },
	{ _MMIO(0x9888), 0x0edac000 },
	{ _MMIO(0x9888), 0x0cdb4000 },
	{ _MMIO(0x9888), 0x0c9dc000 },
	{ _MMIO(0x9888), 0x0e9dc000 },
	{ _MMIO(0x9888), 0x109f02a8 },
	{ _MMIO(0x9888), 0x0e9f0080 },
	{ _MMIO(0x9888), 0x0cb84000 },
	{ _MMIO(0x9888), 0x0cb95000 },
	{ _MMIO(0x9888), 0x0eb95000 },
	{ _MMIO(0x9888), 0x06b92000 },
	{ _MMIO(0x9888), 0x0f88000f },
	{ _MMIO(0x9888), 0x0d880400 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x078a8000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x258b8009 },
	{ _MMIO(0x9888), 0x278b002a },
	{ _MMIO(0x9888), 0x238b2a80 },
	{ _MMIO(0x9888), 0x198c4000 },
	{ _MMIO(0x9888), 0x1b8c0015 },
	{ _MMIO(0x9888), 0x0d8c4000 },
	{ _MMIO(0x9888), 0x0d8da000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x078d2000 },
	{ _MMIO(0x9888), 0x2185800a },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x1b830154 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x45800c00 },
	{ _MMIO(0x9888), 0x47800c63 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f8014a5 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x41800045 },
};
2692
2693static int
2694get_l3_3_mux_config(struct drm_i915_private *dev_priv,
2695 const struct i915_oa_reg **regs,
2696 int *lens)
2697{
2698 int n = 0;
2699
2700 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
2701 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
2702
2703 regs[n] = mux_config_l3_3;
2704 lens[n] = ARRAY_SIZE(mux_config_l3_3);
2705 n++;
2706
2707 return n;
2708}
2709
/*
 * Boolean ("B") counter configuration for the "L3_4" metric set
 * (0x2700-range OA report/trigger registers); opaque generated values.
 */
static const struct i915_oa_reg b_counter_config_l3_4[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};
2734
/* EU flex counter configuration for "L3_4" (same values as every set here). */
static const struct i915_oa_reg flex_eu_config_l3_4[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
2744
/*
 * MUX (0x9888 selector) programming for the "L3_4" metric set.  Opaque
 * generated signal-routing values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_l3_4[] = {
	{ _MMIO(0x9888), 0x121a0340 },
	{ _MMIO(0x9888), 0x103f0017 },
	{ _MMIO(0x9888), 0x123f0020 },
	{ _MMIO(0x9888), 0x129a0340 },
	{ _MMIO(0x9888), 0x10bf0017 },
	{ _MMIO(0x9888), 0x12bf0020 },
	{ _MMIO(0x9888), 0x041a3400 },
	{ _MMIO(0x9888), 0x101a0000 },
	{ _MMIO(0x9888), 0x043b8000 },
	{ _MMIO(0x9888), 0x0a3e0010 },
	{ _MMIO(0x9888), 0x003f0200 },
	{ _MMIO(0x9888), 0x023f0113 },
	{ _MMIO(0x9888), 0x043f0014 },
	{ _MMIO(0x9888), 0x02592000 },
	{ _MMIO(0x9888), 0x005a8000 },
	{ _MMIO(0x9888), 0x025ac000 },
	{ _MMIO(0x9888), 0x045a4000 },
	{ _MMIO(0x9888), 0x0a1c8000 },
	{ _MMIO(0x9888), 0x001d8000 },
	{ _MMIO(0x9888), 0x021dc000 },
	{ _MMIO(0x9888), 0x041d4000 },
	{ _MMIO(0x9888), 0x0a1e8000 },
	{ _MMIO(0x9888), 0x0c1fa000 },
	{ _MMIO(0x9888), 0x0e1f001a },
	{ _MMIO(0x9888), 0x00394000 },
	{ _MMIO(0x9888), 0x02395000 },
	{ _MMIO(0x9888), 0x04391000 },
	{ _MMIO(0x9888), 0x069a0034 },
	{ _MMIO(0x9888), 0x109a0000 },
	{ _MMIO(0x9888), 0x06bb4000 },
	{ _MMIO(0x9888), 0x0abe0040 },
	{ _MMIO(0x9888), 0x0cbf0984 },
	{ _MMIO(0x9888), 0x0ebf0a02 },
	{ _MMIO(0x9888), 0x02d94000 },
	{ _MMIO(0x9888), 0x0cdac000 },
	{ _MMIO(0x9888), 0x0edac000 },
	{ _MMIO(0x9888), 0x0c9c0400 },
	{ _MMIO(0x9888), 0x0c9dc000 },
	{ _MMIO(0x9888), 0x0e9dc000 },
	{ _MMIO(0x9888), 0x0c9e0400 },
	{ _MMIO(0x9888), 0x109f02a8 },
	{ _MMIO(0x9888), 0x0e9f0040 },
	{ _MMIO(0x9888), 0x0cb95000 },
	{ _MMIO(0x9888), 0x0eb95000 },
	{ _MMIO(0x9888), 0x0f88000f },
	{ _MMIO(0x9888), 0x0d880400 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x078a8000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x258b8009 },
	{ _MMIO(0x9888), 0x278b002a },
	{ _MMIO(0x9888), 0x238b2a80 },
	{ _MMIO(0x9888), 0x198c4000 },
	{ _MMIO(0x9888), 0x1b8c0015 },
	{ _MMIO(0x9888), 0x0d8c4000 },
	{ _MMIO(0x9888), 0x0d8da000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x078d2000 },
	{ _MMIO(0x9888), 0x2185800a },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x1b830154 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x45800800 },
	{ _MMIO(0x9888), 0x47800842 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f801084 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x41800044 },
};
2838
2839static int
2840get_l3_4_mux_config(struct drm_i915_private *dev_priv,
2841 const struct i915_oa_reg **regs,
2842 int *lens)
2843{
2844 int n = 0;
2845
2846 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
2847 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
2848
2849 regs[n] = mux_config_l3_4;
2850 lens[n] = ARRAY_SIZE(mux_config_l3_4);
2851 n++;
2852
2853 return n;
2854}
2855
/*
 * Boolean ("B") counter configuration for the "RasterizerAndPixelBackend"
 * metric set (0x2700-range OA report/trigger registers); note this set
 * programs different trigger/mask values than the L3 sets above.  Opaque
 * generated values — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00006000 },
	{ _MMIO(0x2774), 0x0000f3ff },
	{ _MMIO(0x2778), 0x00001800 },
	{ _MMIO(0x277c), 0x0000fcff },
	{ _MMIO(0x2780), 0x00000600 },
	{ _MMIO(0x2784), 0x0000ff3f },
	{ _MMIO(0x2788), 0x00000180 },
	{ _MMIO(0x278c), 0x0000ffcf },
	{ _MMIO(0x2790), 0x00000060 },
	{ _MMIO(0x2794), 0x0000fff3 },
	{ _MMIO(0x2798), 0x00000018 },
	{ _MMIO(0x279c), 0x0000fffc },
};
2876
/* EU flex counter configuration for "RasterizerAndPixelBackend" (shared values). */
static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
2886
/*
 * MUX (0x9888 selector) programming for the "RasterizerAndPixelBackend"
 * metric set.  Opaque generated signal-routing values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x9888), 0x143b000e },
	{ _MMIO(0x9888), 0x043c55c0 },
	{ _MMIO(0x9888), 0x0a1e0280 },
	{ _MMIO(0x9888), 0x0c1e0408 },
	{ _MMIO(0x9888), 0x10390000 },
	{ _MMIO(0x9888), 0x12397a1f },
	{ _MMIO(0x9888), 0x14bb000e },
	{ _MMIO(0x9888), 0x04bc5000 },
	{ _MMIO(0x9888), 0x0a9e0296 },
	{ _MMIO(0x9888), 0x0c9e0008 },
	{ _MMIO(0x9888), 0x10b90000 },
	{ _MMIO(0x9888), 0x12b97a1f },
	{ _MMIO(0x9888), 0x063b0042 },
	{ _MMIO(0x9888), 0x103b0000 },
	{ _MMIO(0x9888), 0x083c0000 },
	{ _MMIO(0x9888), 0x0a3e0040 },
	{ _MMIO(0x9888), 0x043f8000 },
	{ _MMIO(0x9888), 0x02594000 },
	{ _MMIO(0x9888), 0x045a8000 },
	{ _MMIO(0x9888), 0x0c1c0400 },
	{ _MMIO(0x9888), 0x041d8000 },
	{ _MMIO(0x9888), 0x081e02c0 },
	{ _MMIO(0x9888), 0x0e1e0000 },
	{ _MMIO(0x9888), 0x0c1fa800 },
	{ _MMIO(0x9888), 0x0e1f0260 },
	{ _MMIO(0x9888), 0x101f0014 },
	{ _MMIO(0x9888), 0x003905e0 },
	{ _MMIO(0x9888), 0x06390bc0 },
	{ _MMIO(0x9888), 0x02390018 },
	{ _MMIO(0x9888), 0x04394000 },
	{ _MMIO(0x9888), 0x04bb0042 },
	{ _MMIO(0x9888), 0x10bb0000 },
	{ _MMIO(0x9888), 0x02bc05c0 },
	{ _MMIO(0x9888), 0x08bc0000 },
	{ _MMIO(0x9888), 0x0abe0004 },
	{ _MMIO(0x9888), 0x02bf8000 },
	{ _MMIO(0x9888), 0x02d91000 },
	{ _MMIO(0x9888), 0x02da8000 },
	{ _MMIO(0x9888), 0x089c8000 },
	{ _MMIO(0x9888), 0x029d8000 },
	{ _MMIO(0x9888), 0x089e8000 },
	{ _MMIO(0x9888), 0x0e9e0000 },
	{ _MMIO(0x9888), 0x0e9fa806 },
	{ _MMIO(0x9888), 0x109f0142 },
	{ _MMIO(0x9888), 0x08b90617 },
	{ _MMIO(0x9888), 0x0ab90be0 },
	{ _MMIO(0x9888), 0x02b94000 },
	{ _MMIO(0x9888), 0x0d88f000 },
	{ _MMIO(0x9888), 0x0f88000c },
	{ _MMIO(0x9888), 0x07888000 },
	{ _MMIO(0x9888), 0x09888000 },
	{ _MMIO(0x9888), 0x018a8000 },
	{ _MMIO(0x9888), 0x0f8a8000 },
	{ _MMIO(0x9888), 0x1b8a2800 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x0d8a8000 },
	{ _MMIO(0x9888), 0x238b52a0 },
	{ _MMIO(0x9888), 0x258b6a95 },
	{ _MMIO(0x9888), 0x278b0029 },
	{ _MMIO(0x9888), 0x178c2000 },
	{ _MMIO(0x9888), 0x198c1500 },
	{ _MMIO(0x9888), 0x1b8c0014 },
	{ _MMIO(0x9888), 0x078c4000 },
	{ _MMIO(0x9888), 0x098c4000 },
	{ _MMIO(0x9888), 0x098da000 },
	{ _MMIO(0x9888), 0x0b8da000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x038d8000 },
	{ _MMIO(0x9888), 0x058d2000 },
	{ _MMIO(0x9888), 0x1f85aa80 },
	{ _MMIO(0x9888), 0x2185aaaa },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x01834000 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0184c000 },
	{ _MMIO(0x9888), 0x0784c000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x1180c000 },
	{ _MMIO(0x9888), 0x1780c000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x4d800444 },
	{ _MMIO(0x9888), 0x3d800000 },
	{ _MMIO(0x9888), 0x4f804000 },
	{ _MMIO(0x9888), 0x43801080 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800084 },
	{ _MMIO(0x9888), 0x53800044 },
	{ _MMIO(0x9888), 0x47801080 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x3f800000 },
	{ _MMIO(0x9888), 0x41800840 },
};
3001
3002static int
3003get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
3004 const struct i915_oa_reg **regs,
3005 int *lens)
3006{
3007 int n = 0;
3008
3009 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
3010 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
3011
3012 regs[n] = mux_config_rasterizer_and_pixel_backend;
3013 lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
3014 n++;
3015
3016 return n;
3017}
3018
/*
 * Boolean ("B") counter configuration for the "Sampler_1" metric set
 * (0x2700-range OA report/trigger registers); a shorter programming than
 * the L3 sets.  Opaque generated values — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_sampler_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x70800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x0000c000 },
	{ _MMIO(0x2774), 0x0000e7ff },
	{ _MMIO(0x2778), 0x00003000 },
	{ _MMIO(0x277c), 0x0000f9ff },
	{ _MMIO(0x2780), 0x00000c00 },
	{ _MMIO(0x2784), 0x0000fe7f },
};
3033
/* EU flex counter configuration for "Sampler_1" (same values as every set here). */
static const struct i915_oa_reg flex_eu_config_sampler_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
3043
/*
 * MUX (0x9888 selector) programming for the "Sampler_1" metric set.
 * Opaque generated signal-routing values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_sampler_1[] = {
	{ _MMIO(0x9888), 0x18921400 },
	{ _MMIO(0x9888), 0x149500ab },
	{ _MMIO(0x9888), 0x18b21400 },
	{ _MMIO(0x9888), 0x14b500ab },
	{ _MMIO(0x9888), 0x18d21400 },
	{ _MMIO(0x9888), 0x14d500ab },
	{ _MMIO(0x9888), 0x0cdc8000 },
	{ _MMIO(0x9888), 0x0edc4000 },
	{ _MMIO(0x9888), 0x02dcc000 },
	{ _MMIO(0x9888), 0x04dcc000 },
	{ _MMIO(0x9888), 0x1abd00a0 },
	{ _MMIO(0x9888), 0x0abd8000 },
	{ _MMIO(0x9888), 0x0cd88000 },
	{ _MMIO(0x9888), 0x0ed84000 },
	{ _MMIO(0x9888), 0x04d88000 },
	{ _MMIO(0x9888), 0x1adb0050 },
	{ _MMIO(0x9888), 0x04db8000 },
	{ _MMIO(0x9888), 0x06db8000 },
	{ _MMIO(0x9888), 0x08db8000 },
	{ _MMIO(0x9888), 0x0adb4000 },
	{ _MMIO(0x9888), 0x109f02a0 },
	{ _MMIO(0x9888), 0x0c9fa000 },
	{ _MMIO(0x9888), 0x0e9f00aa },
	{ _MMIO(0x9888), 0x18b82500 },
	{ _MMIO(0x9888), 0x02b88000 },
	{ _MMIO(0x9888), 0x04b84000 },
	{ _MMIO(0x9888), 0x06b84000 },
	{ _MMIO(0x9888), 0x08b84000 },
	{ _MMIO(0x9888), 0x0ab84000 },
	{ _MMIO(0x9888), 0x0cb88000 },
	{ _MMIO(0x9888), 0x0cb98000 },
	{ _MMIO(0x9888), 0x0eb9a000 },
	{ _MMIO(0x9888), 0x00b98000 },
	{ _MMIO(0x9888), 0x02b9a000 },
	{ _MMIO(0x9888), 0x04b9a000 },
	{ _MMIO(0x9888), 0x06b92000 },
	{ _MMIO(0x9888), 0x1aba0200 },
	{ _MMIO(0x9888), 0x02ba8000 },
	{ _MMIO(0x9888), 0x0cba8000 },
	{ _MMIO(0x9888), 0x04908000 },
	{ _MMIO(0x9888), 0x04918000 },
	{ _MMIO(0x9888), 0x04927300 },
	{ _MMIO(0x9888), 0x10920000 },
	{ _MMIO(0x9888), 0x1893000a },
	{ _MMIO(0x9888), 0x0a934000 },
	{ _MMIO(0x9888), 0x0a946000 },
	{ _MMIO(0x9888), 0x0c959000 },
	{ _MMIO(0x9888), 0x0e950098 },
	{ _MMIO(0x9888), 0x10950000 },
	{ _MMIO(0x9888), 0x04b04000 },
	{ _MMIO(0x9888), 0x04b14000 },
	{ _MMIO(0x9888), 0x04b20073 },
	{ _MMIO(0x9888), 0x10b20000 },
	{ _MMIO(0x9888), 0x04b38000 },
	{ _MMIO(0x9888), 0x06b38000 },
	{ _MMIO(0x9888), 0x08b34000 },
	{ _MMIO(0x9888), 0x04b4c000 },
	{ _MMIO(0x9888), 0x02b59890 },
	{ _MMIO(0x9888), 0x10b50000 },
	{ _MMIO(0x9888), 0x06d04000 },
	{ _MMIO(0x9888), 0x06d14000 },
	{ _MMIO(0x9888), 0x06d20073 },
	{ _MMIO(0x9888), 0x10d20000 },
	{ _MMIO(0x9888), 0x18d30020 },
	{ _MMIO(0x9888), 0x02d38000 },
	{ _MMIO(0x9888), 0x0cd34000 },
	{ _MMIO(0x9888), 0x0ad48000 },
	{ _MMIO(0x9888), 0x04d42000 },
	{ _MMIO(0x9888), 0x0ed59000 },
	{ _MMIO(0x9888), 0x00d59800 },
	{ _MMIO(0x9888), 0x10d50000 },
	{ _MMIO(0x9888), 0x0f88000e },
	{ _MMIO(0x9888), 0x03888000 },
	{ _MMIO(0x9888), 0x05888000 },
	{ _MMIO(0x9888), 0x07888000 },
	{ _MMIO(0x9888), 0x09888000 },
	{ _MMIO(0x9888), 0x0b888000 },
	{ _MMIO(0x9888), 0x0d880400 },
	{ _MMIO(0x9888), 0x278b002a },
	{ _MMIO(0x9888), 0x238b5500 },
	{ _MMIO(0x9888), 0x258b000a },
	{ _MMIO(0x9888), 0x1b8c0015 },
	{ _MMIO(0x9888), 0x038c4000 },
	{ _MMIO(0x9888), 0x058c4000 },
	{ _MMIO(0x9888), 0x078c4000 },
	{ _MMIO(0x9888), 0x098c4000 },
	{ _MMIO(0x9888), 0x0b8c4000 },
	{ _MMIO(0x9888), 0x0d8c4000 },
	{ _MMIO(0x9888), 0x0d8d8000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x018d8000 },
	{ _MMIO(0x9888), 0x038da000 },
	{ _MMIO(0x9888), 0x058da000 },
	{ _MMIO(0x9888), 0x078d2000 },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x2185000a },
	{ _MMIO(0x9888), 0x1b830150 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0d848000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x1d808000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47801021 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f800c64 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x41800c02 },
};
3170
3171static int
3172get_sampler_1_mux_config(struct drm_i915_private *dev_priv,
3173 const struct i915_oa_reg **regs,
3174 int *lens)
3175{
3176 int n = 0;
3177
3178 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
3179 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
3180
3181 regs[n] = mux_config_sampler_1;
3182 lens[n] = ARRAY_SIZE(mux_config_sampler_1);
3183 n++;
3184
3185 return n;
3186}
3187
/*
 * Boolean ("B") counter configuration for the "Sampler_2" metric set
 * (0x2700-range OA report/trigger registers).  Identical programming to
 * b_counter_config_sampler_1; opaque generated values.
 */
static const struct i915_oa_reg b_counter_config_sampler_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x70800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x0000c000 },
	{ _MMIO(0x2774), 0x0000e7ff },
	{ _MMIO(0x2778), 0x00003000 },
	{ _MMIO(0x277c), 0x0000f9ff },
	{ _MMIO(0x2780), 0x00000c00 },
	{ _MMIO(0x2784), 0x0000fe7f },
};
3202
/* EU flex counter configuration for "Sampler_2" (same values as every set here). */
static const struct i915_oa_reg flex_eu_config_sampler_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
3212
/*
 * MUX (0x9888 selector) programming for the "Sampler_2" metric set.
 * Mirrors the Sampler_1 routing on a different slice/bank of signals
 * (note the systematically shifted payload prefixes versus sampler_1).
 * Opaque generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_sampler_2[] = {
	{ _MMIO(0x9888), 0x18121400 },
	{ _MMIO(0x9888), 0x141500ab },
	{ _MMIO(0x9888), 0x18321400 },
	{ _MMIO(0x9888), 0x143500ab },
	{ _MMIO(0x9888), 0x18521400 },
	{ _MMIO(0x9888), 0x145500ab },
	{ _MMIO(0x9888), 0x0c5c8000 },
	{ _MMIO(0x9888), 0x0e5c4000 },
	{ _MMIO(0x9888), 0x025cc000 },
	{ _MMIO(0x9888), 0x045cc000 },
	{ _MMIO(0x9888), 0x1a3d00a0 },
	{ _MMIO(0x9888), 0x0a3d8000 },
	{ _MMIO(0x9888), 0x0c588000 },
	{ _MMIO(0x9888), 0x0e584000 },
	{ _MMIO(0x9888), 0x04588000 },
	{ _MMIO(0x9888), 0x1a5b0050 },
	{ _MMIO(0x9888), 0x045b8000 },
	{ _MMIO(0x9888), 0x065b8000 },
	{ _MMIO(0x9888), 0x085b8000 },
	{ _MMIO(0x9888), 0x0a5b4000 },
	{ _MMIO(0x9888), 0x101f02a0 },
	{ _MMIO(0x9888), 0x0c1fa000 },
	{ _MMIO(0x9888), 0x0e1f00aa },
	{ _MMIO(0x9888), 0x18382500 },
	{ _MMIO(0x9888), 0x02388000 },
	{ _MMIO(0x9888), 0x04384000 },
	{ _MMIO(0x9888), 0x06384000 },
	{ _MMIO(0x9888), 0x08384000 },
	{ _MMIO(0x9888), 0x0a384000 },
	{ _MMIO(0x9888), 0x0c388000 },
	{ _MMIO(0x9888), 0x0c398000 },
	{ _MMIO(0x9888), 0x0e39a000 },
	{ _MMIO(0x9888), 0x00398000 },
	{ _MMIO(0x9888), 0x0239a000 },
	{ _MMIO(0x9888), 0x0439a000 },
	{ _MMIO(0x9888), 0x06392000 },
	{ _MMIO(0x9888), 0x1a3a0200 },
	{ _MMIO(0x9888), 0x023a8000 },
	{ _MMIO(0x9888), 0x0c3a8000 },
	{ _MMIO(0x9888), 0x04108000 },
	{ _MMIO(0x9888), 0x04118000 },
	{ _MMIO(0x9888), 0x04127300 },
	{ _MMIO(0x9888), 0x10120000 },
	{ _MMIO(0x9888), 0x1813000a },
	{ _MMIO(0x9888), 0x0a134000 },
	{ _MMIO(0x9888), 0x0a146000 },
	{ _MMIO(0x9888), 0x0c159000 },
	{ _MMIO(0x9888), 0x0e150098 },
	{ _MMIO(0x9888), 0x10150000 },
	{ _MMIO(0x9888), 0x04304000 },
	{ _MMIO(0x9888), 0x04314000 },
	{ _MMIO(0x9888), 0x04320073 },
	{ _MMIO(0x9888), 0x10320000 },
	{ _MMIO(0x9888), 0x04338000 },
	{ _MMIO(0x9888), 0x06338000 },
	{ _MMIO(0x9888), 0x08334000 },
	{ _MMIO(0x9888), 0x0434c000 },
	{ _MMIO(0x9888), 0x02359890 },
	{ _MMIO(0x9888), 0x10350000 },
	{ _MMIO(0x9888), 0x06504000 },
	{ _MMIO(0x9888), 0x06514000 },
	{ _MMIO(0x9888), 0x06520073 },
	{ _MMIO(0x9888), 0x10520000 },
	{ _MMIO(0x9888), 0x18530020 },
	{ _MMIO(0x9888), 0x02538000 },
	{ _MMIO(0x9888), 0x0c534000 },
	{ _MMIO(0x9888), 0x0a548000 },
	{ _MMIO(0x9888), 0x04542000 },
	{ _MMIO(0x9888), 0x0e559000 },
	{ _MMIO(0x9888), 0x00559800 },
	{ _MMIO(0x9888), 0x10550000 },
	{ _MMIO(0x9888), 0x1b8aa000 },
	{ _MMIO(0x9888), 0x1d8a0002 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x078a8000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x0d8a8000 },
	{ _MMIO(0x9888), 0x278b0015 },
	{ _MMIO(0x9888), 0x238b2a80 },
	{ _MMIO(0x9888), 0x258b0005 },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x2185000a },
	{ _MMIO(0x9888), 0x1b830150 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0d848000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x1d808000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47801021 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f800c64 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x41800c02 },
};
3327
3328static int
3329get_sampler_2_mux_config(struct drm_i915_private *dev_priv,
3330 const struct i915_oa_reg **regs,
3331 int *lens)
3332{
3333 int n = 0;
3334
3335 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
3336 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
3337
3338 regs[n] = mux_config_sampler_2;
3339 lens[n] = ARRAY_SIZE(mux_config_sampler_2);
3340 n++;
3341
3342 return n;
3343}
3344
/* Boolean (B) counter programming for the TDL_1 metric set: register/value
 * pairs written verbatim when this set is selected (registers in the 0x27xx
 * range). Values are opaque hardware configuration — presumably generated
 * from Intel's OA metric definitions; do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_tdl_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000fdff },
	{ _MMIO(0x2778), 0x00000000 },
	{ _MMIO(0x277c), 0x0000fe7f },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000ffbf },
	{ _MMIO(0x2788), 0x00000000 },
	{ _MMIO(0x278c), 0x0000ffcf },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000fff7 },
	{ _MMIO(0x2798), 0x00000000 },
	{ _MMIO(0x279c), 0x0000fff9 },
};
3365
/* Flexible EU counter programming for the TDL_1 metric set (registers
 * 0xe458-0xe758 / 0xe45c-0xe65c). Opaque generated values — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
3375
/* NOA MUX programming for the TDL_1 metric set: every entry is a write to
 * the 0x9888 MUX register. The write sequence is order-sensitive hardware
 * programming — presumably generated from Intel's OA metric definitions;
 * do not reorder or hand-edit.
 */
static const struct i915_oa_reg mux_config_tdl_1[] = {
	{ _MMIO(0x9888), 0x16154d60 },
	{ _MMIO(0x9888), 0x16352e60 },
	{ _MMIO(0x9888), 0x16554d60 },
	{ _MMIO(0x9888), 0x16950000 },
	{ _MMIO(0x9888), 0x16b50000 },
	{ _MMIO(0x9888), 0x16d50000 },
	{ _MMIO(0x9888), 0x005c8000 },
	{ _MMIO(0x9888), 0x045cc000 },
	{ _MMIO(0x9888), 0x065c4000 },
	{ _MMIO(0x9888), 0x083d8000 },
	{ _MMIO(0x9888), 0x0a3d8000 },
	{ _MMIO(0x9888), 0x0458c000 },
	{ _MMIO(0x9888), 0x025b8000 },
	{ _MMIO(0x9888), 0x085b4000 },
	{ _MMIO(0x9888), 0x0a5b4000 },
	{ _MMIO(0x9888), 0x0c5b8000 },
	{ _MMIO(0x9888), 0x0c1fa000 },
	{ _MMIO(0x9888), 0x0e1f00aa },
	{ _MMIO(0x9888), 0x02384000 },
	{ _MMIO(0x9888), 0x04388000 },
	{ _MMIO(0x9888), 0x06388000 },
	{ _MMIO(0x9888), 0x08384000 },
	{ _MMIO(0x9888), 0x0a384000 },
	{ _MMIO(0x9888), 0x0c384000 },
	{ _MMIO(0x9888), 0x00398000 },
	{ _MMIO(0x9888), 0x0239a000 },
	{ _MMIO(0x9888), 0x0439a000 },
	{ _MMIO(0x9888), 0x06392000 },
	{ _MMIO(0x9888), 0x043a8000 },
	{ _MMIO(0x9888), 0x063a8000 },
	{ _MMIO(0x9888), 0x08138000 },
	{ _MMIO(0x9888), 0x0a138000 },
	{ _MMIO(0x9888), 0x06143000 },
	{ _MMIO(0x9888), 0x0415cfc7 },
	{ _MMIO(0x9888), 0x10150000 },
	{ _MMIO(0x9888), 0x02338000 },
	{ _MMIO(0x9888), 0x0c338000 },
	{ _MMIO(0x9888), 0x04342000 },
	{ _MMIO(0x9888), 0x06344000 },
	{ _MMIO(0x9888), 0x0035c700 },
	{ _MMIO(0x9888), 0x063500cf },
	{ _MMIO(0x9888), 0x10350000 },
	{ _MMIO(0x9888), 0x04538000 },
	{ _MMIO(0x9888), 0x06538000 },
	{ _MMIO(0x9888), 0x0454c000 },
	{ _MMIO(0x9888), 0x0255cfc7 },
	{ _MMIO(0x9888), 0x10550000 },
	{ _MMIO(0x9888), 0x06dc8000 },
	{ _MMIO(0x9888), 0x08dc4000 },
	{ _MMIO(0x9888), 0x0cdcc000 },
	{ _MMIO(0x9888), 0x0edcc000 },
	{ _MMIO(0x9888), 0x1abd00a8 },
	{ _MMIO(0x9888), 0x0cd8c000 },
	{ _MMIO(0x9888), 0x0ed84000 },
	{ _MMIO(0x9888), 0x0edb8000 },
	{ _MMIO(0x9888), 0x18db0800 },
	{ _MMIO(0x9888), 0x1adb0254 },
	{ _MMIO(0x9888), 0x0e9faa00 },
	{ _MMIO(0x9888), 0x109f02aa },
	{ _MMIO(0x9888), 0x0eb84000 },
	{ _MMIO(0x9888), 0x16b84000 },
	{ _MMIO(0x9888), 0x18b8156a },
	{ _MMIO(0x9888), 0x06b98000 },
	{ _MMIO(0x9888), 0x08b9a000 },
	{ _MMIO(0x9888), 0x0ab9a000 },
	{ _MMIO(0x9888), 0x0cb9a000 },
	{ _MMIO(0x9888), 0x0eb9a000 },
	{ _MMIO(0x9888), 0x18baa000 },
	{ _MMIO(0x9888), 0x1aba0002 },
	{ _MMIO(0x9888), 0x16934000 },
	{ _MMIO(0x9888), 0x1893000a },
	{ _MMIO(0x9888), 0x0a947000 },
	{ _MMIO(0x9888), 0x0c95c5c1 },
	{ _MMIO(0x9888), 0x0e9500c3 },
	{ _MMIO(0x9888), 0x10950000 },
	{ _MMIO(0x9888), 0x0eb38000 },
	{ _MMIO(0x9888), 0x16b30040 },
	{ _MMIO(0x9888), 0x18b30020 },
	{ _MMIO(0x9888), 0x06b48000 },
	{ _MMIO(0x9888), 0x08b41000 },
	{ _MMIO(0x9888), 0x0ab48000 },
	{ _MMIO(0x9888), 0x06b5c500 },
	{ _MMIO(0x9888), 0x08b500c3 },
	{ _MMIO(0x9888), 0x0eb5c100 },
	{ _MMIO(0x9888), 0x10b50000 },
	{ _MMIO(0x9888), 0x16d31500 },
	{ _MMIO(0x9888), 0x08d4e000 },
	{ _MMIO(0x9888), 0x08d5c100 },
	{ _MMIO(0x9888), 0x0ad5c3c5 },
	{ _MMIO(0x9888), 0x10d50000 },
	{ _MMIO(0x9888), 0x0d88f800 },
	{ _MMIO(0x9888), 0x0f88000f },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x078a8000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x0d8a8000 },
	{ _MMIO(0x9888), 0x258baaa5 },
	{ _MMIO(0x9888), 0x278b002a },
	{ _MMIO(0x9888), 0x238b2a80 },
	{ _MMIO(0x9888), 0x0f8c4000 },
	{ _MMIO(0x9888), 0x178c2000 },
	{ _MMIO(0x9888), 0x198c5500 },
	{ _MMIO(0x9888), 0x1b8c0015 },
	{ _MMIO(0x9888), 0x078d8000 },
	{ _MMIO(0x9888), 0x098da000 },
	{ _MMIO(0x9888), 0x0b8da000 },
	{ _MMIO(0x9888), 0x0d8da000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x2185aaaa },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0784c000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x1780c000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x43800c42 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800063 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47800800 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f8014a4 },
	{ _MMIO(0x9888), 0x41801042 },
};
3527
3528static int
3529get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
3530 const struct i915_oa_reg **regs,
3531 int *lens)
3532{
3533 int n = 0;
3534
3535 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
3536 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
3537
3538 regs[n] = mux_config_tdl_1;
3539 lens[n] = ARRAY_SIZE(mux_config_tdl_1);
3540 n++;
3541
3542 return n;
3543}
3544
/* Boolean (B) counter programming for the TDL_2 metric set: register/value
 * pairs written verbatim when this set is selected (registers in the 0x27xx
 * range). Opaque generated hardware values — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_tdl_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000fdff },
	{ _MMIO(0x2778), 0x00000000 },
	{ _MMIO(0x277c), 0x0000fe7f },
	{ _MMIO(0x2780), 0x00000000 },
	{ _MMIO(0x2784), 0x0000ff9f },
	{ _MMIO(0x2788), 0x00000000 },
	{ _MMIO(0x278c), 0x0000ffe7 },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000fffb },
	{ _MMIO(0x2798), 0x00000002 },
	{ _MMIO(0x279c), 0x0000fffd },
};
3565
/* Flexible EU counter programming for the TDL_2 metric set — identical
 * values to flex_eu_config_tdl_1. Opaque generated values; do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
3575
/* NOA MUX programming for the TDL_2 metric set: every entry is a write to
 * the 0x9888 MUX register. Order-sensitive generated hardware programming —
 * do not reorder or hand-edit.
 */
static const struct i915_oa_reg mux_config_tdl_2[] = {
	{ _MMIO(0x9888), 0x16150000 },
	{ _MMIO(0x9888), 0x16350000 },
	{ _MMIO(0x9888), 0x16550000 },
	{ _MMIO(0x9888), 0x16952e60 },
	{ _MMIO(0x9888), 0x16b54d60 },
	{ _MMIO(0x9888), 0x16d52e60 },
	{ _MMIO(0x9888), 0x065c8000 },
	{ _MMIO(0x9888), 0x085cc000 },
	{ _MMIO(0x9888), 0x0a5cc000 },
	{ _MMIO(0x9888), 0x0c5c4000 },
	{ _MMIO(0x9888), 0x0e3d8000 },
	{ _MMIO(0x9888), 0x183da000 },
	{ _MMIO(0x9888), 0x06588000 },
	{ _MMIO(0x9888), 0x08588000 },
	{ _MMIO(0x9888), 0x0a584000 },
	{ _MMIO(0x9888), 0x0e5b4000 },
	{ _MMIO(0x9888), 0x185b5800 },
	{ _MMIO(0x9888), 0x1a5b000a },
	{ _MMIO(0x9888), 0x0e1faa00 },
	{ _MMIO(0x9888), 0x101f02aa },
	{ _MMIO(0x9888), 0x0e384000 },
	{ _MMIO(0x9888), 0x16384000 },
	{ _MMIO(0x9888), 0x18382a55 },
	{ _MMIO(0x9888), 0x06398000 },
	{ _MMIO(0x9888), 0x0839a000 },
	{ _MMIO(0x9888), 0x0a39a000 },
	{ _MMIO(0x9888), 0x0c39a000 },
	{ _MMIO(0x9888), 0x0e39a000 },
	{ _MMIO(0x9888), 0x1a3a02a0 },
	{ _MMIO(0x9888), 0x0e138000 },
	{ _MMIO(0x9888), 0x16130500 },
	{ _MMIO(0x9888), 0x06148000 },
	{ _MMIO(0x9888), 0x08146000 },
	{ _MMIO(0x9888), 0x0615c100 },
	{ _MMIO(0x9888), 0x0815c500 },
	{ _MMIO(0x9888), 0x0a1500c3 },
	{ _MMIO(0x9888), 0x10150000 },
	{ _MMIO(0x9888), 0x16335040 },
	{ _MMIO(0x9888), 0x08349000 },
	{ _MMIO(0x9888), 0x0a341000 },
	{ _MMIO(0x9888), 0x083500c1 },
	{ _MMIO(0x9888), 0x0a35c500 },
	{ _MMIO(0x9888), 0x0c3500c3 },
	{ _MMIO(0x9888), 0x10350000 },
	{ _MMIO(0x9888), 0x1853002a },
	{ _MMIO(0x9888), 0x0a54e000 },
	{ _MMIO(0x9888), 0x0c55c500 },
	{ _MMIO(0x9888), 0x0e55c1c3 },
	{ _MMIO(0x9888), 0x10550000 },
	{ _MMIO(0x9888), 0x00dc8000 },
	{ _MMIO(0x9888), 0x02dcc000 },
	{ _MMIO(0x9888), 0x04dc4000 },
	{ _MMIO(0x9888), 0x04bd8000 },
	{ _MMIO(0x9888), 0x06bd8000 },
	{ _MMIO(0x9888), 0x02d8c000 },
	{ _MMIO(0x9888), 0x02db8000 },
	{ _MMIO(0x9888), 0x04db4000 },
	{ _MMIO(0x9888), 0x06db4000 },
	{ _MMIO(0x9888), 0x08db8000 },
	{ _MMIO(0x9888), 0x0c9fa000 },
	{ _MMIO(0x9888), 0x0e9f00aa },
	{ _MMIO(0x9888), 0x02b84000 },
	{ _MMIO(0x9888), 0x04b84000 },
	{ _MMIO(0x9888), 0x06b84000 },
	{ _MMIO(0x9888), 0x08b84000 },
	{ _MMIO(0x9888), 0x0ab88000 },
	{ _MMIO(0x9888), 0x0cb88000 },
	{ _MMIO(0x9888), 0x00b98000 },
	{ _MMIO(0x9888), 0x02b9a000 },
	{ _MMIO(0x9888), 0x04b9a000 },
	{ _MMIO(0x9888), 0x06b92000 },
	{ _MMIO(0x9888), 0x0aba8000 },
	{ _MMIO(0x9888), 0x0cba8000 },
	{ _MMIO(0x9888), 0x04938000 },
	{ _MMIO(0x9888), 0x06938000 },
	{ _MMIO(0x9888), 0x0494c000 },
	{ _MMIO(0x9888), 0x0295cfc7 },
	{ _MMIO(0x9888), 0x10950000 },
	{ _MMIO(0x9888), 0x02b38000 },
	{ _MMIO(0x9888), 0x08b38000 },
	{ _MMIO(0x9888), 0x04b42000 },
	{ _MMIO(0x9888), 0x06b41000 },
	{ _MMIO(0x9888), 0x00b5c700 },
	{ _MMIO(0x9888), 0x04b500cf },
	{ _MMIO(0x9888), 0x10b50000 },
	{ _MMIO(0x9888), 0x0ad38000 },
	{ _MMIO(0x9888), 0x0cd38000 },
	{ _MMIO(0x9888), 0x06d46000 },
	{ _MMIO(0x9888), 0x04d5c700 },
	{ _MMIO(0x9888), 0x06d500cf },
	{ _MMIO(0x9888), 0x10d50000 },
	{ _MMIO(0x9888), 0x03888000 },
	{ _MMIO(0x9888), 0x05888000 },
	{ _MMIO(0x9888), 0x07888000 },
	{ _MMIO(0x9888), 0x09888000 },
	{ _MMIO(0x9888), 0x0b888000 },
	{ _MMIO(0x9888), 0x0d880400 },
	{ _MMIO(0x9888), 0x0f8a8000 },
	{ _MMIO(0x9888), 0x198a8000 },
	{ _MMIO(0x9888), 0x1b8aaaa0 },
	{ _MMIO(0x9888), 0x1d8a0002 },
	{ _MMIO(0x9888), 0x258b555a },
	{ _MMIO(0x9888), 0x278b0015 },
	{ _MMIO(0x9888), 0x238b5500 },
	{ _MMIO(0x9888), 0x038c4000 },
	{ _MMIO(0x9888), 0x058c4000 },
	{ _MMIO(0x9888), 0x078c4000 },
	{ _MMIO(0x9888), 0x098c4000 },
	{ _MMIO(0x9888), 0x0b8c4000 },
	{ _MMIO(0x9888), 0x0d8c4000 },
	{ _MMIO(0x9888), 0x018d8000 },
	{ _MMIO(0x9888), 0x038da000 },
	{ _MMIO(0x9888), 0x058da000 },
	{ _MMIO(0x9888), 0x078d2000 },
	{ _MMIO(0x9888), 0x2185aaaa },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0784c000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x1780c000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x43800882 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45801082 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x478014a5 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f800002 },
	{ _MMIO(0x9888), 0x41800c62 },
};
3731
3732static int
3733get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
3734 const struct i915_oa_reg **regs,
3735 int *lens)
3736{
3737 int n = 0;
3738
3739 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
3740 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
3741
3742 regs[n] = mux_config_tdl_2;
3743 lens[n] = ARRAY_SIZE(mux_config_tdl_2);
3744 n++;
3745
3746 return n;
3747}
3748
/* Boolean (B) counter programming for the COMPUTE_EXTRA metric set (0x27xx
 * range). Opaque generated hardware values — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_compute_extra[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};
3757
/* Flexible EU counter programming for the COMPUTE_EXTRA metric set.
 * Opaque generated values — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
	{ _MMIO(0xe458), 0x00001000 },
	{ _MMIO(0xe558), 0x00003002 },
	{ _MMIO(0xe658), 0x00005004 },
	{ _MMIO(0xe758), 0x00011010 },
	{ _MMIO(0xe45c), 0x00050012 },
	{ _MMIO(0xe55c), 0x00052051 },
	{ _MMIO(0xe65c), 0x00000008 },
};
3767
/* NOA MUX programming for the COMPUTE_EXTRA metric set: every entry is a
 * write to the 0x9888 MUX register. Order-sensitive generated hardware
 * programming — do not reorder or hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_extra[] = {
	{ _MMIO(0x9888), 0x161503e0 },
	{ _MMIO(0x9888), 0x163503e0 },
	{ _MMIO(0x9888), 0x165503e0 },
	{ _MMIO(0x9888), 0x169503e0 },
	{ _MMIO(0x9888), 0x16b503e0 },
	{ _MMIO(0x9888), 0x16d503e0 },
	{ _MMIO(0x9888), 0x045cc000 },
	{ _MMIO(0x9888), 0x083d8000 },
	{ _MMIO(0x9888), 0x04584000 },
	{ _MMIO(0x9888), 0x085b4000 },
	{ _MMIO(0x9888), 0x0a5b8000 },
	{ _MMIO(0x9888), 0x0e1f00a8 },
	{ _MMIO(0x9888), 0x08384000 },
	{ _MMIO(0x9888), 0x0a384000 },
	{ _MMIO(0x9888), 0x0c388000 },
	{ _MMIO(0x9888), 0x0439a000 },
	{ _MMIO(0x9888), 0x06392000 },
	{ _MMIO(0x9888), 0x0c3a8000 },
	{ _MMIO(0x9888), 0x08138000 },
	{ _MMIO(0x9888), 0x06141000 },
	{ _MMIO(0x9888), 0x041500c3 },
	{ _MMIO(0x9888), 0x10150000 },
	{ _MMIO(0x9888), 0x0a338000 },
	{ _MMIO(0x9888), 0x06342000 },
	{ _MMIO(0x9888), 0x0435c300 },
	{ _MMIO(0x9888), 0x10350000 },
	{ _MMIO(0x9888), 0x0c538000 },
	{ _MMIO(0x9888), 0x06544000 },
	{ _MMIO(0x9888), 0x065500c3 },
	{ _MMIO(0x9888), 0x10550000 },
	{ _MMIO(0x9888), 0x00dc8000 },
	{ _MMIO(0x9888), 0x02dc4000 },
	{ _MMIO(0x9888), 0x02bd8000 },
	{ _MMIO(0x9888), 0x00d88000 },
	{ _MMIO(0x9888), 0x02db4000 },
	{ _MMIO(0x9888), 0x04db8000 },
	{ _MMIO(0x9888), 0x0c9fa000 },
	{ _MMIO(0x9888), 0x0e9f0002 },
	{ _MMIO(0x9888), 0x02b84000 },
	{ _MMIO(0x9888), 0x04b84000 },
	{ _MMIO(0x9888), 0x06b88000 },
	{ _MMIO(0x9888), 0x00b98000 },
	{ _MMIO(0x9888), 0x02b9a000 },
	{ _MMIO(0x9888), 0x06ba8000 },
	{ _MMIO(0x9888), 0x02938000 },
	{ _MMIO(0x9888), 0x04942000 },
	{ _MMIO(0x9888), 0x0095c300 },
	{ _MMIO(0x9888), 0x10950000 },
	{ _MMIO(0x9888), 0x04b38000 },
	{ _MMIO(0x9888), 0x04b44000 },
	{ _MMIO(0x9888), 0x02b500c3 },
	{ _MMIO(0x9888), 0x10b50000 },
	{ _MMIO(0x9888), 0x06d38000 },
	{ _MMIO(0x9888), 0x04d48000 },
	{ _MMIO(0x9888), 0x02d5c300 },
	{ _MMIO(0x9888), 0x10d50000 },
	{ _MMIO(0x9888), 0x03888000 },
	{ _MMIO(0x9888), 0x05888000 },
	{ _MMIO(0x9888), 0x07888000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x0d8a8000 },
	{ _MMIO(0x9888), 0x238b3500 },
	{ _MMIO(0x9888), 0x258b0005 },
	{ _MMIO(0x9888), 0x038c4000 },
	{ _MMIO(0x9888), 0x058c4000 },
	{ _MMIO(0x9888), 0x078c4000 },
	{ _MMIO(0x9888), 0x018d8000 },
	{ _MMIO(0x9888), 0x038da000 },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x2185000a },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f800c40 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x41801482 },
	{ _MMIO(0x9888), 0x31800000 },
};
3861
3862static int
3863get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
3864 const struct i915_oa_reg **regs,
3865 int *lens)
3866{
3867 int n = 0;
3868
3869 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
3870 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
3871
3872 regs[n] = mux_config_compute_extra;
3873 lens[n] = ARRAY_SIZE(mux_config_compute_extra);
3874 n++;
3875
3876 return n;
3877}
3878
/* Boolean (B) counter programming for the VME_PIPE metric set (0x27xx
 * range). Opaque generated hardware values — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_vme_pipe[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00100030 },
	{ _MMIO(0x2774), 0x0000fff9 },
	{ _MMIO(0x2778), 0x00000002 },
	{ _MMIO(0x277c), 0x0000fffc },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000fff3 },
	{ _MMIO(0x2788), 0x00100180 },
	{ _MMIO(0x278c), 0x0000ffcf },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00000002 },
	{ _MMIO(0x279c), 0x0000ff3f },
};
3898
/* Flexible EU counter programming for the VME_PIPE metric set.
 * Opaque generated values — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_vme_pipe[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00008003 },
};
3903
/* NOA MUX programming for the VME_PIPE metric set: every entry is a write
 * to the 0x9888 MUX register. Order-sensitive generated hardware
 * programming — do not reorder or hand-edit.
 */
static const struct i915_oa_reg mux_config_vme_pipe[] = {
	{ _MMIO(0x9888), 0x14100812 },
	{ _MMIO(0x9888), 0x14125800 },
	{ _MMIO(0x9888), 0x161200c0 },
	{ _MMIO(0x9888), 0x14300812 },
	{ _MMIO(0x9888), 0x14325800 },
	{ _MMIO(0x9888), 0x163200c0 },
	{ _MMIO(0x9888), 0x005c4000 },
	{ _MMIO(0x9888), 0x065c8000 },
	{ _MMIO(0x9888), 0x085cc000 },
	{ _MMIO(0x9888), 0x0a5cc000 },
	{ _MMIO(0x9888), 0x0c5cc000 },
	{ _MMIO(0x9888), 0x003d8000 },
	{ _MMIO(0x9888), 0x0e3d8000 },
	{ _MMIO(0x9888), 0x183d2800 },
	{ _MMIO(0x9888), 0x00584000 },
	{ _MMIO(0x9888), 0x06588000 },
	{ _MMIO(0x9888), 0x0858c000 },
	{ _MMIO(0x9888), 0x005b4000 },
	{ _MMIO(0x9888), 0x0e5b4000 },
	{ _MMIO(0x9888), 0x185b9400 },
	{ _MMIO(0x9888), 0x1a5b002a },
	{ _MMIO(0x9888), 0x0c1f0800 },
	{ _MMIO(0x9888), 0x0e1faa00 },
	{ _MMIO(0x9888), 0x101f002a },
	{ _MMIO(0x9888), 0x00384000 },
	{ _MMIO(0x9888), 0x0e384000 },
	{ _MMIO(0x9888), 0x16384000 },
	{ _MMIO(0x9888), 0x18380155 },
	{ _MMIO(0x9888), 0x00392000 },
	{ _MMIO(0x9888), 0x06398000 },
	{ _MMIO(0x9888), 0x0839a000 },
	{ _MMIO(0x9888), 0x0a39a000 },
	{ _MMIO(0x9888), 0x0c39a000 },
	{ _MMIO(0x9888), 0x00100047 },
	{ _MMIO(0x9888), 0x06101a80 },
	{ _MMIO(0x9888), 0x10100000 },
	{ _MMIO(0x9888), 0x0810c000 },
	{ _MMIO(0x9888), 0x0811c000 },
	{ _MMIO(0x9888), 0x08126151 },
	{ _MMIO(0x9888), 0x10120000 },
	{ _MMIO(0x9888), 0x00134000 },
	{ _MMIO(0x9888), 0x0e134000 },
	{ _MMIO(0x9888), 0x161300a0 },
	{ _MMIO(0x9888), 0x0a301ac7 },
	{ _MMIO(0x9888), 0x10300000 },
	{ _MMIO(0x9888), 0x0c30c000 },
	{ _MMIO(0x9888), 0x0c31c000 },
	{ _MMIO(0x9888), 0x0c326151 },
	{ _MMIO(0x9888), 0x10320000 },
	{ _MMIO(0x9888), 0x16332a00 },
	{ _MMIO(0x9888), 0x18330001 },
	{ _MMIO(0x9888), 0x018a8000 },
	{ _MMIO(0x9888), 0x0f8a8000 },
	{ _MMIO(0x9888), 0x198a8000 },
	{ _MMIO(0x9888), 0x1b8a2aa0 },
	{ _MMIO(0x9888), 0x238b0020 },
	{ _MMIO(0x9888), 0x258b5550 },
	{ _MMIO(0x9888), 0x278b0001 },
	{ _MMIO(0x9888), 0x1f850080 },
	{ _MMIO(0x9888), 0x2185aaa0 },
	{ _MMIO(0x9888), 0x23850002 },
	{ _MMIO(0x9888), 0x01834000 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830015 },
	{ _MMIO(0x9888), 0x01844000 },
	{ _MMIO(0x9888), 0x07848000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x11804000 },
	{ _MMIO(0x9888), 0x17808000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3d800800 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x43800002 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800884 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47800002 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
};
3991
3992static int
3993get_vme_pipe_mux_config(struct drm_i915_private *dev_priv,
3994 const struct i915_oa_reg **regs,
3995 int *lens)
3996{
3997 int n = 0;
3998
3999 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
4000 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
4001
4002 regs[n] = mux_config_vme_pipe;
4003 lens[n] = ARRAY_SIZE(mux_config_vme_pipe);
4004 n++;
4005
4006 return n;
4007}
4008
/* Boolean (B) counter programming for the TEST_OA metric set (0x27xx
 * range), used to validate OA report generation. Opaque generated hardware
 * values — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_test_oa[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2770), 0x00000004 },
	{ _MMIO(0x2774), 0x00000000 },
	{ _MMIO(0x2778), 0x00000003 },
	{ _MMIO(0x277c), 0x00000000 },
	{ _MMIO(0x2780), 0x00000007 },
	{ _MMIO(0x2784), 0x00000000 },
	{ _MMIO(0x2788), 0x00100002 },
	{ _MMIO(0x278c), 0x0000fff7 },
	{ _MMIO(0x2790), 0x00100002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00100082 },
	{ _MMIO(0x279c), 0x0000ffef },
	{ _MMIO(0x27a0), 0x001000c2 },
	{ _MMIO(0x27a4), 0x0000ffe7 },
	{ _MMIO(0x27a8), 0x00100001 },
	{ _MMIO(0x27ac), 0x0000ffe7 },
};
4033
/* Intentionally empty: the TEST_OA metric set programs no flex EU counters. */
static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
4036
/* NOA MUX programming for the TEST_OA metric set: every entry is a write
 * to the 0x9888 MUX register. Order-sensitive generated hardware
 * programming — do not reorder or hand-edit.
 */
static const struct i915_oa_reg mux_config_test_oa[] = {
	{ _MMIO(0x9888), 0x198b0000 },
	{ _MMIO(0x9888), 0x078b0066 },
	{ _MMIO(0x9888), 0x118b0000 },
	{ _MMIO(0x9888), 0x258b0000 },
	{ _MMIO(0x9888), 0x21850008 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x41800000 },
	{ _MMIO(0x9888), 0x31800000 },
};
4051
4052static int
4053get_test_oa_mux_config(struct drm_i915_private *dev_priv,
4054 const struct i915_oa_reg **regs,
4055 int *lens)
4056{
4057 int n = 0;
4058
4059 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
4060 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
4061
4062 regs[n] = mux_config_test_oa;
4063 lens[n] = ARRAY_SIZE(mux_config_test_oa);
4064 n++;
4065
4066 return n;
4067}
4068
4069int i915_oa_select_metric_set_bdw(struct drm_i915_private *dev_priv)
4070{
4071 dev_priv->perf.oa.n_mux_configs = 0;
4072 dev_priv->perf.oa.b_counter_regs = NULL;
4073 dev_priv->perf.oa.b_counter_regs_len = 0;
4074 dev_priv->perf.oa.flex_regs = NULL;
4075 dev_priv->perf.oa.flex_regs_len = 0;
4076
4077 switch (dev_priv->perf.oa.metrics_set) {
4078 case METRIC_SET_ID_RENDER_BASIC:
4079 dev_priv->perf.oa.n_mux_configs =
4080 get_render_basic_mux_config(dev_priv,
4081 dev_priv->perf.oa.mux_regs,
4082 dev_priv->perf.oa.mux_regs_lens);
4083 if (dev_priv->perf.oa.n_mux_configs == 0) {
4084 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
4085
4086 /* EINVAL because *_register_sysfs already checked this
4087 * and so it wouldn't have been advertised to userspace and
4088 * so shouldn't have been requested
4089 */
4090 return -EINVAL;
4091 }
4092
4093 dev_priv->perf.oa.b_counter_regs =
4094 b_counter_config_render_basic;
4095 dev_priv->perf.oa.b_counter_regs_len =
4096 ARRAY_SIZE(b_counter_config_render_basic);
4097
4098 dev_priv->perf.oa.flex_regs =
4099 flex_eu_config_render_basic;
4100 dev_priv->perf.oa.flex_regs_len =
4101 ARRAY_SIZE(flex_eu_config_render_basic);
4102
4103 return 0;
4104 case METRIC_SET_ID_COMPUTE_BASIC:
4105 dev_priv->perf.oa.n_mux_configs =
4106 get_compute_basic_mux_config(dev_priv,
4107 dev_priv->perf.oa.mux_regs,
4108 dev_priv->perf.oa.mux_regs_lens);
4109 if (dev_priv->perf.oa.n_mux_configs == 0) {
4110 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
4111
4112 /* EINVAL because *_register_sysfs already checked this
4113 * and so it wouldn't have been advertised to userspace and
4114 * so shouldn't have been requested
4115 */
4116 return -EINVAL;
4117 }
4118
4119 dev_priv->perf.oa.b_counter_regs =
4120 b_counter_config_compute_basic;
4121 dev_priv->perf.oa.b_counter_regs_len =
4122 ARRAY_SIZE(b_counter_config_compute_basic);
4123
4124 dev_priv->perf.oa.flex_regs =
4125 flex_eu_config_compute_basic;
4126 dev_priv->perf.oa.flex_regs_len =
4127 ARRAY_SIZE(flex_eu_config_compute_basic);
4128
4129 return 0;
4130 case METRIC_SET_ID_RENDER_PIPE_PROFILE:
4131 dev_priv->perf.oa.n_mux_configs =
4132 get_render_pipe_profile_mux_config(dev_priv,
4133 dev_priv->perf.oa.mux_regs,
4134 dev_priv->perf.oa.mux_regs_lens);
4135 if (dev_priv->perf.oa.n_mux_configs == 0) {
4136 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
4137
4138 /* EINVAL because *_register_sysfs already checked this
4139 * and so it wouldn't have been advertised to userspace and
4140 * so shouldn't have been requested
4141 */
4142 return -EINVAL;
4143 }
4144
4145 dev_priv->perf.oa.b_counter_regs =
4146 b_counter_config_render_pipe_profile;
4147 dev_priv->perf.oa.b_counter_regs_len =
4148 ARRAY_SIZE(b_counter_config_render_pipe_profile);
4149
4150 dev_priv->perf.oa.flex_regs =
4151 flex_eu_config_render_pipe_profile;
4152 dev_priv->perf.oa.flex_regs_len =
4153 ARRAY_SIZE(flex_eu_config_render_pipe_profile);
4154
4155 return 0;
4156 case METRIC_SET_ID_MEMORY_READS:
4157 dev_priv->perf.oa.n_mux_configs =
4158 get_memory_reads_mux_config(dev_priv,
4159 dev_priv->perf.oa.mux_regs,
4160 dev_priv->perf.oa.mux_regs_lens);
4161 if (dev_priv->perf.oa.n_mux_configs == 0) {
4162 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
4163
4164 /* EINVAL because *_register_sysfs already checked this
4165 * and so it wouldn't have been advertised to userspace and
4166 * so shouldn't have been requested
4167 */
4168 return -EINVAL;
4169 }
4170
4171 dev_priv->perf.oa.b_counter_regs =
4172 b_counter_config_memory_reads;
4173 dev_priv->perf.oa.b_counter_regs_len =
4174 ARRAY_SIZE(b_counter_config_memory_reads);
4175
4176 dev_priv->perf.oa.flex_regs =
4177 flex_eu_config_memory_reads;
4178 dev_priv->perf.oa.flex_regs_len =
4179 ARRAY_SIZE(flex_eu_config_memory_reads);
4180
4181 return 0;
4182 case METRIC_SET_ID_MEMORY_WRITES:
4183 dev_priv->perf.oa.n_mux_configs =
4184 get_memory_writes_mux_config(dev_priv,
4185 dev_priv->perf.oa.mux_regs,
4186 dev_priv->perf.oa.mux_regs_lens);
4187 if (dev_priv->perf.oa.n_mux_configs == 0) {
4188 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
4189
4190 /* EINVAL because *_register_sysfs already checked this
4191 * and so it wouldn't have been advertised to userspace and
4192 * so shouldn't have been requested
4193 */
4194 return -EINVAL;
4195 }
4196
4197 dev_priv->perf.oa.b_counter_regs =
4198 b_counter_config_memory_writes;
4199 dev_priv->perf.oa.b_counter_regs_len =
4200 ARRAY_SIZE(b_counter_config_memory_writes);
4201
4202 dev_priv->perf.oa.flex_regs =
4203 flex_eu_config_memory_writes;
4204 dev_priv->perf.oa.flex_regs_len =
4205 ARRAY_SIZE(flex_eu_config_memory_writes);
4206
4207 return 0;
4208 case METRIC_SET_ID_COMPUTE_EXTENDED:
4209 dev_priv->perf.oa.n_mux_configs =
4210 get_compute_extended_mux_config(dev_priv,
4211 dev_priv->perf.oa.mux_regs,
4212 dev_priv->perf.oa.mux_regs_lens);
4213 if (dev_priv->perf.oa.n_mux_configs == 0) {
4214 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
4215
4216 /* EINVAL because *_register_sysfs already checked this
4217 * and so it wouldn't have been advertised to userspace and
4218 * so shouldn't have been requested
4219 */
4220 return -EINVAL;
4221 }
4222
4223 dev_priv->perf.oa.b_counter_regs =
4224 b_counter_config_compute_extended;
4225 dev_priv->perf.oa.b_counter_regs_len =
4226 ARRAY_SIZE(b_counter_config_compute_extended);
4227
4228 dev_priv->perf.oa.flex_regs =
4229 flex_eu_config_compute_extended;
4230 dev_priv->perf.oa.flex_regs_len =
4231 ARRAY_SIZE(flex_eu_config_compute_extended);
4232
4233 return 0;
4234 case METRIC_SET_ID_COMPUTE_L3_CACHE:
4235 dev_priv->perf.oa.n_mux_configs =
4236 get_compute_l3_cache_mux_config(dev_priv,
4237 dev_priv->perf.oa.mux_regs,
4238 dev_priv->perf.oa.mux_regs_lens);
4239 if (dev_priv->perf.oa.n_mux_configs == 0) {
4240 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
4241
4242 /* EINVAL because *_register_sysfs already checked this
4243 * and so it wouldn't have been advertised to userspace and
4244 * so shouldn't have been requested
4245 */
4246 return -EINVAL;
4247 }
4248
4249 dev_priv->perf.oa.b_counter_regs =
4250 b_counter_config_compute_l3_cache;
4251 dev_priv->perf.oa.b_counter_regs_len =
4252 ARRAY_SIZE(b_counter_config_compute_l3_cache);
4253
4254 dev_priv->perf.oa.flex_regs =
4255 flex_eu_config_compute_l3_cache;
4256 dev_priv->perf.oa.flex_regs_len =
4257 ARRAY_SIZE(flex_eu_config_compute_l3_cache);
4258
4259 return 0;
4260 case METRIC_SET_ID_DATA_PORT_READS_COALESCING:
4261 dev_priv->perf.oa.n_mux_configs =
4262 get_data_port_reads_coalescing_mux_config(dev_priv,
4263 dev_priv->perf.oa.mux_regs,
4264 dev_priv->perf.oa.mux_regs_lens);
4265 if (dev_priv->perf.oa.n_mux_configs == 0) {
4266 DRM_DEBUG_DRIVER("No suitable MUX config for \"DATA_PORT_READS_COALESCING\" metric set\n");
4267
4268 /* EINVAL because *_register_sysfs already checked this
4269 * and so it wouldn't have been advertised to userspace and
4270 * so shouldn't have been requested
4271 */
4272 return -EINVAL;
4273 }
4274
4275 dev_priv->perf.oa.b_counter_regs =
4276 b_counter_config_data_port_reads_coalescing;
4277 dev_priv->perf.oa.b_counter_regs_len =
4278 ARRAY_SIZE(b_counter_config_data_port_reads_coalescing);
4279
4280 dev_priv->perf.oa.flex_regs =
4281 flex_eu_config_data_port_reads_coalescing;
4282 dev_priv->perf.oa.flex_regs_len =
4283 ARRAY_SIZE(flex_eu_config_data_port_reads_coalescing);
4284
4285 return 0;
4286 case METRIC_SET_ID_DATA_PORT_WRITES_COALESCING:
4287 dev_priv->perf.oa.n_mux_configs =
4288 get_data_port_writes_coalescing_mux_config(dev_priv,
4289 dev_priv->perf.oa.mux_regs,
4290 dev_priv->perf.oa.mux_regs_lens);
4291 if (dev_priv->perf.oa.n_mux_configs == 0) {
4292 DRM_DEBUG_DRIVER("No suitable MUX config for \"DATA_PORT_WRITES_COALESCING\" metric set\n");
4293
4294 /* EINVAL because *_register_sysfs already checked this
4295 * and so it wouldn't have been advertised to userspace and
4296 * so shouldn't have been requested
4297 */
4298 return -EINVAL;
4299 }
4300
4301 dev_priv->perf.oa.b_counter_regs =
4302 b_counter_config_data_port_writes_coalescing;
4303 dev_priv->perf.oa.b_counter_regs_len =
4304 ARRAY_SIZE(b_counter_config_data_port_writes_coalescing);
4305
4306 dev_priv->perf.oa.flex_regs =
4307 flex_eu_config_data_port_writes_coalescing;
4308 dev_priv->perf.oa.flex_regs_len =
4309 ARRAY_SIZE(flex_eu_config_data_port_writes_coalescing);
4310
4311 return 0;
4312 case METRIC_SET_ID_HDC_AND_SF:
4313 dev_priv->perf.oa.n_mux_configs =
4314 get_hdc_and_sf_mux_config(dev_priv,
4315 dev_priv->perf.oa.mux_regs,
4316 dev_priv->perf.oa.mux_regs_lens);
4317 if (dev_priv->perf.oa.n_mux_configs == 0) {
4318 DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
4319
4320 /* EINVAL because *_register_sysfs already checked this
4321 * and so it wouldn't have been advertised to userspace and
4322 * so shouldn't have been requested
4323 */
4324 return -EINVAL;
4325 }
4326
4327 dev_priv->perf.oa.b_counter_regs =
4328 b_counter_config_hdc_and_sf;
4329 dev_priv->perf.oa.b_counter_regs_len =
4330 ARRAY_SIZE(b_counter_config_hdc_and_sf);
4331
4332 dev_priv->perf.oa.flex_regs =
4333 flex_eu_config_hdc_and_sf;
4334 dev_priv->perf.oa.flex_regs_len =
4335 ARRAY_SIZE(flex_eu_config_hdc_and_sf);
4336
4337 return 0;
4338 case METRIC_SET_ID_L3_1:
4339 dev_priv->perf.oa.n_mux_configs =
4340 get_l3_1_mux_config(dev_priv,
4341 dev_priv->perf.oa.mux_regs,
4342 dev_priv->perf.oa.mux_regs_lens);
4343 if (dev_priv->perf.oa.n_mux_configs == 0) {
4344 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
4345
4346 /* EINVAL because *_register_sysfs already checked this
4347 * and so it wouldn't have been advertised to userspace and
4348 * so shouldn't have been requested
4349 */
4350 return -EINVAL;
4351 }
4352
4353 dev_priv->perf.oa.b_counter_regs =
4354 b_counter_config_l3_1;
4355 dev_priv->perf.oa.b_counter_regs_len =
4356 ARRAY_SIZE(b_counter_config_l3_1);
4357
4358 dev_priv->perf.oa.flex_regs =
4359 flex_eu_config_l3_1;
4360 dev_priv->perf.oa.flex_regs_len =
4361 ARRAY_SIZE(flex_eu_config_l3_1);
4362
4363 return 0;
4364 case METRIC_SET_ID_L3_2:
4365 dev_priv->perf.oa.n_mux_configs =
4366 get_l3_2_mux_config(dev_priv,
4367 dev_priv->perf.oa.mux_regs,
4368 dev_priv->perf.oa.mux_regs_lens);
4369 if (dev_priv->perf.oa.n_mux_configs == 0) {
4370 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
4371
4372 /* EINVAL because *_register_sysfs already checked this
4373 * and so it wouldn't have been advertised to userspace and
4374 * so shouldn't have been requested
4375 */
4376 return -EINVAL;
4377 }
4378
4379 dev_priv->perf.oa.b_counter_regs =
4380 b_counter_config_l3_2;
4381 dev_priv->perf.oa.b_counter_regs_len =
4382 ARRAY_SIZE(b_counter_config_l3_2);
4383
4384 dev_priv->perf.oa.flex_regs =
4385 flex_eu_config_l3_2;
4386 dev_priv->perf.oa.flex_regs_len =
4387 ARRAY_SIZE(flex_eu_config_l3_2);
4388
4389 return 0;
4390 case METRIC_SET_ID_L3_3:
4391 dev_priv->perf.oa.n_mux_configs =
4392 get_l3_3_mux_config(dev_priv,
4393 dev_priv->perf.oa.mux_regs,
4394 dev_priv->perf.oa.mux_regs_lens);
4395 if (dev_priv->perf.oa.n_mux_configs == 0) {
4396 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
4397
4398 /* EINVAL because *_register_sysfs already checked this
4399 * and so it wouldn't have been advertised to userspace and
4400 * so shouldn't have been requested
4401 */
4402 return -EINVAL;
4403 }
4404
4405 dev_priv->perf.oa.b_counter_regs =
4406 b_counter_config_l3_3;
4407 dev_priv->perf.oa.b_counter_regs_len =
4408 ARRAY_SIZE(b_counter_config_l3_3);
4409
4410 dev_priv->perf.oa.flex_regs =
4411 flex_eu_config_l3_3;
4412 dev_priv->perf.oa.flex_regs_len =
4413 ARRAY_SIZE(flex_eu_config_l3_3);
4414
4415 return 0;
4416 case METRIC_SET_ID_L3_4:
4417 dev_priv->perf.oa.n_mux_configs =
4418 get_l3_4_mux_config(dev_priv,
4419 dev_priv->perf.oa.mux_regs,
4420 dev_priv->perf.oa.mux_regs_lens);
4421 if (dev_priv->perf.oa.n_mux_configs == 0) {
4422 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_4\" metric set\n");
4423
4424 /* EINVAL because *_register_sysfs already checked this
4425 * and so it wouldn't have been advertised to userspace and
4426 * so shouldn't have been requested
4427 */
4428 return -EINVAL;
4429 }
4430
4431 dev_priv->perf.oa.b_counter_regs =
4432 b_counter_config_l3_4;
4433 dev_priv->perf.oa.b_counter_regs_len =
4434 ARRAY_SIZE(b_counter_config_l3_4);
4435
4436 dev_priv->perf.oa.flex_regs =
4437 flex_eu_config_l3_4;
4438 dev_priv->perf.oa.flex_regs_len =
4439 ARRAY_SIZE(flex_eu_config_l3_4);
4440
4441 return 0;
4442 case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
4443 dev_priv->perf.oa.n_mux_configs =
4444 get_rasterizer_and_pixel_backend_mux_config(dev_priv,
4445 dev_priv->perf.oa.mux_regs,
4446 dev_priv->perf.oa.mux_regs_lens);
4447 if (dev_priv->perf.oa.n_mux_configs == 0) {
4448 DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
4449
4450 /* EINVAL because *_register_sysfs already checked this
4451 * and so it wouldn't have been advertised to userspace and
4452 * so shouldn't have been requested
4453 */
4454 return -EINVAL;
4455 }
4456
4457 dev_priv->perf.oa.b_counter_regs =
4458 b_counter_config_rasterizer_and_pixel_backend;
4459 dev_priv->perf.oa.b_counter_regs_len =
4460 ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
4461
4462 dev_priv->perf.oa.flex_regs =
4463 flex_eu_config_rasterizer_and_pixel_backend;
4464 dev_priv->perf.oa.flex_regs_len =
4465 ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
4466
4467 return 0;
4468 case METRIC_SET_ID_SAMPLER_1:
4469 dev_priv->perf.oa.n_mux_configs =
4470 get_sampler_1_mux_config(dev_priv,
4471 dev_priv->perf.oa.mux_regs,
4472 dev_priv->perf.oa.mux_regs_lens);
4473 if (dev_priv->perf.oa.n_mux_configs == 0) {
4474 DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_1\" metric set\n");
4475
4476 /* EINVAL because *_register_sysfs already checked this
4477 * and so it wouldn't have been advertised to userspace and
4478 * so shouldn't have been requested
4479 */
4480 return -EINVAL;
4481 }
4482
4483 dev_priv->perf.oa.b_counter_regs =
4484 b_counter_config_sampler_1;
4485 dev_priv->perf.oa.b_counter_regs_len =
4486 ARRAY_SIZE(b_counter_config_sampler_1);
4487
4488 dev_priv->perf.oa.flex_regs =
4489 flex_eu_config_sampler_1;
4490 dev_priv->perf.oa.flex_regs_len =
4491 ARRAY_SIZE(flex_eu_config_sampler_1);
4492
4493 return 0;
4494 case METRIC_SET_ID_SAMPLER_2:
4495 dev_priv->perf.oa.n_mux_configs =
4496 get_sampler_2_mux_config(dev_priv,
4497 dev_priv->perf.oa.mux_regs,
4498 dev_priv->perf.oa.mux_regs_lens);
4499 if (dev_priv->perf.oa.n_mux_configs == 0) {
4500 DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_2\" metric set\n");
4501
4502 /* EINVAL because *_register_sysfs already checked this
4503 * and so it wouldn't have been advertised to userspace and
4504 * so shouldn't have been requested
4505 */
4506 return -EINVAL;
4507 }
4508
4509 dev_priv->perf.oa.b_counter_regs =
4510 b_counter_config_sampler_2;
4511 dev_priv->perf.oa.b_counter_regs_len =
4512 ARRAY_SIZE(b_counter_config_sampler_2);
4513
4514 dev_priv->perf.oa.flex_regs =
4515 flex_eu_config_sampler_2;
4516 dev_priv->perf.oa.flex_regs_len =
4517 ARRAY_SIZE(flex_eu_config_sampler_2);
4518
4519 return 0;
4520 case METRIC_SET_ID_TDL_1:
4521 dev_priv->perf.oa.n_mux_configs =
4522 get_tdl_1_mux_config(dev_priv,
4523 dev_priv->perf.oa.mux_regs,
4524 dev_priv->perf.oa.mux_regs_lens);
4525 if (dev_priv->perf.oa.n_mux_configs == 0) {
4526 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
4527
4528 /* EINVAL because *_register_sysfs already checked this
4529 * and so it wouldn't have been advertised to userspace and
4530 * so shouldn't have been requested
4531 */
4532 return -EINVAL;
4533 }
4534
4535 dev_priv->perf.oa.b_counter_regs =
4536 b_counter_config_tdl_1;
4537 dev_priv->perf.oa.b_counter_regs_len =
4538 ARRAY_SIZE(b_counter_config_tdl_1);
4539
4540 dev_priv->perf.oa.flex_regs =
4541 flex_eu_config_tdl_1;
4542 dev_priv->perf.oa.flex_regs_len =
4543 ARRAY_SIZE(flex_eu_config_tdl_1);
4544
4545 return 0;
4546 case METRIC_SET_ID_TDL_2:
4547 dev_priv->perf.oa.n_mux_configs =
4548 get_tdl_2_mux_config(dev_priv,
4549 dev_priv->perf.oa.mux_regs,
4550 dev_priv->perf.oa.mux_regs_lens);
4551 if (dev_priv->perf.oa.n_mux_configs == 0) {
4552 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
4553
4554 /* EINVAL because *_register_sysfs already checked this
4555 * and so it wouldn't have been advertised to userspace and
4556 * so shouldn't have been requested
4557 */
4558 return -EINVAL;
4559 }
4560
4561 dev_priv->perf.oa.b_counter_regs =
4562 b_counter_config_tdl_2;
4563 dev_priv->perf.oa.b_counter_regs_len =
4564 ARRAY_SIZE(b_counter_config_tdl_2);
4565
4566 dev_priv->perf.oa.flex_regs =
4567 flex_eu_config_tdl_2;
4568 dev_priv->perf.oa.flex_regs_len =
4569 ARRAY_SIZE(flex_eu_config_tdl_2);
4570
4571 return 0;
4572 case METRIC_SET_ID_COMPUTE_EXTRA:
4573 dev_priv->perf.oa.n_mux_configs =
4574 get_compute_extra_mux_config(dev_priv,
4575 dev_priv->perf.oa.mux_regs,
4576 dev_priv->perf.oa.mux_regs_lens);
4577 if (dev_priv->perf.oa.n_mux_configs == 0) {
4578 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
4579
4580 /* EINVAL because *_register_sysfs already checked this
4581 * and so it wouldn't have been advertised to userspace and
4582 * so shouldn't have been requested
4583 */
4584 return -EINVAL;
4585 }
4586
4587 dev_priv->perf.oa.b_counter_regs =
4588 b_counter_config_compute_extra;
4589 dev_priv->perf.oa.b_counter_regs_len =
4590 ARRAY_SIZE(b_counter_config_compute_extra);
4591
4592 dev_priv->perf.oa.flex_regs =
4593 flex_eu_config_compute_extra;
4594 dev_priv->perf.oa.flex_regs_len =
4595 ARRAY_SIZE(flex_eu_config_compute_extra);
4596
4597 return 0;
4598 case METRIC_SET_ID_VME_PIPE:
4599 dev_priv->perf.oa.n_mux_configs =
4600 get_vme_pipe_mux_config(dev_priv,
4601 dev_priv->perf.oa.mux_regs,
4602 dev_priv->perf.oa.mux_regs_lens);
4603 if (dev_priv->perf.oa.n_mux_configs == 0) {
4604 DRM_DEBUG_DRIVER("No suitable MUX config for \"VME_PIPE\" metric set\n");
4605
4606 /* EINVAL because *_register_sysfs already checked this
4607 * and so it wouldn't have been advertised to userspace and
4608 * so shouldn't have been requested
4609 */
4610 return -EINVAL;
4611 }
4612
4613 dev_priv->perf.oa.b_counter_regs =
4614 b_counter_config_vme_pipe;
4615 dev_priv->perf.oa.b_counter_regs_len =
4616 ARRAY_SIZE(b_counter_config_vme_pipe);
4617
4618 dev_priv->perf.oa.flex_regs =
4619 flex_eu_config_vme_pipe;
4620 dev_priv->perf.oa.flex_regs_len =
4621 ARRAY_SIZE(flex_eu_config_vme_pipe);
4622
4623 return 0;
4624 case METRIC_SET_ID_TEST_OA:
4625 dev_priv->perf.oa.n_mux_configs =
4626 get_test_oa_mux_config(dev_priv,
4627 dev_priv->perf.oa.mux_regs,
4628 dev_priv->perf.oa.mux_regs_lens);
4629 if (dev_priv->perf.oa.n_mux_configs == 0) {
4630 DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
4631
4632 /* EINVAL because *_register_sysfs already checked this
4633 * and so it wouldn't have been advertised to userspace and
4634 * so shouldn't have been requested
4635 */
4636 return -EINVAL;
4637 }
4638
4639 dev_priv->perf.oa.b_counter_regs =
4640 b_counter_config_test_oa;
4641 dev_priv->perf.oa.b_counter_regs_len =
4642 ARRAY_SIZE(b_counter_config_test_oa);
4643
4644 dev_priv->perf.oa.flex_regs =
4645 flex_eu_config_test_oa;
4646 dev_priv->perf.oa.flex_regs_len =
4647 ARRAY_SIZE(flex_eu_config_test_oa);
4648
4649 return 0;
4650 default:
4651 return -ENODEV;
4652 }
4653}
4654
4655static ssize_t
4656show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
4657{
4658 return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
4659}
4660
4661static struct device_attribute dev_attr_render_basic_id = {
4662 .attr = { .name = "id", .mode = 0444 },
4663 .show = show_render_basic_id,
4664 .store = NULL,
4665};
4666
4667static struct attribute *attrs_render_basic[] = {
4668 &dev_attr_render_basic_id.attr,
4669 NULL,
4670};
4671
4672static struct attribute_group group_render_basic = {
4673 .name = "b541bd57-0e0f-4154-b4c0-5858010a2bf7",
4674 .attrs = attrs_render_basic,
4675};
4676
4677static ssize_t
4678show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
4679{
4680 return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
4681}
4682
4683static struct device_attribute dev_attr_compute_basic_id = {
4684 .attr = { .name = "id", .mode = 0444 },
4685 .show = show_compute_basic_id,
4686 .store = NULL,
4687};
4688
4689static struct attribute *attrs_compute_basic[] = {
4690 &dev_attr_compute_basic_id.attr,
4691 NULL,
4692};
4693
4694static struct attribute_group group_compute_basic = {
4695 .name = "35fbc9b2-a891-40a6-a38d-022bb7057552",
4696 .attrs = attrs_compute_basic,
4697};
4698
4699static ssize_t
4700show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
4701{
4702 return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
4703}
4704
4705static struct device_attribute dev_attr_render_pipe_profile_id = {
4706 .attr = { .name = "id", .mode = 0444 },
4707 .show = show_render_pipe_profile_id,
4708 .store = NULL,
4709};
4710
4711static struct attribute *attrs_render_pipe_profile[] = {
4712 &dev_attr_render_pipe_profile_id.attr,
4713 NULL,
4714};
4715
4716static struct attribute_group group_render_pipe_profile = {
4717 .name = "233d0544-fff7-4281-8291-e02f222aff72",
4718 .attrs = attrs_render_pipe_profile,
4719};
4720
4721static ssize_t
4722show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
4723{
4724 return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
4725}
4726
4727static struct device_attribute dev_attr_memory_reads_id = {
4728 .attr = { .name = "id", .mode = 0444 },
4729 .show = show_memory_reads_id,
4730 .store = NULL,
4731};
4732
4733static struct attribute *attrs_memory_reads[] = {
4734 &dev_attr_memory_reads_id.attr,
4735 NULL,
4736};
4737
4738static struct attribute_group group_memory_reads = {
4739 .name = "2b255d48-2117-4fef-a8f7-f151e1d25a2c",
4740 .attrs = attrs_memory_reads,
4741};
4742
4743static ssize_t
4744show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
4745{
4746 return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
4747}
4748
4749static struct device_attribute dev_attr_memory_writes_id = {
4750 .attr = { .name = "id", .mode = 0444 },
4751 .show = show_memory_writes_id,
4752 .store = NULL,
4753};
4754
4755static struct attribute *attrs_memory_writes[] = {
4756 &dev_attr_memory_writes_id.attr,
4757 NULL,
4758};
4759
4760static struct attribute_group group_memory_writes = {
4761 .name = "f7fd3220-b466-4a4d-9f98-b0caf3f2394c",
4762 .attrs = attrs_memory_writes,
4763};
4764
4765static ssize_t
4766show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
4767{
4768 return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
4769}
4770
4771static struct device_attribute dev_attr_compute_extended_id = {
4772 .attr = { .name = "id", .mode = 0444 },
4773 .show = show_compute_extended_id,
4774 .store = NULL,
4775};
4776
4777static struct attribute *attrs_compute_extended[] = {
4778 &dev_attr_compute_extended_id.attr,
4779 NULL,
4780};
4781
4782static struct attribute_group group_compute_extended = {
4783 .name = "e99ccaca-821c-4df9-97a7-96bdb7204e43",
4784 .attrs = attrs_compute_extended,
4785};
4786
4787static ssize_t
4788show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
4789{
4790 return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
4791}
4792
4793static struct device_attribute dev_attr_compute_l3_cache_id = {
4794 .attr = { .name = "id", .mode = 0444 },
4795 .show = show_compute_l3_cache_id,
4796 .store = NULL,
4797};
4798
4799static struct attribute *attrs_compute_l3_cache[] = {
4800 &dev_attr_compute_l3_cache_id.attr,
4801 NULL,
4802};
4803
4804static struct attribute_group group_compute_l3_cache = {
4805 .name = "27a364dc-8225-4ecb-b607-d6f1925598d9",
4806 .attrs = attrs_compute_l3_cache,
4807};
4808
4809static ssize_t
4810show_data_port_reads_coalescing_id(struct device *kdev, struct device_attribute *attr, char *buf)
4811{
4812 return sprintf(buf, "%d\n", METRIC_SET_ID_DATA_PORT_READS_COALESCING);
4813}
4814
4815static struct device_attribute dev_attr_data_port_reads_coalescing_id = {
4816 .attr = { .name = "id", .mode = 0444 },
4817 .show = show_data_port_reads_coalescing_id,
4818 .store = NULL,
4819};
4820
4821static struct attribute *attrs_data_port_reads_coalescing[] = {
4822 &dev_attr_data_port_reads_coalescing_id.attr,
4823 NULL,
4824};
4825
4826static struct attribute_group group_data_port_reads_coalescing = {
4827 .name = "857fc630-2f09-4804-85f1-084adfadd5ab",
4828 .attrs = attrs_data_port_reads_coalescing,
4829};
4830
4831static ssize_t
4832show_data_port_writes_coalescing_id(struct device *kdev, struct device_attribute *attr, char *buf)
4833{
4834 return sprintf(buf, "%d\n", METRIC_SET_ID_DATA_PORT_WRITES_COALESCING);
4835}
4836
4837static struct device_attribute dev_attr_data_port_writes_coalescing_id = {
4838 .attr = { .name = "id", .mode = 0444 },
4839 .show = show_data_port_writes_coalescing_id,
4840 .store = NULL,
4841};
4842
4843static struct attribute *attrs_data_port_writes_coalescing[] = {
4844 &dev_attr_data_port_writes_coalescing_id.attr,
4845 NULL,
4846};
4847
4848static struct attribute_group group_data_port_writes_coalescing = {
4849 .name = "343ebc99-4a55-414c-8c17-d8e259cf5e20",
4850 .attrs = attrs_data_port_writes_coalescing,
4851};
4852
4853static ssize_t
4854show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
4855{
4856 return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
4857}
4858
4859static struct device_attribute dev_attr_hdc_and_sf_id = {
4860 .attr = { .name = "id", .mode = 0444 },
4861 .show = show_hdc_and_sf_id,
4862 .store = NULL,
4863};
4864
4865static struct attribute *attrs_hdc_and_sf[] = {
4866 &dev_attr_hdc_and_sf_id.attr,
4867 NULL,
4868};
4869
4870static struct attribute_group group_hdc_and_sf = {
4871 .name = "7bdafd88-a4fa-4ed5-bc09-1a977aa5be3e",
4872 .attrs = attrs_hdc_and_sf,
4873};
4874
4875static ssize_t
4876show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
4877{
4878 return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
4879}
4880
4881static struct device_attribute dev_attr_l3_1_id = {
4882 .attr = { .name = "id", .mode = 0444 },
4883 .show = show_l3_1_id,
4884 .store = NULL,
4885};
4886
4887static struct attribute *attrs_l3_1[] = {
4888 &dev_attr_l3_1_id.attr,
4889 NULL,
4890};
4891
4892static struct attribute_group group_l3_1 = {
4893 .name = "9385ebb2-f34f-4aa5-aec5-7e9cbbea0f0b",
4894 .attrs = attrs_l3_1,
4895};
4896
4897static ssize_t
4898show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
4899{
4900 return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
4901}
4902
4903static struct device_attribute dev_attr_l3_2_id = {
4904 .attr = { .name = "id", .mode = 0444 },
4905 .show = show_l3_2_id,
4906 .store = NULL,
4907};
4908
4909static struct attribute *attrs_l3_2[] = {
4910 &dev_attr_l3_2_id.attr,
4911 NULL,
4912};
4913
4914static struct attribute_group group_l3_2 = {
4915 .name = "446ae59b-ff2e-41c9-b49e-0184a54bf00a",
4916 .attrs = attrs_l3_2,
4917};
4918
4919static ssize_t
4920show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
4921{
4922 return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
4923}
4924
4925static struct device_attribute dev_attr_l3_3_id = {
4926 .attr = { .name = "id", .mode = 0444 },
4927 .show = show_l3_3_id,
4928 .store = NULL,
4929};
4930
4931static struct attribute *attrs_l3_3[] = {
4932 &dev_attr_l3_3_id.attr,
4933 NULL,
4934};
4935
4936static struct attribute_group group_l3_3 = {
4937 .name = "84a7956f-1ea4-4d0d-837f-e39a0376e38c",
4938 .attrs = attrs_l3_3,
4939};
4940
4941static ssize_t
4942show_l3_4_id(struct device *kdev, struct device_attribute *attr, char *buf)
4943{
4944 return sprintf(buf, "%d\n", METRIC_SET_ID_L3_4);
4945}
4946
4947static struct device_attribute dev_attr_l3_4_id = {
4948 .attr = { .name = "id", .mode = 0444 },
4949 .show = show_l3_4_id,
4950 .store = NULL,
4951};
4952
4953static struct attribute *attrs_l3_4[] = {
4954 &dev_attr_l3_4_id.attr,
4955 NULL,
4956};
4957
4958static struct attribute_group group_l3_4 = {
4959 .name = "92b493d9-df18-4bed-be06-5cac6f2a6f5f",
4960 .attrs = attrs_l3_4,
4961};
4962
4963static ssize_t
4964show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
4965{
4966 return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
4967}
4968
4969static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
4970 .attr = { .name = "id", .mode = 0444 },
4971 .show = show_rasterizer_and_pixel_backend_id,
4972 .store = NULL,
4973};
4974
4975static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
4976 &dev_attr_rasterizer_and_pixel_backend_id.attr,
4977 NULL,
4978};
4979
4980static struct attribute_group group_rasterizer_and_pixel_backend = {
4981 .name = "14345c35-cc46-40d0-bb04-6ed1fbb43679",
4982 .attrs = attrs_rasterizer_and_pixel_backend,
4983};
4984
4985static ssize_t
4986show_sampler_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
4987{
4988 return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_1);
4989}
4990
4991static struct device_attribute dev_attr_sampler_1_id = {
4992 .attr = { .name = "id", .mode = 0444 },
4993 .show = show_sampler_1_id,
4994 .store = NULL,
4995};
4996
4997static struct attribute *attrs_sampler_1[] = {
4998 &dev_attr_sampler_1_id.attr,
4999 NULL,
5000};
5001
5002static struct attribute_group group_sampler_1 = {
5003 .name = "f0c6ba37-d3d3-4211-91b5-226730312a54",
5004 .attrs = attrs_sampler_1,
5005};
5006
5007static ssize_t
5008show_sampler_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
5009{
5010 return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_2);
5011}
5012
5013static struct device_attribute dev_attr_sampler_2_id = {
5014 .attr = { .name = "id", .mode = 0444 },
5015 .show = show_sampler_2_id,
5016 .store = NULL,
5017};
5018
5019static struct attribute *attrs_sampler_2[] = {
5020 &dev_attr_sampler_2_id.attr,
5021 NULL,
5022};
5023
5024static struct attribute_group group_sampler_2 = {
5025 .name = "30bf3702-48cf-4bca-b412-7cf50bb2f564",
5026 .attrs = attrs_sampler_2,
5027};
5028
5029static ssize_t
5030show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
5031{
5032 return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
5033}
5034
5035static struct device_attribute dev_attr_tdl_1_id = {
5036 .attr = { .name = "id", .mode = 0444 },
5037 .show = show_tdl_1_id,
5038 .store = NULL,
5039};
5040
5041static struct attribute *attrs_tdl_1[] = {
5042 &dev_attr_tdl_1_id.attr,
5043 NULL,
5044};
5045
5046static struct attribute_group group_tdl_1 = {
5047 .name = "238bec85-df05-44f3-b905-d166712f2451",
5048 .attrs = attrs_tdl_1,
5049};
5050
5051static ssize_t
5052show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
5053{
5054 return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
5055}
5056
5057static struct device_attribute dev_attr_tdl_2_id = {
5058 .attr = { .name = "id", .mode = 0444 },
5059 .show = show_tdl_2_id,
5060 .store = NULL,
5061};
5062
5063static struct attribute *attrs_tdl_2[] = {
5064 &dev_attr_tdl_2_id.attr,
5065 NULL,
5066};
5067
5068static struct attribute_group group_tdl_2 = {
5069 .name = "24bf02cd-8693-4583-981c-c4165b33da01",
5070 .attrs = attrs_tdl_2,
5071};
5072
5073static ssize_t
5074show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
5075{
5076 return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
5077}
5078
5079static struct device_attribute dev_attr_compute_extra_id = {
5080 .attr = { .name = "id", .mode = 0444 },
5081 .show = show_compute_extra_id,
5082 .store = NULL,
5083};
5084
5085static struct attribute *attrs_compute_extra[] = {
5086 &dev_attr_compute_extra_id.attr,
5087 NULL,
5088};
5089
5090static struct attribute_group group_compute_extra = {
5091 .name = "8fb61ba2-2fbb-454c-a136-2dec5a8a595e",
5092 .attrs = attrs_compute_extra,
5093};
5094
5095static ssize_t
5096show_vme_pipe_id(struct device *kdev, struct device_attribute *attr, char *buf)
5097{
5098 return sprintf(buf, "%d\n", METRIC_SET_ID_VME_PIPE);
5099}
5100
5101static struct device_attribute dev_attr_vme_pipe_id = {
5102 .attr = { .name = "id", .mode = 0444 },
5103 .show = show_vme_pipe_id,
5104 .store = NULL,
5105};
5106
5107static struct attribute *attrs_vme_pipe[] = {
5108 &dev_attr_vme_pipe_id.attr,
5109 NULL,
5110};
5111
5112static struct attribute_group group_vme_pipe = {
5113 .name = "e1743ca0-7fc8-410b-a066-de7bbb9280b7",
5114 .attrs = attrs_vme_pipe,
5115};
5116
5117static ssize_t
5118show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
5119{
5120 return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
5121}
5122
5123static struct device_attribute dev_attr_test_oa_id = {
5124 .attr = { .name = "id", .mode = 0444 },
5125 .show = show_test_oa_id,
5126 .store = NULL,
5127};
5128
5129static struct attribute *attrs_test_oa[] = {
5130 &dev_attr_test_oa_id.attr,
5131 NULL,
5132};
5133
5134static struct attribute_group group_test_oa = {
5135 .name = "d6de6f55-e526-4f79-a6a6-d7315c09044e",
5136 .attrs = attrs_test_oa,
5137};
5138
5139int
5140i915_perf_register_sysfs_bdw(struct drm_i915_private *dev_priv)
5141{
5142 const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
5143 int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
5144 int ret = 0;
5145
5146 if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
5147 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
5148 if (ret)
5149 goto error_render_basic;
5150 }
5151 if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
5152 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
5153 if (ret)
5154 goto error_compute_basic;
5155 }
5156 if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
5157 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
5158 if (ret)
5159 goto error_render_pipe_profile;
5160 }
5161 if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
5162 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
5163 if (ret)
5164 goto error_memory_reads;
5165 }
5166 if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
5167 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
5168 if (ret)
5169 goto error_memory_writes;
5170 }
5171 if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
5172 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
5173 if (ret)
5174 goto error_compute_extended;
5175 }
5176 if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
5177 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
5178 if (ret)
5179 goto error_compute_l3_cache;
5180 }
5181 if (get_data_port_reads_coalescing_mux_config(dev_priv, mux_regs, mux_lens)) {
5182 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_data_port_reads_coalescing);
5183 if (ret)
5184 goto error_data_port_reads_coalescing;
5185 }
5186 if (get_data_port_writes_coalescing_mux_config(dev_priv, mux_regs, mux_lens)) {
5187 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_data_port_writes_coalescing);
5188 if (ret)
5189 goto error_data_port_writes_coalescing;
5190 }
5191 if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
5192 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
5193 if (ret)
5194 goto error_hdc_and_sf;
5195 }
5196 if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
5197 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
5198 if (ret)
5199 goto error_l3_1;
5200 }
5201 if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
5202 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
5203 if (ret)
5204 goto error_l3_2;
5205 }
5206 if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
5207 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
5208 if (ret)
5209 goto error_l3_3;
5210 }
5211 if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens)) {
5212 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_4);
5213 if (ret)
5214 goto error_l3_4;
5215 }
5216 if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
5217 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
5218 if (ret)
5219 goto error_rasterizer_and_pixel_backend;
5220 }
5221 if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens)) {
5222 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
5223 if (ret)
5224 goto error_sampler_1;
5225 }
5226 if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens)) {
5227 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
5228 if (ret)
5229 goto error_sampler_2;
5230 }
5231 if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
5232 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
5233 if (ret)
5234 goto error_tdl_1;
5235 }
5236 if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
5237 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
5238 if (ret)
5239 goto error_tdl_2;
5240 }
5241 if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
5242 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
5243 if (ret)
5244 goto error_compute_extra;
5245 }
5246 if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens)) {
5247 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
5248 if (ret)
5249 goto error_vme_pipe;
5250 }
5251 if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
5252 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
5253 if (ret)
5254 goto error_test_oa;
5255 }
5256
5257 return 0;
5258
5259error_test_oa:
5260 if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
5261 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
5262error_vme_pipe:
5263 if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
5264 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
5265error_compute_extra:
5266 if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
5267 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
5268error_tdl_2:
5269 if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
5270 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
5271error_tdl_1:
5272 if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens))
5273 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
5274error_sampler_2:
5275 if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens))
5276 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
5277error_sampler_1:
5278 if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
5279 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
5280error_rasterizer_and_pixel_backend:
5281 if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens))
5282 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_4);
5283error_l3_4:
5284 if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
5285 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
5286error_l3_3:
5287 if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
5288 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
5289error_l3_2:
5290 if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
5291 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
5292error_l3_1:
5293 if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
5294 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
5295error_hdc_and_sf:
5296 if (get_data_port_writes_coalescing_mux_config(dev_priv, mux_regs, mux_lens))
5297 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_data_port_writes_coalescing);
5298error_data_port_writes_coalescing:
5299 if (get_data_port_reads_coalescing_mux_config(dev_priv, mux_regs, mux_lens))
5300 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_data_port_reads_coalescing);
5301error_data_port_reads_coalescing:
5302 if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
5303 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
5304error_compute_l3_cache:
5305 if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
5306 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
5307error_compute_extended:
5308 if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
5309 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
5310error_memory_writes:
5311 if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
5312 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
5313error_memory_reads:
5314 if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
5315 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
5316error_render_pipe_profile:
5317 if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
5318 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
5319error_compute_basic:
5320 if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
5321 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
5322error_render_basic:
5323 return ret;
5324}
5325
5326void
5327i915_perf_unregister_sysfs_bdw(struct drm_i915_private *dev_priv)
5328{
5329 const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
5330 int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
5331
5332 if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
5333 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
5334 if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
5335 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
5336 if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
5337 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
5338 if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
5339 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
5340 if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
5341 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
5342 if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
5343 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
5344 if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
5345 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
5346 if (get_data_port_reads_coalescing_mux_config(dev_priv, mux_regs, mux_lens))
5347 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_data_port_reads_coalescing);
5348 if (get_data_port_writes_coalescing_mux_config(dev_priv, mux_regs, mux_lens))
5349 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_data_port_writes_coalescing);
5350 if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
5351 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
5352 if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
5353 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
5354 if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
5355 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
5356 if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
5357 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
5358 if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens))
5359 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_4);
5360 if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
5361 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
5362 if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens))
5363 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
5364 if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens))
5365 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
5366 if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
5367 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
5368 if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
5369 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
5370 if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
5371 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
5372 if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
5373 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
5374 if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
5375 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
5376}
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/i915_oa_bdw.h
new file mode 100644
index 000000000000..6363ff9f64c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.h
@@ -0,0 +1,40 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#ifndef __I915_OA_BDW_H__
30#define __I915_OA_BDW_H__
31
32extern int i915_oa_n_builtin_metric_sets_bdw;
33
34extern int i915_oa_select_metric_set_bdw(struct drm_i915_private *dev_priv);
35
36extern int i915_perf_register_sysfs_bdw(struct drm_i915_private *dev_priv);
37
38extern void i915_perf_unregister_sysfs_bdw(struct drm_i915_private *dev_priv);
39
40#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/i915_oa_bxt.c
new file mode 100644
index 000000000000..93864d8f32dd
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.c
@@ -0,0 +1,2690 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#include <linux/sysfs.h>
30
31#include "i915_drv.h"
32#include "i915_oa_bxt.h"
33
/*
 * Internal identifiers for the built-in BXT OA metric sets defined in this
 * file. Numbering starts at 1 — presumably 0 is reserved to mean "no metric
 * set selected"; TODO confirm against i915_oa_select_metric_set_bxt().
 */
enum metric_set_id {
	METRIC_SET_ID_RENDER_BASIC = 1,
	METRIC_SET_ID_COMPUTE_BASIC,
	METRIC_SET_ID_RENDER_PIPE_PROFILE,
	METRIC_SET_ID_MEMORY_READS,
	METRIC_SET_ID_MEMORY_WRITES,
	METRIC_SET_ID_COMPUTE_EXTENDED,
	METRIC_SET_ID_COMPUTE_L3_CACHE,
	METRIC_SET_ID_HDC_AND_SF,
	METRIC_SET_ID_L3_1,
	METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
	METRIC_SET_ID_SAMPLER,
	METRIC_SET_ID_TDL_1,
	METRIC_SET_ID_TDL_2,
	METRIC_SET_ID_COMPUTE_EXTRA,
	METRIC_SET_ID_TEST_OA,
};

/*
 * Number of METRIC_SET_ID_* entries above. The file is autogenerated, so
 * the generator keeps this literal in sync with the enum — do not hand-edit
 * one without the other.
 */
int i915_oa_n_builtin_metric_sets_bxt = 15;
53
/*
 * Register/value programming tables for the "render basic" metric set.
 * Autogenerated by GPU Top — the individual values are opaque hardware
 * configuration and must not be hand-edited.
 */
static const struct i915_oa_reg b_counter_config_render_basic[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2740), 0x00000000 },
};

static const struct i915_oa_reg flex_eu_config_render_basic[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/*
 * Mux config variant valid for SKU revision >= 0x03 only (see
 * get_render_basic_mux_config() below for the revision check).
 */
static const struct i915_oa_reg mux_config_render_basic_0_sku_gte_0x03[] = {
	{ _MMIO(0x9888), 0x166c00f0 },
	{ _MMIO(0x9888), 0x12120280 },
	{ _MMIO(0x9888), 0x12320280 },
	{ _MMIO(0x9888), 0x11930317 },
	{ _MMIO(0x9888), 0x159303df },
	{ _MMIO(0x9888), 0x3f900c00 },
	{ _MMIO(0x9888), 0x419000a0 },
	{ _MMIO(0x9888), 0x002d1000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x082d5000 },
	{ _MMIO(0x9888), 0x0a2d1000 },
	{ _MMIO(0x9888), 0x0c2e0800 },
	{ _MMIO(0x9888), 0x0e2e5900 },
	{ _MMIO(0x9888), 0x0a4c8000 },
	{ _MMIO(0x9888), 0x0c4c8000 },
	{ _MMIO(0x9888), 0x0e4c4000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e2000 },
	{ _MMIO(0x9888), 0x1c4f0010 },
	{ _MMIO(0x9888), 0x0a6c0053 },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1a0fcc00 },
	{ _MMIO(0x9888), 0x1c0f0002 },
	{ _MMIO(0x9888), 0x1c2c0040 },
	{ _MMIO(0x9888), 0x00101000 },
	{ _MMIO(0x9888), 0x04101000 },
	{ _MMIO(0x9888), 0x00114000 },
	{ _MMIO(0x9888), 0x08114000 },
	{ _MMIO(0x9888), 0x00120020 },
	{ _MMIO(0x9888), 0x08120021 },
	{ _MMIO(0x9888), 0x00141000 },
	{ _MMIO(0x9888), 0x08141000 },
	{ _MMIO(0x9888), 0x02308000 },
	{ _MMIO(0x9888), 0x04302000 },
	{ _MMIO(0x9888), 0x06318000 },
	{ _MMIO(0x9888), 0x08318000 },
	{ _MMIO(0x9888), 0x06320800 },
	{ _MMIO(0x9888), 0x08320840 },
	{ _MMIO(0x9888), 0x00320000 },
	{ _MMIO(0x9888), 0x06344000 },
	{ _MMIO(0x9888), 0x08344000 },
	{ _MMIO(0x9888), 0x0d931831 },
	{ _MMIO(0x9888), 0x0f939f3f },
	{ _MMIO(0x9888), 0x01939e80 },
	{ _MMIO(0x9888), 0x039303bc },
	{ _MMIO(0x9888), 0x0593000e },
	{ _MMIO(0x9888), 0x1993002a },
	{ _MMIO(0x9888), 0x07930000 },
	{ _MMIO(0x9888), 0x09930000 },
	{ _MMIO(0x9888), 0x1d900177 },
	{ _MMIO(0x9888), 0x1f900187 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x13904000 },
	{ _MMIO(0x9888), 0x21904000 },
	{ _MMIO(0x9888), 0x23904000 },
	{ _MMIO(0x9888), 0x25904000 },
	{ _MMIO(0x9888), 0x27904000 },
	{ _MMIO(0x9888), 0x2b904000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x53901110 },
	{ _MMIO(0x9888), 0x43900423 },
	{ _MMIO(0x9888), 0x55900111 },
	{ _MMIO(0x9888), 0x47900c02 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900020 },
	{ _MMIO(0x9888), 0x59901111 },
	{ _MMIO(0x9888), 0x4b900421 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900001 },
	{ _MMIO(0x9888), 0x45900821 },
};
153
154static int
155get_render_basic_mux_config(struct drm_i915_private *dev_priv,
156 const struct i915_oa_reg **regs,
157 int *lens)
158{
159 int n = 0;
160
161 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
162 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
163
164 if (dev_priv->drm.pdev->revision >= 0x03) {
165 regs[n] = mux_config_render_basic_0_sku_gte_0x03;
166 lens[n] = ARRAY_SIZE(mux_config_render_basic_0_sku_gte_0x03);
167 n++;
168 }
169
170 return n;
171}
172
/*
 * Register/value programming tables for the "compute basic" metric set.
 * Autogenerated by GPU Top — values are opaque hardware configuration.
 */
static const struct i915_oa_reg b_counter_config_compute_basic[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2740), 0x00000000 },
};

static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};

/* Single mux config, valid for all BXT revisions (no SKU variants). */
static const struct i915_oa_reg mux_config_compute_basic[] = {
	{ _MMIO(0x9888), 0x104f00e0 },
	{ _MMIO(0x9888), 0x124f1c00 },
	{ _MMIO(0x9888), 0x39900340 },
	{ _MMIO(0x9888), 0x3f900c00 },
	{ _MMIO(0x9888), 0x41900000 },
	{ _MMIO(0x9888), 0x002d5000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x082d4000 },
	{ _MMIO(0x9888), 0x0a2d1000 },
	{ _MMIO(0x9888), 0x0c2d5000 },
	{ _MMIO(0x9888), 0x0e2d4000 },
	{ _MMIO(0x9888), 0x0c2e1400 },
	{ _MMIO(0x9888), 0x0e2e5100 },
	{ _MMIO(0x9888), 0x102e0114 },
	{ _MMIO(0x9888), 0x044cc000 },
	{ _MMIO(0x9888), 0x0a4c8000 },
	{ _MMIO(0x9888), 0x0c4c8000 },
	{ _MMIO(0x9888), 0x0e4c4000 },
	{ _MMIO(0x9888), 0x104c8000 },
	{ _MMIO(0x9888), 0x124c8000 },
	{ _MMIO(0x9888), 0x164c2000 },
	{ _MMIO(0x9888), 0x004ea000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e2000 },
	{ _MMIO(0x9888), 0x0c4ea000 },
	{ _MMIO(0x9888), 0x0e4e8000 },
	{ _MMIO(0x9888), 0x004f6b42 },
	{ _MMIO(0x9888), 0x064f6200 },
	{ _MMIO(0x9888), 0x084f4100 },
	{ _MMIO(0x9888), 0x0a4f0061 },
	{ _MMIO(0x9888), 0x0c4f6c4c },
	{ _MMIO(0x9888), 0x0e4f4b00 },
	{ _MMIO(0x9888), 0x1a4f0000 },
	{ _MMIO(0x9888), 0x1c4f0000 },
	{ _MMIO(0x9888), 0x180f5000 },
	{ _MMIO(0x9888), 0x1a0f8800 },
	{ _MMIO(0x9888), 0x1c0f08a2 },
	{ _MMIO(0x9888), 0x182c4000 },
	{ _MMIO(0x9888), 0x1c2c1451 },
	{ _MMIO(0x9888), 0x1e2c0001 },
	{ _MMIO(0x9888), 0x1a2c0010 },
	{ _MMIO(0x9888), 0x01938000 },
	{ _MMIO(0x9888), 0x0f938000 },
	{ _MMIO(0x9888), 0x19938a28 },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x19900177 },
	{ _MMIO(0x9888), 0x1b900178 },
	{ _MMIO(0x9888), 0x1d900125 },
	{ _MMIO(0x9888), 0x1f900123 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x13904000 },
	{ _MMIO(0x9888), 0x21904000 },
	{ _MMIO(0x9888), 0x25904000 },
	{ _MMIO(0x9888), 0x27904000 },
	{ _MMIO(0x9888), 0x2b904000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x53901000 },
	{ _MMIO(0x9888), 0x43900000 },
	{ _MMIO(0x9888), 0x55900111 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900000 },
	{ _MMIO(0x9888), 0x45900000 },
};
264
265static int
266get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
267 const struct i915_oa_reg **regs,
268 int *lens)
269{
270 int n = 0;
271
272 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
273 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
274
275 regs[n] = mux_config_compute_basic;
276 lens[n] = ARRAY_SIZE(mux_config_compute_basic);
277 n++;
278
279 return n;
280}
281
/*
 * Register/value programming tables for the "render pipe profile" metric
 * set. Autogenerated by GPU Top — values are opaque hardware configuration.
 */
static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007ffea },
	{ _MMIO(0x2774), 0x00007ffc },
	{ _MMIO(0x2778), 0x0007affa },
	{ _MMIO(0x277c), 0x0000f5fd },
	{ _MMIO(0x2780), 0x00079ffa },
	{ _MMIO(0x2784), 0x0000f3fb },
	{ _MMIO(0x2788), 0x0007bf7a },
	{ _MMIO(0x278c), 0x0000f7e7 },
	{ _MMIO(0x2790), 0x0007fefa },
	{ _MMIO(0x2794), 0x0000f7cf },
	{ _MMIO(0x2798), 0x00077ffa },
	{ _MMIO(0x279c), 0x0000efdf },
	{ _MMIO(0x27a0), 0x0006fffa },
	{ _MMIO(0x27a4), 0x0000cfbf },
	{ _MMIO(0x27a8), 0x0003fffa },
	{ _MMIO(0x27ac), 0x00005f7f },
};

static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};

/* Single mux config, valid for all BXT revisions (no SKU variants). */
static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
	{ _MMIO(0x9888), 0x0c2e001f },
	{ _MMIO(0x9888), 0x0a2f0000 },
	{ _MMIO(0x9888), 0x10186800 },
	{ _MMIO(0x9888), 0x11810019 },
	{ _MMIO(0x9888), 0x15810013 },
	{ _MMIO(0x9888), 0x13820020 },
	{ _MMIO(0x9888), 0x11830020 },
	{ _MMIO(0x9888), 0x17840000 },
	{ _MMIO(0x9888), 0x11860007 },
	{ _MMIO(0x9888), 0x21860000 },
	{ _MMIO(0x9888), 0x178703e0 },
	{ _MMIO(0x9888), 0x0c2d8000 },
	{ _MMIO(0x9888), 0x042d4000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x022e5400 },
	{ _MMIO(0x9888), 0x002e0000 },
	{ _MMIO(0x9888), 0x0e2e0080 },
	{ _MMIO(0x9888), 0x082f0040 },
	{ _MMIO(0x9888), 0x002f0000 },
	{ _MMIO(0x9888), 0x06143000 },
	{ _MMIO(0x9888), 0x06174000 },
	{ _MMIO(0x9888), 0x06180012 },
	{ _MMIO(0x9888), 0x00180000 },
	{ _MMIO(0x9888), 0x0d804000 },
	{ _MMIO(0x9888), 0x0f804000 },
	{ _MMIO(0x9888), 0x05804000 },
	{ _MMIO(0x9888), 0x09810200 },
	{ _MMIO(0x9888), 0x0b810030 },
	{ _MMIO(0x9888), 0x03810003 },
	{ _MMIO(0x9888), 0x21819140 },
	{ _MMIO(0x9888), 0x23819050 },
	{ _MMIO(0x9888), 0x25810018 },
	{ _MMIO(0x9888), 0x0b820980 },
	{ _MMIO(0x9888), 0x03820d80 },
	{ _MMIO(0x9888), 0x11820000 },
	{ _MMIO(0x9888), 0x0182c000 },
	{ _MMIO(0x9888), 0x07828000 },
	{ _MMIO(0x9888), 0x09824000 },
	{ _MMIO(0x9888), 0x0f828000 },
	{ _MMIO(0x9888), 0x0d830004 },
	{ _MMIO(0x9888), 0x0583000c },
	{ _MMIO(0x9888), 0x0f831000 },
	{ _MMIO(0x9888), 0x01848072 },
	{ _MMIO(0x9888), 0x11840000 },
	{ _MMIO(0x9888), 0x07848000 },
	{ _MMIO(0x9888), 0x09844000 },
	{ _MMIO(0x9888), 0x0f848000 },
	{ _MMIO(0x9888), 0x07860000 },
	{ _MMIO(0x9888), 0x09860092 },
	{ _MMIO(0x9888), 0x0f860400 },
	{ _MMIO(0x9888), 0x01869100 },
	{ _MMIO(0x9888), 0x0f870065 },
	{ _MMIO(0x9888), 0x01870000 },
	{ _MMIO(0x9888), 0x19930800 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x1b952000 },
	{ _MMIO(0x9888), 0x1d955055 },
	{ _MMIO(0x9888), 0x1f951455 },
	{ _MMIO(0x9888), 0x0992a000 },
	{ _MMIO(0x9888), 0x0f928000 },
	{ _MMIO(0x9888), 0x1192a800 },
	{ _MMIO(0x9888), 0x1392028a },
	{ _MMIO(0x9888), 0x0b92a000 },
	{ _MMIO(0x9888), 0x0d922000 },
	{ _MMIO(0x9888), 0x13908000 },
	{ _MMIO(0x9888), 0x21908000 },
	{ _MMIO(0x9888), 0x23908000 },
	{ _MMIO(0x9888), 0x25908000 },
	{ _MMIO(0x9888), 0x27908000 },
	{ _MMIO(0x9888), 0x29908000 },
	{ _MMIO(0x9888), 0x2b908000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x2f908000 },
	{ _MMIO(0x9888), 0x31908000 },
	{ _MMIO(0x9888), 0x15908000 },
	{ _MMIO(0x9888), 0x17908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900c01 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900863 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900061 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900000 },
	{ _MMIO(0x9888), 0x45900c22 },
};
411
412static int
413get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
414 const struct i915_oa_reg **regs,
415 int *lens)
416{
417 int n = 0;
418
419 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
420 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
421
422 regs[n] = mux_config_render_pipe_profile;
423 lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
424 n++;
425
426 return n;
427}
428
/*
 * Register/value programming tables for the "memory reads" metric set.
 * Autogenerated by GPU Top — values are opaque hardware configuration.
 */
static const struct i915_oa_reg b_counter_config_memory_reads[] = {
	{ _MMIO(0x272c), 0xffffffff },
	{ _MMIO(0x2728), 0xffffffff },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x271c), 0xffffffff },
	{ _MMIO(0x2718), 0xffffffff },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x86543210 },
	{ _MMIO(0x2748), 0x86543210 },
	{ _MMIO(0x2744), 0x00006667 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x86543210 },
	{ _MMIO(0x2758), 0x86543210 },
	{ _MMIO(0x2754), 0x00006465 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fe00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fe00 },
	{ _MMIO(0x2780), 0x0007f872 },
	{ _MMIO(0x2784), 0x0000fe00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fe00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fe00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fe00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fe00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fe00 },
};

static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};

/* Single mux config, valid for all BXT revisions (no SKU variants). */
static const struct i915_oa_reg mux_config_memory_reads[] = {
	{ _MMIO(0x9888), 0x19800343 },
	{ _MMIO(0x9888), 0x39900340 },
	{ _MMIO(0x9888), 0x3f901000 },
	{ _MMIO(0x9888), 0x41900003 },
	{ _MMIO(0x9888), 0x03803180 },
	{ _MMIO(0x9888), 0x058035e2 },
	{ _MMIO(0x9888), 0x0780006a },
	{ _MMIO(0x9888), 0x11800000 },
	{ _MMIO(0x9888), 0x2181a000 },
	{ _MMIO(0x9888), 0x2381000a },
	{ _MMIO(0x9888), 0x1d950550 },
	{ _MMIO(0x9888), 0x0b928000 },
	{ _MMIO(0x9888), 0x0d92a000 },
	{ _MMIO(0x9888), 0x0f922000 },
	{ _MMIO(0x9888), 0x13900170 },
	{ _MMIO(0x9888), 0x21900171 },
	{ _MMIO(0x9888), 0x23900172 },
	{ _MMIO(0x9888), 0x25900173 },
	{ _MMIO(0x9888), 0x27900174 },
	{ _MMIO(0x9888), 0x29900175 },
	{ _MMIO(0x9888), 0x2b900176 },
	{ _MMIO(0x9888), 0x2d900177 },
	{ _MMIO(0x9888), 0x2f90017f },
	{ _MMIO(0x9888), 0x31900125 },
	{ _MMIO(0x9888), 0x15900123 },
	{ _MMIO(0x9888), 0x17900121 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43901084 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47901080 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49901084 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b901084 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900004 },
	{ _MMIO(0x9888), 0x45900000 },
};
519
520static int
521get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
522 const struct i915_oa_reg **regs,
523 int *lens)
524{
525 int n = 0;
526
527 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
528 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
529
530 regs[n] = mux_config_memory_reads;
531 lens[n] = ARRAY_SIZE(mux_config_memory_reads);
532 n++;
533
534 return n;
535}
536
/*
 * Register/value programming tables for the "memory writes" metric set.
 * Autogenerated by GPU Top — values are opaque hardware configuration.
 */
static const struct i915_oa_reg b_counter_config_memory_writes[] = {
	{ _MMIO(0x272c), 0xffffffff },
	{ _MMIO(0x2728), 0xffffffff },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x271c), 0xffffffff },
	{ _MMIO(0x2718), 0xffffffff },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x86543210 },
	{ _MMIO(0x2748), 0x86543210 },
	{ _MMIO(0x2744), 0x00006667 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x86543210 },
	{ _MMIO(0x2758), 0x86543210 },
	{ _MMIO(0x2754), 0x00006465 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fe00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fe00 },
	{ _MMIO(0x2780), 0x0007f822 },
	{ _MMIO(0x2784), 0x0000fe00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fe00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fe00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fe00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fe00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fe00 },
};

static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};

/* Single mux config, valid for all BXT revisions (no SKU variants). */
static const struct i915_oa_reg mux_config_memory_writes[] = {
	{ _MMIO(0x9888), 0x19800343 },
	{ _MMIO(0x9888), 0x39900340 },
	{ _MMIO(0x9888), 0x3f900000 },
	{ _MMIO(0x9888), 0x41900080 },
	{ _MMIO(0x9888), 0x03803180 },
	{ _MMIO(0x9888), 0x058035e2 },
	{ _MMIO(0x9888), 0x0780006a },
	{ _MMIO(0x9888), 0x11800000 },
	{ _MMIO(0x9888), 0x2181a000 },
	{ _MMIO(0x9888), 0x2381000a },
	{ _MMIO(0x9888), 0x1d950550 },
	{ _MMIO(0x9888), 0x0b928000 },
	{ _MMIO(0x9888), 0x0d92a000 },
	{ _MMIO(0x9888), 0x0f922000 },
	{ _MMIO(0x9888), 0x13900180 },
	{ _MMIO(0x9888), 0x21900181 },
	{ _MMIO(0x9888), 0x23900182 },
	{ _MMIO(0x9888), 0x25900183 },
	{ _MMIO(0x9888), 0x27900184 },
	{ _MMIO(0x9888), 0x29900185 },
	{ _MMIO(0x9888), 0x2b900186 },
	{ _MMIO(0x9888), 0x2d900187 },
	{ _MMIO(0x9888), 0x2f900170 },
	{ _MMIO(0x9888), 0x31900125 },
	{ _MMIO(0x9888), 0x15900123 },
	{ _MMIO(0x9888), 0x17900121 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43901084 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47901080 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49901084 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b901084 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900004 },
	{ _MMIO(0x9888), 0x45900000 },
};
627
628static int
629get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
630 const struct i915_oa_reg **regs,
631 int *lens)
632{
633 int n = 0;
634
635 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
636 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
637
638 regs[n] = mux_config_memory_writes;
639 lens[n] = ARRAY_SIZE(mux_config_memory_writes);
640 n++;
641
642 return n;
643}
644
/*
 * Boolean counter register/value pairs for the "compute extended"
 * metric set.  NOTE(review): values look machine-generated from
 * Intel's metric descriptions — do not hand-edit; regenerate instead.
 */
static const struct i915_oa_reg b_counter_config_compute_extended[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fc2a },
	{ _MMIO(0x2774), 0x0000bf00 },
	{ _MMIO(0x2778), 0x0007fc6a },
	{ _MMIO(0x277c), 0x0000bf00 },
	{ _MMIO(0x2780), 0x0007fc92 },
	{ _MMIO(0x2784), 0x0000bf00 },
	{ _MMIO(0x2788), 0x0007fca2 },
	{ _MMIO(0x278c), 0x0000bf00 },
	{ _MMIO(0x2790), 0x0007fc32 },
	{ _MMIO(0x2794), 0x0000bf00 },
	{ _MMIO(0x2798), 0x0007fc9a },
	{ _MMIO(0x279c), 0x0000bf00 },
	{ _MMIO(0x27a0), 0x0007fe6a },
	{ _MMIO(0x27a4), 0x0000bf00 },
	{ _MMIO(0x27a8), 0x0007fe7a },
	{ _MMIO(0x27ac), 0x0000bf00 },
};
668
/* Flexible EU counter register/value pairs for "compute extended". */
static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};
678
/*
 * Mux (signal-selection) register/value pairs for "compute extended";
 * applied via get_compute_extended_mux_config().  Opaque generated
 * data — keep entry order exactly as emitted.
 */
static const struct i915_oa_reg mux_config_compute_extended[] = {
	{ _MMIO(0x9888), 0x104f00e0 },
	{ _MMIO(0x9888), 0x141c0160 },
	{ _MMIO(0x9888), 0x161c0015 },
	{ _MMIO(0x9888), 0x181c0120 },
	{ _MMIO(0x9888), 0x002d5000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x082d5000 },
	{ _MMIO(0x9888), 0x0a2d5000 },
	{ _MMIO(0x9888), 0x0c2d5000 },
	{ _MMIO(0x9888), 0x0e2d5000 },
	{ _MMIO(0x9888), 0x022d5000 },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x0c2e5400 },
	{ _MMIO(0x9888), 0x0e2e5515 },
	{ _MMIO(0x9888), 0x102e0155 },
	{ _MMIO(0x9888), 0x044cc000 },
	{ _MMIO(0x9888), 0x0a4c8000 },
	{ _MMIO(0x9888), 0x0c4cc000 },
	{ _MMIO(0x9888), 0x0e4cc000 },
	{ _MMIO(0x9888), 0x104c8000 },
	{ _MMIO(0x9888), 0x124c8000 },
	{ _MMIO(0x9888), 0x144c8000 },
	{ _MMIO(0x9888), 0x164c2000 },
	{ _MMIO(0x9888), 0x064cc000 },
	{ _MMIO(0x9888), 0x084cc000 },
	{ _MMIO(0x9888), 0x004ea000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084ea000 },
	{ _MMIO(0x9888), 0x0a4ea000 },
	{ _MMIO(0x9888), 0x0c4ea000 },
	{ _MMIO(0x9888), 0x0e4ea000 },
	{ _MMIO(0x9888), 0x024ea000 },
	{ _MMIO(0x9888), 0x044ea000 },
	{ _MMIO(0x9888), 0x0e4f4b41 },
	{ _MMIO(0x9888), 0x004f4200 },
	{ _MMIO(0x9888), 0x024f404c },
	{ _MMIO(0x9888), 0x1c4f0000 },
	{ _MMIO(0x9888), 0x1a4f0000 },
	{ _MMIO(0x9888), 0x001b4000 },
	{ _MMIO(0x9888), 0x061b8000 },
	{ _MMIO(0x9888), 0x081bc000 },
	{ _MMIO(0x9888), 0x0a1bc000 },
	{ _MMIO(0x9888), 0x0c1bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x001c0031 },
	{ _MMIO(0x9888), 0x061c1900 },
	{ _MMIO(0x9888), 0x081c1a33 },
	{ _MMIO(0x9888), 0x0a1c1b35 },
	{ _MMIO(0x9888), 0x0c1c3337 },
	{ _MMIO(0x9888), 0x041c31c7 },
	{ _MMIO(0x9888), 0x180f5000 },
	{ _MMIO(0x9888), 0x1a0fa8aa },
	{ _MMIO(0x9888), 0x1c0f0aaa },
	{ _MMIO(0x9888), 0x182c8000 },
	{ _MMIO(0x9888), 0x1c2c6aaa },
	{ _MMIO(0x9888), 0x1e2c0001 },
	{ _MMIO(0x9888), 0x1a2c2950 },
	{ _MMIO(0x9888), 0x01938000 },
	{ _MMIO(0x9888), 0x0f938000 },
	{ _MMIO(0x9888), 0x1993aaaa },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x05938000 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x13904000 },
	{ _MMIO(0x9888), 0x21904000 },
	{ _MMIO(0x9888), 0x23904000 },
	{ _MMIO(0x9888), 0x25904000 },
	{ _MMIO(0x9888), 0x27904000 },
	{ _MMIO(0x9888), 0x29904000 },
	{ _MMIO(0x9888), 0x2b904000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900420 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900400 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900001 },
	{ _MMIO(0x9888), 0x45900001 },
};
773
774static int
775get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
776 const struct i915_oa_reg **regs,
777 int *lens)
778{
779 int n = 0;
780
781 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
782 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
783
784 regs[n] = mux_config_compute_extended;
785 lens[n] = ARRAY_SIZE(mux_config_compute_extended);
786 n++;
787
788 return n;
789}
790
/* Boolean counter register/value pairs for "compute L3 cache". */
static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x30800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fffa },
	{ _MMIO(0x2774), 0x0000fefe },
	{ _MMIO(0x2778), 0x0007fffa },
	{ _MMIO(0x277c), 0x0000fefd },
	{ _MMIO(0x2790), 0x0007fffa },
	{ _MMIO(0x2794), 0x0000fbef },
	{ _MMIO(0x2798), 0x0007fffa },
	{ _MMIO(0x279c), 0x0000fbdf },
};
806
/* Flexible EU counter register/value pairs for "compute L3 cache". */
static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00101100 },
	{ _MMIO(0xe45c), 0x00201200 },
	{ _MMIO(0xe55c), 0x00301300 },
	{ _MMIO(0xe65c), 0x00401400 },
};
816
/*
 * Mux register/value pairs for "compute L3 cache"; applied via
 * get_compute_l3_cache_mux_config().  Opaque generated data — keep
 * entry order exactly as emitted.
 */
static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
	{ _MMIO(0x9888), 0x166c03b0 },
	{ _MMIO(0x9888), 0x1593001e },
	{ _MMIO(0x9888), 0x3f900c00 },
	{ _MMIO(0x9888), 0x41900000 },
	{ _MMIO(0x9888), 0x002d1000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x082d5000 },
	{ _MMIO(0x9888), 0x0e2d5000 },
	{ _MMIO(0x9888), 0x0c2e0400 },
	{ _MMIO(0x9888), 0x0e2e1500 },
	{ _MMIO(0x9888), 0x102e0140 },
	{ _MMIO(0x9888), 0x044c4000 },
	{ _MMIO(0x9888), 0x0a4c8000 },
	{ _MMIO(0x9888), 0x0c4cc000 },
	{ _MMIO(0x9888), 0x144c8000 },
	{ _MMIO(0x9888), 0x164c2000 },
	{ _MMIO(0x9888), 0x004e2000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084ea000 },
	{ _MMIO(0x9888), 0x0e4ea000 },
	{ _MMIO(0x9888), 0x1a4f4001 },
	{ _MMIO(0x9888), 0x1c4f5005 },
	{ _MMIO(0x9888), 0x006c0051 },
	{ _MMIO(0x9888), 0x066c5000 },
	{ _MMIO(0x9888), 0x086c5c5d },
	{ _MMIO(0x9888), 0x0e6c5e5f },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x146c0000 },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x180f1000 },
	{ _MMIO(0x9888), 0x1a0fa800 },
	{ _MMIO(0x9888), 0x1c0f0a00 },
	{ _MMIO(0x9888), 0x182c4000 },
	{ _MMIO(0x9888), 0x1c2c4015 },
	{ _MMIO(0x9888), 0x1e2c0001 },
	{ _MMIO(0x9888), 0x03931980 },
	{ _MMIO(0x9888), 0x05930032 },
	{ _MMIO(0x9888), 0x11930000 },
	{ _MMIO(0x9888), 0x01938000 },
	{ _MMIO(0x9888), 0x0f938000 },
	{ _MMIO(0x9888), 0x1993a00a },
	{ _MMIO(0x9888), 0x07930000 },
	{ _MMIO(0x9888), 0x09930000 },
	{ _MMIO(0x9888), 0x1d900177 },
	{ _MMIO(0x9888), 0x1f900178 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x13904000 },
	{ _MMIO(0x9888), 0x21904000 },
	{ _MMIO(0x9888), 0x23904000 },
	{ _MMIO(0x9888), 0x25904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x53901000 },
	{ _MMIO(0x9888), 0x43900000 },
	{ _MMIO(0x9888), 0x55900111 },
	{ _MMIO(0x9888), 0x47900001 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x4d900000 },
	{ _MMIO(0x9888), 0x45900400 },
};
886
887static int
888get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
889 const struct i915_oa_reg **regs,
890 int *lens)
891{
892 int n = 0;
893
894 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
895 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
896
897 regs[n] = mux_config_compute_l3_cache;
898 lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
899 n++;
900
901 return n;
902}
903
/* Boolean counter register/value pairs for "HDC and SF". */
static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x10800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000fdff },
};
914
/* Flexible EU counter register/value pairs for "HDC and SF". */
static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
924
/*
 * Mux register/value pairs for "HDC and SF"; applied via
 * get_hdc_and_sf_mux_config().  Opaque generated data.
 */
static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
	{ _MMIO(0x9888), 0x104f0232 },
	{ _MMIO(0x9888), 0x124f4640 },
	{ _MMIO(0x9888), 0x11834400 },
	{ _MMIO(0x9888), 0x022d4000 },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x0e2e0055 },
	{ _MMIO(0x9888), 0x064c8000 },
	{ _MMIO(0x9888), 0x084cc000 },
	{ _MMIO(0x9888), 0x0a4c4000 },
	{ _MMIO(0x9888), 0x024e8000 },
	{ _MMIO(0x9888), 0x044ea000 },
	{ _MMIO(0x9888), 0x064e2000 },
	{ _MMIO(0x9888), 0x024f6100 },
	{ _MMIO(0x9888), 0x044f416b },
	{ _MMIO(0x9888), 0x064f004b },
	{ _MMIO(0x9888), 0x1a4f0000 },
	{ _MMIO(0x9888), 0x1a0f02a8 },
	{ _MMIO(0x9888), 0x1a2c5500 },
	{ _MMIO(0x9888), 0x0f808000 },
	{ _MMIO(0x9888), 0x25810020 },
	{ _MMIO(0x9888), 0x0f8305c0 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x1f951000 },
	{ _MMIO(0x9888), 0x13920200 },
	{ _MMIO(0x9888), 0x31908000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4d900003 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
968
969static int
970get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
971 const struct i915_oa_reg **regs,
972 int *lens)
973{
974 int n = 0;
975
976 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
977 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
978
979 regs[n] = mux_config_hdc_and_sf;
980 lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
981 n++;
982
983 return n;
984}
985
/* Boolean counter register/value pairs for the "L3_1" metric set. */
static const struct i915_oa_reg b_counter_config_l3_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};
1010
/* Flexible EU counter register/value pairs for the "L3_1" metric set. */
static const struct i915_oa_reg flex_eu_config_l3_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1020
/*
 * Mux register/value pairs for "L3_1", variant used when the PCI
 * revision is >= 0x03 (see get_l3_1_mux_config()).  Differs from the
 * lt_0x03 variant only in a handful of 0x64xx/0x65xx entries.
 */
static const struct i915_oa_reg mux_config_l3_1_0_sku_gte_0x03[] = {
	{ _MMIO(0x9888), 0x12643400 },
	{ _MMIO(0x9888), 0x12653400 },
	{ _MMIO(0x9888), 0x106c6800 },
	{ _MMIO(0x9888), 0x126c001e },
	{ _MMIO(0x9888), 0x166c0010 },
	{ _MMIO(0x9888), 0x0c2d5000 },
	{ _MMIO(0x9888), 0x0e2d5000 },
	{ _MMIO(0x9888), 0x002d4000 },
	{ _MMIO(0x9888), 0x022d5000 },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x102e0154 },
	{ _MMIO(0x9888), 0x0c2e5000 },
	{ _MMIO(0x9888), 0x0e2e0055 },
	{ _MMIO(0x9888), 0x104c8000 },
	{ _MMIO(0x9888), 0x124c8000 },
	{ _MMIO(0x9888), 0x144c8000 },
	{ _MMIO(0x9888), 0x164c2000 },
	{ _MMIO(0x9888), 0x044c8000 },
	{ _MMIO(0x9888), 0x064cc000 },
	{ _MMIO(0x9888), 0x084cc000 },
	{ _MMIO(0x9888), 0x0a4c4000 },
	{ _MMIO(0x9888), 0x0c4ea000 },
	{ _MMIO(0x9888), 0x0e4ea000 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x024ea000 },
	{ _MMIO(0x9888), 0x044ea000 },
	{ _MMIO(0x9888), 0x064e2000 },
	{ _MMIO(0x9888), 0x1c4f5500 },
	{ _MMIO(0x9888), 0x1a4f1554 },
	{ _MMIO(0x9888), 0x0a640024 },
	{ _MMIO(0x9888), 0x10640000 },
	{ _MMIO(0x9888), 0x04640000 },
	{ _MMIO(0x9888), 0x0c650024 },
	{ _MMIO(0x9888), 0x10650000 },
	{ _MMIO(0x9888), 0x06650000 },
	{ _MMIO(0x9888), 0x0c6c5327 },
	{ _MMIO(0x9888), 0x0e6c5425 },
	{ _MMIO(0x9888), 0x006c2a00 },
	{ _MMIO(0x9888), 0x026c285b },
	{ _MMIO(0x9888), 0x046c005c },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1a6c0900 },
	{ _MMIO(0x9888), 0x1c0f0aa0 },
	{ _MMIO(0x9888), 0x180f4000 },
	{ _MMIO(0x9888), 0x1a0f02aa },
	{ _MMIO(0x9888), 0x1c2c5400 },
	{ _MMIO(0x9888), 0x1e2c0001 },
	{ _MMIO(0x9888), 0x1a2c5550 },
	{ _MMIO(0x9888), 0x1993aa00 },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x05938000 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x2b904000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900421 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900001 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900420 },
	{ _MMIO(0x9888), 0x45900021 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900000 },
};
1099
/*
 * Mux register/value pairs for "L3_1", variant used when the PCI
 * revision is < 0x03 (see get_l3_1_mux_config()).  Differs from the
 * gte_0x03 variant only in a handful of 0x64xx/0x65xx entries.
 */
static const struct i915_oa_reg mux_config_l3_1_0_sku_lt_0x03[] = {
	{ _MMIO(0x9888), 0x14640340 },
	{ _MMIO(0x9888), 0x14650340 },
	{ _MMIO(0x9888), 0x106c6800 },
	{ _MMIO(0x9888), 0x126c001e },
	{ _MMIO(0x9888), 0x166c0010 },
	{ _MMIO(0x9888), 0x0c2d5000 },
	{ _MMIO(0x9888), 0x0e2d5000 },
	{ _MMIO(0x9888), 0x002d4000 },
	{ _MMIO(0x9888), 0x022d5000 },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x102e0154 },
	{ _MMIO(0x9888), 0x0c2e5000 },
	{ _MMIO(0x9888), 0x0e2e0055 },
	{ _MMIO(0x9888), 0x104c8000 },
	{ _MMIO(0x9888), 0x124c8000 },
	{ _MMIO(0x9888), 0x144c8000 },
	{ _MMIO(0x9888), 0x164c2000 },
	{ _MMIO(0x9888), 0x044c8000 },
	{ _MMIO(0x9888), 0x064cc000 },
	{ _MMIO(0x9888), 0x084cc000 },
	{ _MMIO(0x9888), 0x0a4c4000 },
	{ _MMIO(0x9888), 0x0c4ea000 },
	{ _MMIO(0x9888), 0x0e4ea000 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x024ea000 },
	{ _MMIO(0x9888), 0x044ea000 },
	{ _MMIO(0x9888), 0x064e2000 },
	{ _MMIO(0x9888), 0x1c4f5500 },
	{ _MMIO(0x9888), 0x1a4f1554 },
	{ _MMIO(0x9888), 0x04642400 },
	{ _MMIO(0x9888), 0x22640000 },
	{ _MMIO(0x9888), 0x1a640000 },
	{ _MMIO(0x9888), 0x06650024 },
	{ _MMIO(0x9888), 0x22650000 },
	{ _MMIO(0x9888), 0x1c650000 },
	{ _MMIO(0x9888), 0x0c6c5327 },
	{ _MMIO(0x9888), 0x0e6c5425 },
	{ _MMIO(0x9888), 0x006c2a00 },
	{ _MMIO(0x9888), 0x026c285b },
	{ _MMIO(0x9888), 0x046c005c },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1a6c0900 },
	{ _MMIO(0x9888), 0x1c0f0aa0 },
	{ _MMIO(0x9888), 0x180f4000 },
	{ _MMIO(0x9888), 0x1a0f02aa },
	{ _MMIO(0x9888), 0x1c2c5400 },
	{ _MMIO(0x9888), 0x1e2c0001 },
	{ _MMIO(0x9888), 0x1a2c5550 },
	{ _MMIO(0x9888), 0x1993aa00 },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x05938000 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x2b904000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900421 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900001 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900420 },
	{ _MMIO(0x9888), 0x45900021 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900000 },
};
1178
1179static int
1180get_l3_1_mux_config(struct drm_i915_private *dev_priv,
1181 const struct i915_oa_reg **regs,
1182 int *lens)
1183{
1184 int n = 0;
1185
1186 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 2);
1187 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 2);
1188
1189 if (dev_priv->drm.pdev->revision >= 0x03) {
1190 regs[n] = mux_config_l3_1_0_sku_gte_0x03;
1191 lens[n] = ARRAY_SIZE(mux_config_l3_1_0_sku_gte_0x03);
1192 n++;
1193 }
1194 if (dev_priv->drm.pdev->revision < 0x03) {
1195 regs[n] = mux_config_l3_1_0_sku_lt_0x03;
1196 lens[n] = ARRAY_SIZE(mux_config_l3_1_0_sku_lt_0x03);
1197 n++;
1198 }
1199
1200 return n;
1201}
1202
/* Boolean counter register/value pairs for "rasterizer and pixel backend". */
static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x30800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000efff },
	{ _MMIO(0x2778), 0x00006000 },
	{ _MMIO(0x277c), 0x0000f3ff },
};
1215
/* Flexible EU counter register/value pairs for "rasterizer and pixel backend". */
static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1225
/*
 * Mux register/value pairs for "rasterizer and pixel backend"; applied
 * via get_rasterizer_and_pixel_backend_mux_config().  Opaque generated
 * data.
 */
static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x9888), 0x102d7800 },
	{ _MMIO(0x9888), 0x122d79e0 },
	{ _MMIO(0x9888), 0x0c2f0004 },
	{ _MMIO(0x9888), 0x100e3800 },
	{ _MMIO(0x9888), 0x180f0005 },
	{ _MMIO(0x9888), 0x002d0940 },
	{ _MMIO(0x9888), 0x022d802f },
	{ _MMIO(0x9888), 0x042d4013 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x0e2e0050 },
	{ _MMIO(0x9888), 0x022f0010 },
	{ _MMIO(0x9888), 0x002f0000 },
	{ _MMIO(0x9888), 0x084c8000 },
	{ _MMIO(0x9888), 0x0a4c4000 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e2000 },
	{ _MMIO(0x9888), 0x040e0480 },
	{ _MMIO(0x9888), 0x000e0000 },
	{ _MMIO(0x9888), 0x060f0027 },
	{ _MMIO(0x9888), 0x100f0000 },
	{ _MMIO(0x9888), 0x1a0f0040 },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x05938000 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x439014a0 },
	{ _MMIO(0x9888), 0x459000a4 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900001 },
	{ _MMIO(0x9888), 0x33900000 },
};
1268
1269static int
1270get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
1271 const struct i915_oa_reg **regs,
1272 int *lens)
1273{
1274 int n = 0;
1275
1276 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1277 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1278
1279 regs[n] = mux_config_rasterizer_and_pixel_backend;
1280 lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
1281 n++;
1282
1283 return n;
1284}
1285
/* Boolean counter register/value pairs for the "sampler" metric set. */
static const struct i915_oa_reg b_counter_config_sampler[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x70800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x0000c000 },
	{ _MMIO(0x2774), 0x0000e7ff },
	{ _MMIO(0x2778), 0x00003000 },
	{ _MMIO(0x277c), 0x0000f9ff },
	{ _MMIO(0x2780), 0x00000c00 },
	{ _MMIO(0x2784), 0x0000fe7f },
};
1300
/* Flexible EU counter register/value pairs for the "sampler" metric set. */
static const struct i915_oa_reg flex_eu_config_sampler[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1310
/*
 * Mux register/value pairs for the "sampler" metric set; applied via
 * get_sampler_mux_config().  Opaque generated data — keep entry order
 * exactly as emitted.
 */
static const struct i915_oa_reg mux_config_sampler[] = {
	{ _MMIO(0x9888), 0x121300a0 },
	{ _MMIO(0x9888), 0x141600ab },
	{ _MMIO(0x9888), 0x123300a0 },
	{ _MMIO(0x9888), 0x143600ab },
	{ _MMIO(0x9888), 0x125300a0 },
	{ _MMIO(0x9888), 0x145600ab },
	{ _MMIO(0x9888), 0x0c2d4000 },
	{ _MMIO(0x9888), 0x0e2d5000 },
	{ _MMIO(0x9888), 0x002d4000 },
	{ _MMIO(0x9888), 0x022d5000 },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x102e01a0 },
	{ _MMIO(0x9888), 0x0c2e5000 },
	{ _MMIO(0x9888), 0x0e2e0065 },
	{ _MMIO(0x9888), 0x164c2000 },
	{ _MMIO(0x9888), 0x044c8000 },
	{ _MMIO(0x9888), 0x064cc000 },
	{ _MMIO(0x9888), 0x084c4000 },
	{ _MMIO(0x9888), 0x0a4c4000 },
	{ _MMIO(0x9888), 0x0e4e8000 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x024ea000 },
	{ _MMIO(0x9888), 0x044e2000 },
	{ _MMIO(0x9888), 0x064e2000 },
	{ _MMIO(0x9888), 0x1c0f0800 },
	{ _MMIO(0x9888), 0x180f4000 },
	{ _MMIO(0x9888), 0x1a0f023f },
	{ _MMIO(0x9888), 0x1e2c0003 },
	{ _MMIO(0x9888), 0x1a2cc030 },
	{ _MMIO(0x9888), 0x04132180 },
	{ _MMIO(0x9888), 0x02130000 },
	{ _MMIO(0x9888), 0x0c148000 },
	{ _MMIO(0x9888), 0x0e142000 },
	{ _MMIO(0x9888), 0x04148000 },
	{ _MMIO(0x9888), 0x1e150140 },
	{ _MMIO(0x9888), 0x1c150040 },
	{ _MMIO(0x9888), 0x0c163000 },
	{ _MMIO(0x9888), 0x0e160068 },
	{ _MMIO(0x9888), 0x10160000 },
	{ _MMIO(0x9888), 0x18160000 },
	{ _MMIO(0x9888), 0x0a164000 },
	{ _MMIO(0x9888), 0x04330043 },
	{ _MMIO(0x9888), 0x02330000 },
	{ _MMIO(0x9888), 0x0234a000 },
	{ _MMIO(0x9888), 0x04342000 },
	{ _MMIO(0x9888), 0x1c350015 },
	{ _MMIO(0x9888), 0x02363460 },
	{ _MMIO(0x9888), 0x10360000 },
	{ _MMIO(0x9888), 0x04360000 },
	{ _MMIO(0x9888), 0x06360000 },
	{ _MMIO(0x9888), 0x08364000 },
	{ _MMIO(0x9888), 0x06530043 },
	{ _MMIO(0x9888), 0x02530000 },
	{ _MMIO(0x9888), 0x0e548000 },
	{ _MMIO(0x9888), 0x00548000 },
	{ _MMIO(0x9888), 0x06542000 },
	{ _MMIO(0x9888), 0x1e550400 },
	{ _MMIO(0x9888), 0x1a552000 },
	{ _MMIO(0x9888), 0x1c550100 },
	{ _MMIO(0x9888), 0x0e563000 },
	{ _MMIO(0x9888), 0x00563400 },
	{ _MMIO(0x9888), 0x10560000 },
	{ _MMIO(0x9888), 0x18560000 },
	{ _MMIO(0x9888), 0x02560000 },
	{ _MMIO(0x9888), 0x0c564000 },
	{ _MMIO(0x9888), 0x1993a800 },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x05938000 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b9014a0 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900001 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900820 },
	{ _MMIO(0x9888), 0x45901022 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900000 },
};
1405
1406static int
1407get_sampler_mux_config(struct drm_i915_private *dev_priv,
1408 const struct i915_oa_reg **regs,
1409 int *lens)
1410{
1411 int n = 0;
1412
1413 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1414 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1415
1416 regs[n] = mux_config_sampler;
1417 lens[n] = ARRAY_SIZE(mux_config_sampler);
1418 n++;
1419
1420 return n;
1421}
1422
/* Boolean counter register/value pairs for the "TDL_1" metric set. */
static const struct i915_oa_reg b_counter_config_tdl_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x00007fff },
	{ _MMIO(0x2778), 0x00000000 },
	{ _MMIO(0x277c), 0x00009fff },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000efff },
	{ _MMIO(0x2788), 0x00000000 },
	{ _MMIO(0x278c), 0x0000f3ff },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000fdff },
	{ _MMIO(0x2798), 0x00000000 },
	{ _MMIO(0x279c), 0x0000fe7f },
};
1443
/* Flexible EU counter register/value pairs for the "TDL_1" metric set. */
static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1453
/*
 * Mux register/value pairs for the "TDL_1" metric set; applied via
 * get_tdl_1_mux_config().  Opaque generated data — keep entry order
 * exactly as emitted.
 */
static const struct i915_oa_reg mux_config_tdl_1[] = {
	{ _MMIO(0x9888), 0x141a0000 },
	{ _MMIO(0x9888), 0x143a0000 },
	{ _MMIO(0x9888), 0x145a0000 },
	{ _MMIO(0x9888), 0x0c2d4000 },
	{ _MMIO(0x9888), 0x0e2d5000 },
	{ _MMIO(0x9888), 0x002d4000 },
	{ _MMIO(0x9888), 0x022d5000 },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x102e0150 },
	{ _MMIO(0x9888), 0x0c2e5000 },
	{ _MMIO(0x9888), 0x0e2e006a },
	{ _MMIO(0x9888), 0x124c8000 },
	{ _MMIO(0x9888), 0x144c8000 },
	{ _MMIO(0x9888), 0x164c2000 },
	{ _MMIO(0x9888), 0x044c8000 },
	{ _MMIO(0x9888), 0x064c4000 },
	{ _MMIO(0x9888), 0x0a4c4000 },
	{ _MMIO(0x9888), 0x0c4e8000 },
	{ _MMIO(0x9888), 0x0e4ea000 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x024e2000 },
	{ _MMIO(0x9888), 0x064e2000 },
	{ _MMIO(0x9888), 0x1c0f0bc0 },
	{ _MMIO(0x9888), 0x180f4000 },
	{ _MMIO(0x9888), 0x1a0f0302 },
	{ _MMIO(0x9888), 0x1e2c0003 },
	{ _MMIO(0x9888), 0x1a2c00f0 },
	{ _MMIO(0x9888), 0x021a3080 },
	{ _MMIO(0x9888), 0x041a31e5 },
	{ _MMIO(0x9888), 0x02148000 },
	{ _MMIO(0x9888), 0x0414a000 },
	{ _MMIO(0x9888), 0x1c150054 },
	{ _MMIO(0x9888), 0x06168000 },
	{ _MMIO(0x9888), 0x08168000 },
	{ _MMIO(0x9888), 0x0a168000 },
	{ _MMIO(0x9888), 0x0c3a3280 },
	{ _MMIO(0x9888), 0x0e3a0063 },
	{ _MMIO(0x9888), 0x063a0061 },
	{ _MMIO(0x9888), 0x023a0000 },
	{ _MMIO(0x9888), 0x0c348000 },
	{ _MMIO(0x9888), 0x0e342000 },
	{ _MMIO(0x9888), 0x06342000 },
	{ _MMIO(0x9888), 0x1e350140 },
	{ _MMIO(0x9888), 0x1c350100 },
	{ _MMIO(0x9888), 0x18360028 },
	{ _MMIO(0x9888), 0x0c368000 },
	{ _MMIO(0x9888), 0x0e5a3080 },
	{ _MMIO(0x9888), 0x005a3280 },
	{ _MMIO(0x9888), 0x025a0063 },
	{ _MMIO(0x9888), 0x0e548000 },
	{ _MMIO(0x9888), 0x00548000 },
	{ _MMIO(0x9888), 0x02542000 },
	{ _MMIO(0x9888), 0x1e550400 },
	{ _MMIO(0x9888), 0x1a552000 },
	{ _MMIO(0x9888), 0x1c550001 },
	{ _MMIO(0x9888), 0x18560080 },
	{ _MMIO(0x9888), 0x02568000 },
	{ _MMIO(0x9888), 0x04568000 },
	{ _MMIO(0x9888), 0x1993a800 },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x05938000 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900420 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900000 },
	{ _MMIO(0x9888), 0x45901084 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900001 },
};
1541
1542static int
1543get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
1544 const struct i915_oa_reg **regs,
1545 int *lens)
1546{
1547 int n = 0;
1548
1549 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1550 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1551
1552 regs[n] = mux_config_tdl_1;
1553 lens[n] = ARRAY_SIZE(mux_config_tdl_1);
1554 n++;
1555
1556 return n;
1557}
1558
/* Boolean counter register/value pairs for the "TDL_2" metric set. */
static const struct i915_oa_reg b_counter_config_tdl_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};
1567
/* Flexible EU counter register/value pairs for the "TDL_2" metric set. */
static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1577
/*
 * Mux register/value pairs for the "TDL_2" metric set; applied via
 * get_tdl_2_mux_config().  Opaque generated data.
 */
static const struct i915_oa_reg mux_config_tdl_2[] = {
	{ _MMIO(0x9888), 0x141a026b },
	{ _MMIO(0x9888), 0x143a0173 },
	{ _MMIO(0x9888), 0x145a026b },
	{ _MMIO(0x9888), 0x002d4000 },
	{ _MMIO(0x9888), 0x022d5000 },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x0c2e5000 },
	{ _MMIO(0x9888), 0x0e2e0069 },
	{ _MMIO(0x9888), 0x044c8000 },
	{ _MMIO(0x9888), 0x064cc000 },
	{ _MMIO(0x9888), 0x0a4c4000 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x024ea000 },
	{ _MMIO(0x9888), 0x064e2000 },
	{ _MMIO(0x9888), 0x180f6000 },
	{ _MMIO(0x9888), 0x1a0f030a },
	{ _MMIO(0x9888), 0x1a2c03c0 },
	{ _MMIO(0x9888), 0x041a37e7 },
	{ _MMIO(0x9888), 0x021a0000 },
	{ _MMIO(0x9888), 0x0414a000 },
	{ _MMIO(0x9888), 0x1c150050 },
	{ _MMIO(0x9888), 0x08168000 },
	{ _MMIO(0x9888), 0x0a168000 },
	{ _MMIO(0x9888), 0x003a3380 },
	{ _MMIO(0x9888), 0x063a006f },
	{ _MMIO(0x9888), 0x023a0000 },
	{ _MMIO(0x9888), 0x00348000 },
	{ _MMIO(0x9888), 0x06342000 },
	{ _MMIO(0x9888), 0x1a352000 },
	{ _MMIO(0x9888), 0x1c350100 },
	{ _MMIO(0x9888), 0x02368000 },
	{ _MMIO(0x9888), 0x0c368000 },
	{ _MMIO(0x9888), 0x025a37e7 },
	{ _MMIO(0x9888), 0x0254a000 },
	{ _MMIO(0x9888), 0x1c550005 },
	{ _MMIO(0x9888), 0x04568000 },
	{ _MMIO(0x9888), 0x06568000 },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x05938000 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900020 },
	{ _MMIO(0x9888), 0x45901080 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900001 },
	{ _MMIO(0x9888), 0x33900000 },
};
1637
1638static int
1639get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
1640 const struct i915_oa_reg **regs,
1641 int *lens)
1642{
1643 int n = 0;
1644
1645 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1646 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1647
1648 regs[n] = mux_config_tdl_2;
1649 lens[n] = ARRAY_SIZE(mux_config_tdl_2);
1650 n++;
1651
1652 return n;
1653}
1654
/* Boolean (B/C) counter register programming for the COMPUTE_EXTRA metric set. */
static const struct i915_oa_reg b_counter_config_compute_extra[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};
1663
/* Flexible EU counter register programming for the COMPUTE_EXTRA metric set. */
static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
	{ _MMIO(0xe458), 0x00001000 },
	{ _MMIO(0xe558), 0x00003002 },
	{ _MMIO(0xe658), 0x00005004 },
	{ _MMIO(0xe758), 0x00011010 },
	{ _MMIO(0xe45c), 0x00050012 },
	{ _MMIO(0xe55c), 0x00052051 },
	{ _MMIO(0xe65c), 0x00000008 },
};
1673
/*
 * NOA MUX programming for the COMPUTE_EXTRA metric set; every write
 * targets the single 0x9888 NOA config register.
 */
static const struct i915_oa_reg mux_config_compute_extra[] = {
	{ _MMIO(0x9888), 0x141a001f },
	{ _MMIO(0x9888), 0x143a001f },
	{ _MMIO(0x9888), 0x145a001f },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x0e2e0094 },
	{ _MMIO(0x9888), 0x084cc000 },
	{ _MMIO(0x9888), 0x044ea000 },
	{ _MMIO(0x9888), 0x1a0f00e0 },
	{ _MMIO(0x9888), 0x1a2c0c00 },
	{ _MMIO(0x9888), 0x061a0063 },
	{ _MMIO(0x9888), 0x021a0000 },
	{ _MMIO(0x9888), 0x06142000 },
	{ _MMIO(0x9888), 0x1c150100 },
	{ _MMIO(0x9888), 0x0c168000 },
	{ _MMIO(0x9888), 0x043a3180 },
	{ _MMIO(0x9888), 0x023a0000 },
	{ _MMIO(0x9888), 0x04348000 },
	{ _MMIO(0x9888), 0x1c350040 },
	{ _MMIO(0x9888), 0x0a368000 },
	{ _MMIO(0x9888), 0x045a0063 },
	{ _MMIO(0x9888), 0x025a0000 },
	{ _MMIO(0x9888), 0x04542000 },
	{ _MMIO(0x9888), 0x1c550010 },
	{ _MMIO(0x9888), 0x08568000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900400 },
	{ _MMIO(0x9888), 0x47900004 },
	{ _MMIO(0x9888), 0x33900000 },
};
1712
1713static int
1714get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
1715 const struct i915_oa_reg **regs,
1716 int *lens)
1717{
1718 int n = 0;
1719
1720 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1721 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1722
1723 regs[n] = mux_config_compute_extra;
1724 lens[n] = ARRAY_SIZE(mux_config_compute_extra);
1725 n++;
1726
1727 return n;
1728}
1729
/*
 * Boolean (B/C) counter programming for the TEST_OA self-test set,
 * including the OASTARTTRIG/OAREPORTTRIG-range (0x2770..0x27ac)
 * select/mask pairs used to validate OA report generation.
 */
static const struct i915_oa_reg b_counter_config_test_oa[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2770), 0x00000004 },
	{ _MMIO(0x2774), 0x00000000 },
	{ _MMIO(0x2778), 0x00000003 },
	{ _MMIO(0x277c), 0x00000000 },
	{ _MMIO(0x2780), 0x00000007 },
	{ _MMIO(0x2784), 0x00000000 },
	{ _MMIO(0x2788), 0x00100002 },
	{ _MMIO(0x278c), 0x0000fff7 },
	{ _MMIO(0x2790), 0x00100002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00100082 },
	{ _MMIO(0x279c), 0x0000ffef },
	{ _MMIO(0x27a0), 0x001000c2 },
	{ _MMIO(0x27a4), 0x0000ffe7 },
	{ _MMIO(0x27a8), 0x00100001 },
	{ _MMIO(0x27ac), 0x0000ffe7 },
};
1754
/* The TEST_OA set programs no flexible EU counters. */
static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
1757
/* NOA MUX programming for the TEST_OA self-test metric set. */
static const struct i915_oa_reg mux_config_test_oa[] = {
	{ _MMIO(0x9888), 0x19800000 },
	{ _MMIO(0x9888), 0x07800063 },
	{ _MMIO(0x9888), 0x11800000 },
	{ _MMIO(0x9888), 0x23810008 },
	{ _MMIO(0x9888), 0x1d950400 },
	{ _MMIO(0x9888), 0x0f922000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1771
1772static int
1773get_test_oa_mux_config(struct drm_i915_private *dev_priv,
1774 const struct i915_oa_reg **regs,
1775 int *lens)
1776{
1777 int n = 0;
1778
1779 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1780 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1781
1782 regs[n] = mux_config_test_oa;
1783 lens[n] = ARRAY_SIZE(mux_config_test_oa);
1784 n++;
1785
1786 return n;
1787}
1788
/*
 * i915_oa_select_metric_set_bxt - resolve the selected OA metric set
 * into register programming for Broxton.
 *
 * Reads dev_priv->perf.oa.metrics_set and points the OA state
 * (mux_regs/mux_regs_lens, b_counter_regs(_len), flex_regs(_len)) at the
 * matching static configuration tables defined in this file.  All state
 * is cleared first so a failed lookup leaves no stale programming.
 *
 * Return: 0 on success; -EINVAL if no suitable MUX config exists for a
 * recognized set (shouldn't happen for sets advertised through sysfs);
 * -ENODEV for an unrecognized metric set ID.
 */
int i915_oa_select_metric_set_bxt(struct drm_i915_private *dev_priv)
{
	dev_priv->perf.oa.n_mux_configs = 0;
	dev_priv->perf.oa.b_counter_regs = NULL;
	dev_priv->perf.oa.b_counter_regs_len = 0;
	dev_priv->perf.oa.flex_regs = NULL;
	dev_priv->perf.oa.flex_regs_len = 0;

	switch (dev_priv->perf.oa.metrics_set) {
	case METRIC_SET_ID_RENDER_BASIC:
		dev_priv->perf.oa.n_mux_configs =
			get_render_basic_mux_config(dev_priv,
						    dev_priv->perf.oa.mux_regs,
						    dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_render_basic;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_render_basic);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_render_basic;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_render_basic);

		return 0;
	case METRIC_SET_ID_COMPUTE_BASIC:
		dev_priv->perf.oa.n_mux_configs =
			get_compute_basic_mux_config(dev_priv,
						     dev_priv->perf.oa.mux_regs,
						     dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_compute_basic;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_compute_basic);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_compute_basic;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_compute_basic);

		return 0;
	case METRIC_SET_ID_RENDER_PIPE_PROFILE:
		dev_priv->perf.oa.n_mux_configs =
			get_render_pipe_profile_mux_config(dev_priv,
							   dev_priv->perf.oa.mux_regs,
							   dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_render_pipe_profile;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_render_pipe_profile);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_render_pipe_profile;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_render_pipe_profile);

		return 0;
	case METRIC_SET_ID_MEMORY_READS:
		dev_priv->perf.oa.n_mux_configs =
			get_memory_reads_mux_config(dev_priv,
						    dev_priv->perf.oa.mux_regs,
						    dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_memory_reads;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_memory_reads);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_memory_reads;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_memory_reads);

		return 0;
	case METRIC_SET_ID_MEMORY_WRITES:
		dev_priv->perf.oa.n_mux_configs =
			get_memory_writes_mux_config(dev_priv,
						     dev_priv->perf.oa.mux_regs,
						     dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_memory_writes;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_memory_writes);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_memory_writes;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_memory_writes);

		return 0;
	case METRIC_SET_ID_COMPUTE_EXTENDED:
		dev_priv->perf.oa.n_mux_configs =
			get_compute_extended_mux_config(dev_priv,
							dev_priv->perf.oa.mux_regs,
							dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_compute_extended;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_compute_extended);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_compute_extended;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_compute_extended);

		return 0;
	case METRIC_SET_ID_COMPUTE_L3_CACHE:
		dev_priv->perf.oa.n_mux_configs =
			get_compute_l3_cache_mux_config(dev_priv,
							dev_priv->perf.oa.mux_regs,
							dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_compute_l3_cache;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_compute_l3_cache);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_compute_l3_cache;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_compute_l3_cache);

		return 0;
	case METRIC_SET_ID_HDC_AND_SF:
		dev_priv->perf.oa.n_mux_configs =
			get_hdc_and_sf_mux_config(dev_priv,
						  dev_priv->perf.oa.mux_regs,
						  dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_hdc_and_sf;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_hdc_and_sf);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_hdc_and_sf;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_hdc_and_sf);

		return 0;
	case METRIC_SET_ID_L3_1:
		dev_priv->perf.oa.n_mux_configs =
			get_l3_1_mux_config(dev_priv,
					    dev_priv->perf.oa.mux_regs,
					    dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_l3_1;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_l3_1);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_l3_1;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_l3_1);

		return 0;
	case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
		dev_priv->perf.oa.n_mux_configs =
			get_rasterizer_and_pixel_backend_mux_config(dev_priv,
								    dev_priv->perf.oa.mux_regs,
								    dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_rasterizer_and_pixel_backend;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_rasterizer_and_pixel_backend;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);

		return 0;
	case METRIC_SET_ID_SAMPLER:
		dev_priv->perf.oa.n_mux_configs =
			get_sampler_mux_config(dev_priv,
					       dev_priv->perf.oa.mux_regs,
					       dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_sampler;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_sampler);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_sampler;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_sampler);

		return 0;
	case METRIC_SET_ID_TDL_1:
		dev_priv->perf.oa.n_mux_configs =
			get_tdl_1_mux_config(dev_priv,
					     dev_priv->perf.oa.mux_regs,
					     dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_tdl_1;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_tdl_1);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_tdl_1;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_tdl_1);

		return 0;
	case METRIC_SET_ID_TDL_2:
		dev_priv->perf.oa.n_mux_configs =
			get_tdl_2_mux_config(dev_priv,
					     dev_priv->perf.oa.mux_regs,
					     dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_tdl_2;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_tdl_2);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_tdl_2;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_tdl_2);

		return 0;
	case METRIC_SET_ID_COMPUTE_EXTRA:
		dev_priv->perf.oa.n_mux_configs =
			get_compute_extra_mux_config(dev_priv,
						     dev_priv->perf.oa.mux_regs,
						     dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_compute_extra;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_compute_extra);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_compute_extra;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_compute_extra);

		return 0;
	case METRIC_SET_ID_TEST_OA:
		dev_priv->perf.oa.n_mux_configs =
			get_test_oa_mux_config(dev_priv,
					       dev_priv->perf.oa.mux_regs,
					       dev_priv->perf.oa.mux_regs_lens);
		if (dev_priv->perf.oa.n_mux_configs == 0) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_test_oa;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_test_oa);

		dev_priv->perf.oa.flex_regs =
			flex_eu_config_test_oa;
		dev_priv->perf.oa.flex_regs_len =
			ARRAY_SIZE(flex_eu_config_test_oa);

		return 0;
	default:
		return -ENODEV;
	}
}
2192
/* sysfs "id" attribute plumbing for the RENDER_BASIC metric set. */
static ssize_t
show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
}

static struct device_attribute dev_attr_render_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_render_basic_id,
	.store = NULL,
};

static struct attribute *attrs_render_basic[] = {
	&dev_attr_render_basic_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_render_basic = {
	.name = "22b9519a-e9ba-4c41-8b54-f4f8ca14fa0a",
	.attrs = attrs_render_basic,
};
2214
/* sysfs "id" attribute plumbing for the COMPUTE_BASIC metric set. */
static ssize_t
show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
}

static struct device_attribute dev_attr_compute_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_basic_id,
	.store = NULL,
};

static struct attribute *attrs_compute_basic[] = {
	&dev_attr_compute_basic_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_compute_basic = {
	.name = "012d72cf-82a9-4d25-8ddf-74076fd30797",
	.attrs = attrs_compute_basic,
};
2236
/* sysfs "id" attribute plumbing for the RENDER_PIPE_PROFILE metric set. */
static ssize_t
show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
}

static struct device_attribute dev_attr_render_pipe_profile_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_render_pipe_profile_id,
	.store = NULL,
};

static struct attribute *attrs_render_pipe_profile[] = {
	&dev_attr_render_pipe_profile_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_render_pipe_profile = {
	.name = "ce416533-e49e-4211-80af-ec513590a914",
	.attrs = attrs_render_pipe_profile,
};
2258
/* sysfs "id" attribute plumbing for the MEMORY_READS metric set. */
static ssize_t
show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
}

static struct device_attribute dev_attr_memory_reads_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_memory_reads_id,
	.store = NULL,
};

static struct attribute *attrs_memory_reads[] = {
	&dev_attr_memory_reads_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_memory_reads = {
	.name = "398e2452-18d7-42d0-b241-e4d0a9148ada",
	.attrs = attrs_memory_reads,
};
2280
/* sysfs "id" attribute plumbing for the MEMORY_WRITES metric set. */
static ssize_t
show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
}

static struct device_attribute dev_attr_memory_writes_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_memory_writes_id,
	.store = NULL,
};

static struct attribute *attrs_memory_writes[] = {
	&dev_attr_memory_writes_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_memory_writes = {
	.name = "d324a0d6-7269-4847-a5c2-6f71ddc7fed5",
	.attrs = attrs_memory_writes,
};
2302
/* sysfs "id" attribute plumbing for the COMPUTE_EXTENDED metric set. */
static ssize_t
show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
}

static struct device_attribute dev_attr_compute_extended_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_extended_id,
	.store = NULL,
};

static struct attribute *attrs_compute_extended[] = {
	&dev_attr_compute_extended_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_compute_extended = {
	.name = "caf3596a-7bb1-4dec-b3b3-2a080d283b49",
	.attrs = attrs_compute_extended,
};
2324
/* sysfs "id" attribute plumbing for the COMPUTE_L3_CACHE metric set. */
static ssize_t
show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
}

static struct device_attribute dev_attr_compute_l3_cache_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_l3_cache_id,
	.store = NULL,
};

static struct attribute *attrs_compute_l3_cache[] = {
	&dev_attr_compute_l3_cache_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_compute_l3_cache = {
	.name = "49b956e2-d5b9-47e0-9d8a-cee5e8cec527",
	.attrs = attrs_compute_l3_cache,
};
2346
/* sysfs "id" attribute plumbing for the HDC_AND_SF metric set. */
static ssize_t
show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
}

static struct device_attribute dev_attr_hdc_and_sf_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_hdc_and_sf_id,
	.store = NULL,
};

static struct attribute *attrs_hdc_and_sf[] = {
	&dev_attr_hdc_and_sf_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_hdc_and_sf = {
	.name = "f64ef50a-bdba-4b35-8f09-203c13d8ee5a",
	.attrs = attrs_hdc_and_sf,
};
2368
/* sysfs "id" attribute plumbing for the L3_1 metric set. */
static ssize_t
show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
}

static struct device_attribute dev_attr_l3_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_1_id,
	.store = NULL,
};

static struct attribute *attrs_l3_1[] = {
	&dev_attr_l3_1_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_l3_1 = {
	.name = "00ad5a41-7eab-4f7a-9103-49d411c67219",
	.attrs = attrs_l3_1,
};
2390
/* sysfs "id" attribute plumbing for the RASTERIZER_AND_PIXEL_BACKEND metric set. */
static ssize_t
show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
}

static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_rasterizer_and_pixel_backend_id,
	.store = NULL,
};

static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
	&dev_attr_rasterizer_and_pixel_backend_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_rasterizer_and_pixel_backend = {
	.name = "46dc44ca-491c-4cc1-a951-e7b3e62bf02b",
	.attrs = attrs_rasterizer_and_pixel_backend,
};
2412
/* sysfs "id" attribute plumbing for the SAMPLER metric set. */
static ssize_t
show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
}

static struct device_attribute dev_attr_sampler_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_sampler_id,
	.store = NULL,
};

static struct attribute *attrs_sampler[] = {
	&dev_attr_sampler_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_sampler = {
	.name = "8364e2a8-af63-40af-b0d5-42969a255654",
	.attrs = attrs_sampler,
};
2434
/* sysfs "id" attribute plumbing for the TDL_1 metric set. */
static ssize_t
show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
}

static struct device_attribute dev_attr_tdl_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_tdl_1_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_1[] = {
	&dev_attr_tdl_1_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_tdl_1 = {
	.name = "175c8092-cb25-4d1e-8dc7-b4fdd39e2d92",
	.attrs = attrs_tdl_1,
};
2456
/* sysfs "id" attribute plumbing for the TDL_2 metric set. */
static ssize_t
show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
}

static struct device_attribute dev_attr_tdl_2_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_tdl_2_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_2[] = {
	&dev_attr_tdl_2_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_tdl_2 = {
	.name = "d260f03f-b34d-4b49-a44e-436819117332",
	.attrs = attrs_tdl_2,
};
2478
/* sysfs "id" attribute plumbing for the COMPUTE_EXTRA metric set. */
static ssize_t
show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
}

static struct device_attribute dev_attr_compute_extra_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_extra_id,
	.store = NULL,
};

static struct attribute *attrs_compute_extra[] = {
	&dev_attr_compute_extra_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_compute_extra = {
	.name = "fa6ecf21-2cb8-4d0b-9308-6e4a7b4ca87a",
	.attrs = attrs_compute_extra,
};
2500
/* sysfs "id" attribute plumbing for the TEST_OA self-test metric set. */
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
}

static struct device_attribute dev_attr_test_oa_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_test_oa_id,
	.store = NULL,
};

static struct attribute *attrs_test_oa[] = {
	&dev_attr_test_oa_id.attr,
	NULL,
};

/* Group name is the metric set's GUID as exposed to userspace. */
static struct attribute_group group_test_oa = {
	.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612",
	.attrs = attrs_test_oa,
};
2522
2523int
2524i915_perf_register_sysfs_bxt(struct drm_i915_private *dev_priv)
2525{
2526 const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
2527 int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
2528 int ret = 0;
2529
2530 if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
2531 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
2532 if (ret)
2533 goto error_render_basic;
2534 }
2535 if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
2536 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
2537 if (ret)
2538 goto error_compute_basic;
2539 }
2540 if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
2541 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
2542 if (ret)
2543 goto error_render_pipe_profile;
2544 }
2545 if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
2546 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
2547 if (ret)
2548 goto error_memory_reads;
2549 }
2550 if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
2551 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
2552 if (ret)
2553 goto error_memory_writes;
2554 }
2555 if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
2556 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
2557 if (ret)
2558 goto error_compute_extended;
2559 }
2560 if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
2561 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
2562 if (ret)
2563 goto error_compute_l3_cache;
2564 }
2565 if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
2566 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
2567 if (ret)
2568 goto error_hdc_and_sf;
2569 }
2570 if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
2571 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
2572 if (ret)
2573 goto error_l3_1;
2574 }
2575 if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
2576 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
2577 if (ret)
2578 goto error_rasterizer_and_pixel_backend;
2579 }
2580 if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
2581 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
2582 if (ret)
2583 goto error_sampler;
2584 }
2585 if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
2586 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
2587 if (ret)
2588 goto error_tdl_1;
2589 }
2590 if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
2591 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
2592 if (ret)
2593 goto error_tdl_2;
2594 }
2595 if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
2596 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
2597 if (ret)
2598 goto error_compute_extra;
2599 }
2600 if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
2601 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
2602 if (ret)
2603 goto error_test_oa;
2604 }
2605
2606 return 0;
2607
2608error_test_oa:
2609 if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
2610 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
2611error_compute_extra:
2612 if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
2613 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
2614error_tdl_2:
2615 if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
2616 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
2617error_tdl_1:
2618 if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
2619 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
2620error_sampler:
2621 if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
2622 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
2623error_rasterizer_and_pixel_backend:
2624 if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
2625 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
2626error_l3_1:
2627 if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
2628 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
2629error_hdc_and_sf:
2630 if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
2631 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
2632error_compute_l3_cache:
2633 if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
2634 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
2635error_compute_extended:
2636 if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
2637 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
2638error_memory_writes:
2639 if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
2640 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
2641error_memory_reads:
2642 if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
2643 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
2644error_render_pipe_profile:
2645 if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
2646 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
2647error_compute_basic:
2648 if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
2649 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
2650error_render_basic:
2651 return ret;
2652}
2653
2654void
2655i915_perf_unregister_sysfs_bxt(struct drm_i915_private *dev_priv)
2656{
2657 const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
2658 int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
2659
2660 if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
2661 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
2662 if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
2663 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
2664 if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
2665 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
2666 if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
2667 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
2668 if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
2669 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
2670 if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
2671 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
2672 if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
2673 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
2674 if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
2675 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
2676 if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
2677 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
2678 if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
2679 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
2680 if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
2681 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
2682 if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
2683 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
2684 if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
2685 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
2686 if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
2687 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
2688 if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
2689 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
2690}
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/i915_oa_bxt.h
new file mode 100644
index 000000000000..6cf7ba746e7e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.h
@@ -0,0 +1,40 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
#ifndef __I915_OA_BXT_H__
#define __I915_OA_BXT_H__

/* Count of OA metric sets this autogenerated Broxton table exposes. */
extern int i915_oa_n_builtin_metric_sets_bxt;

/* NOTE(review): presumably resolves the currently requested metric set
 * into OA register configs -- confirm against i915_oa_bxt.c. */
extern int i915_oa_select_metric_set_bxt(struct drm_i915_private *dev_priv);

/* Create a sysfs group per available metric set under perf.metrics_kobj;
 * returns 0 on success, negative errno on failure. */
extern int i915_perf_register_sysfs_bxt(struct drm_i915_private *dev_priv);

/* Remove the sysfs groups created by i915_perf_register_sysfs_bxt(). */
extern void i915_perf_unregister_sysfs_bxt(struct drm_i915_private *dev_priv);

#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/i915_oa_chv.c
new file mode 100644
index 000000000000..aa6bece7e75f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_chv.c
@@ -0,0 +1,2873 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#include <linux/sysfs.h>
30
31#include "i915_drv.h"
32#include "i915_oa_chv.h"
33
/*
 * Identifiers for the Cherryview OA metric sets defined in this file.
 * Numbering starts at 1; NOTE(review): 0 presumably means "no metric set
 * selected" -- confirm against the users of this enum.
 */
enum metric_set_id {
	METRIC_SET_ID_RENDER_BASIC = 1,
	METRIC_SET_ID_COMPUTE_BASIC,
	METRIC_SET_ID_RENDER_PIPE_PROFILE,
	METRIC_SET_ID_HDC_AND_SF,
	METRIC_SET_ID_L3_1,
	METRIC_SET_ID_L3_2,
	METRIC_SET_ID_L3_3,
	METRIC_SET_ID_L3_4,
	METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
	METRIC_SET_ID_SAMPLER_1,
	METRIC_SET_ID_SAMPLER_2,
	METRIC_SET_ID_TDL_1,
	METRIC_SET_ID_TDL_2,
	METRIC_SET_ID_TEST_OA,
};

/* Must stay in sync with the enum above: 14 metric sets are defined. */
int i915_oa_n_builtin_metric_sets_chv = 14;
52
/* B-counter register writes for the render_basic metric set
 * (autogenerated values -- do not hand-edit). */
static const struct i915_oa_reg b_counter_config_render_basic[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};

/* Flex EU counter register writes for render_basic. */
static const struct i915_oa_reg flex_eu_config_render_basic[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/* MUX programming for render_basic: every entry writes the 0x9888
 * signal-selection register. */
static const struct i915_oa_reg mux_config_render_basic[] = {
	{ _MMIO(0x9888), 0x59800000 },
	{ _MMIO(0x9888), 0x59800001 },
	{ _MMIO(0x9888), 0x285a0006 },
	{ _MMIO(0x9888), 0x2c110014 },
	{ _MMIO(0x9888), 0x2e110000 },
	{ _MMIO(0x9888), 0x2c310014 },
	{ _MMIO(0x9888), 0x2e310000 },
	{ _MMIO(0x9888), 0x2b8303df },
	{ _MMIO(0x9888), 0x3580024f },
	{ _MMIO(0x9888), 0x00580888 },
	{ _MMIO(0x9888), 0x1e5a0015 },
	{ _MMIO(0x9888), 0x205a0014 },
	{ _MMIO(0x9888), 0x045a0000 },
	{ _MMIO(0x9888), 0x025a0000 },
	{ _MMIO(0x9888), 0x02180500 },
	{ _MMIO(0x9888), 0x00190555 },
	{ _MMIO(0x9888), 0x021d0500 },
	{ _MMIO(0x9888), 0x021f0a00 },
	{ _MMIO(0x9888), 0x00380444 },
	{ _MMIO(0x9888), 0x02390500 },
	{ _MMIO(0x9888), 0x003a0666 },
	{ _MMIO(0x9888), 0x00100111 },
	{ _MMIO(0x9888), 0x06110030 },
	{ _MMIO(0x9888), 0x0a110031 },
	{ _MMIO(0x9888), 0x0e110046 },
	{ _MMIO(0x9888), 0x04110000 },
	{ _MMIO(0x9888), 0x00110000 },
	{ _MMIO(0x9888), 0x00130111 },
	{ _MMIO(0x9888), 0x00300444 },
	{ _MMIO(0x9888), 0x08310030 },
	{ _MMIO(0x9888), 0x0c310031 },
	{ _MMIO(0x9888), 0x10310046 },
	{ _MMIO(0x9888), 0x04310000 },
	{ _MMIO(0x9888), 0x00310000 },
	{ _MMIO(0x9888), 0x00330444 },
	{ _MMIO(0x9888), 0x038a0a00 },
	{ _MMIO(0x9888), 0x018b0fff },
	{ _MMIO(0x9888), 0x038b0a00 },
	{ _MMIO(0x9888), 0x01855000 },
	{ _MMIO(0x9888), 0x03850055 },
	{ _MMIO(0x9888), 0x13830021 },
	{ _MMIO(0x9888), 0x15830020 },
	{ _MMIO(0x9888), 0x1783002f },
	{ _MMIO(0x9888), 0x1983002e },
	{ _MMIO(0x9888), 0x1b83002d },
	{ _MMIO(0x9888), 0x1d83002c },
	{ _MMIO(0x9888), 0x05830000 },
	{ _MMIO(0x9888), 0x01840555 },
	{ _MMIO(0x9888), 0x03840500 },
	{ _MMIO(0x9888), 0x23800074 },
	{ _MMIO(0x9888), 0x2580007d },
	{ _MMIO(0x9888), 0x05800000 },
	{ _MMIO(0x9888), 0x01805000 },
	{ _MMIO(0x9888), 0x03800055 },
	{ _MMIO(0x9888), 0x01865000 },
	{ _MMIO(0x9888), 0x03860055 },
	{ _MMIO(0x9888), 0x01875000 },
	{ _MMIO(0x9888), 0x03870055 },
	{ _MMIO(0x9888), 0x418000aa },
	{ _MMIO(0x9888), 0x4380000a },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x4780000a },
	{ _MMIO(0x9888), 0x49800000 },
	{ _MMIO(0x9888), 0x4b800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x55800000 },
	{ _MMIO(0x9888), 0x57800000 },
	{ _MMIO(0x9888), 0x59800000 },
};
144
145static int
146get_render_basic_mux_config(struct drm_i915_private *dev_priv,
147 const struct i915_oa_reg **regs,
148 int *lens)
149{
150 int n = 0;
151
152 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
153 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
154
155 regs[n] = mux_config_render_basic;
156 lens[n] = ARRAY_SIZE(mux_config_render_basic);
157 n++;
158
159 return n;
160}
161
/* B-counter register writes for the compute_basic metric set
 * (autogenerated values -- do not hand-edit). */
static const struct i915_oa_reg b_counter_config_compute_basic[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};

/* Flex EU counter register writes for compute_basic. */
static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};

/* MUX programming for compute_basic: every entry writes the 0x9888
 * signal-selection register. */
static const struct i915_oa_reg mux_config_compute_basic[] = {
	{ _MMIO(0x9888), 0x59800000 },
	{ _MMIO(0x9888), 0x59800001 },
	{ _MMIO(0x9888), 0x2e5800e0 },
	{ _MMIO(0x9888), 0x2e3800e0 },
	{ _MMIO(0x9888), 0x3580024f },
	{ _MMIO(0x9888), 0x3d800140 },
	{ _MMIO(0x9888), 0x08580042 },
	{ _MMIO(0x9888), 0x0c580040 },
	{ _MMIO(0x9888), 0x1058004c },
	{ _MMIO(0x9888), 0x1458004b },
	{ _MMIO(0x9888), 0x04580000 },
	{ _MMIO(0x9888), 0x00580000 },
	{ _MMIO(0x9888), 0x00195555 },
	{ _MMIO(0x9888), 0x06380042 },
	{ _MMIO(0x9888), 0x0a380040 },
	{ _MMIO(0x9888), 0x0e38004c },
	{ _MMIO(0x9888), 0x1238004b },
	{ _MMIO(0x9888), 0x04380000 },
	{ _MMIO(0x9888), 0x00384444 },
	{ _MMIO(0x9888), 0x003a5555 },
	{ _MMIO(0x9888), 0x018bffff },
	{ _MMIO(0x9888), 0x01845555 },
	{ _MMIO(0x9888), 0x17800074 },
	{ _MMIO(0x9888), 0x1980007d },
	{ _MMIO(0x9888), 0x1b80007c },
	{ _MMIO(0x9888), 0x1d8000b6 },
	{ _MMIO(0x9888), 0x1f8000b7 },
	{ _MMIO(0x9888), 0x05800000 },
	{ _MMIO(0x9888), 0x03800000 },
	{ _MMIO(0x9888), 0x418000aa },
	{ _MMIO(0x9888), 0x438000aa },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x47800000 },
	{ _MMIO(0x9888), 0x4980012a },
	{ _MMIO(0x9888), 0x4b80012a },
	{ _MMIO(0x9888), 0x4d80012a },
	{ _MMIO(0x9888), 0x4f80012a },
	{ _MMIO(0x9888), 0x518001ce },
	{ _MMIO(0x9888), 0x538001ce },
	{ _MMIO(0x9888), 0x5580000e },
	{ _MMIO(0x9888), 0x59800000 },
};
222
223static int
224get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
225 const struct i915_oa_reg **regs,
226 int *lens)
227{
228 int n = 0;
229
230 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
231 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
232
233 regs[n] = mux_config_compute_basic;
234 lens[n] = ARRAY_SIZE(mux_config_compute_basic);
235 n++;
236
237 return n;
238}
239
/* B-counter register writes for the render_pipe_profile metric set
 * (autogenerated values -- do not hand-edit). */
static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2770), 0x0007ffea },
	{ _MMIO(0x2774), 0x00007ffc },
	{ _MMIO(0x2778), 0x0007affa },
	{ _MMIO(0x277c), 0x0000f5fd },
	{ _MMIO(0x2780), 0x00079ffa },
	{ _MMIO(0x2784), 0x0000f3fb },
	{ _MMIO(0x2788), 0x0007bf7a },
	{ _MMIO(0x278c), 0x0000f7e7 },
	{ _MMIO(0x2790), 0x0007fefa },
	{ _MMIO(0x2794), 0x0000f7cf },
	{ _MMIO(0x2798), 0x00077ffa },
	{ _MMIO(0x279c), 0x0000efdf },
	{ _MMIO(0x27a0), 0x0006fffa },
	{ _MMIO(0x27a4), 0x0000cfbf },
	{ _MMIO(0x27a8), 0x0003fffa },
	{ _MMIO(0x27ac), 0x00005f7f },
};

/* Flex EU counter register writes for render_pipe_profile. */
static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};

/* MUX programming for render_pipe_profile: every entry writes the 0x9888
 * signal-selection register. */
static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
	{ _MMIO(0x9888), 0x59800000 },
	{ _MMIO(0x9888), 0x59800001 },
	{ _MMIO(0x9888), 0x261e0000 },
	{ _MMIO(0x9888), 0x281f000f },
	{ _MMIO(0x9888), 0x2817001a },
	{ _MMIO(0x9888), 0x2791001f },
	{ _MMIO(0x9888), 0x27880019 },
	{ _MMIO(0x9888), 0x2d890000 },
	{ _MMIO(0x9888), 0x278a0007 },
	{ _MMIO(0x9888), 0x298d001f },
	{ _MMIO(0x9888), 0x278e0020 },
	{ _MMIO(0x9888), 0x2b8f0012 },
	{ _MMIO(0x9888), 0x29900000 },
	{ _MMIO(0x9888), 0x00184000 },
	{ _MMIO(0x9888), 0x02181000 },
	{ _MMIO(0x9888), 0x02194000 },
	{ _MMIO(0x9888), 0x141e0002 },
	{ _MMIO(0x9888), 0x041e0000 },
	{ _MMIO(0x9888), 0x001e0000 },
	{ _MMIO(0x9888), 0x221f0015 },
	{ _MMIO(0x9888), 0x041f0000 },
	{ _MMIO(0x9888), 0x001f4000 },
	{ _MMIO(0x9888), 0x021f0000 },
	{ _MMIO(0x9888), 0x023a8000 },
	{ _MMIO(0x9888), 0x0213c000 },
	{ _MMIO(0x9888), 0x02164000 },
	{ _MMIO(0x9888), 0x24170012 },
	{ _MMIO(0x9888), 0x04170000 },
	{ _MMIO(0x9888), 0x07910005 },
	{ _MMIO(0x9888), 0x05910000 },
	{ _MMIO(0x9888), 0x01911500 },
	{ _MMIO(0x9888), 0x03910501 },
	{ _MMIO(0x9888), 0x0d880002 },
	{ _MMIO(0x9888), 0x1d880003 },
	{ _MMIO(0x9888), 0x05880000 },
	{ _MMIO(0x9888), 0x0b890032 },
	{ _MMIO(0x9888), 0x1b890031 },
	{ _MMIO(0x9888), 0x05890000 },
	{ _MMIO(0x9888), 0x01890040 },
	{ _MMIO(0x9888), 0x03890040 },
	{ _MMIO(0x9888), 0x098a0000 },
	{ _MMIO(0x9888), 0x198a0004 },
	{ _MMIO(0x9888), 0x058a0000 },
	{ _MMIO(0x9888), 0x018a8050 },
	{ _MMIO(0x9888), 0x038a2050 },
	{ _MMIO(0x9888), 0x018b95a9 },
	{ _MMIO(0x9888), 0x038be5a9 },
	{ _MMIO(0x9888), 0x018c1500 },
	{ _MMIO(0x9888), 0x038c0501 },
	{ _MMIO(0x9888), 0x178d0015 },
	{ _MMIO(0x9888), 0x058d0000 },
	{ _MMIO(0x9888), 0x138e0004 },
	{ _MMIO(0x9888), 0x218e000c },
	{ _MMIO(0x9888), 0x058e0000 },
	{ _MMIO(0x9888), 0x018e0500 },
	{ _MMIO(0x9888), 0x038e0101 },
	{ _MMIO(0x9888), 0x0f8f0027 },
	{ _MMIO(0x9888), 0x058f0000 },
	{ _MMIO(0x9888), 0x018f0000 },
	{ _MMIO(0x9888), 0x038f0001 },
	{ _MMIO(0x9888), 0x11900013 },
	{ _MMIO(0x9888), 0x1f900017 },
	{ _MMIO(0x9888), 0x05900000 },
	{ _MMIO(0x9888), 0x01900100 },
	{ _MMIO(0x9888), 0x03900001 },
	{ _MMIO(0x9888), 0x01845555 },
	{ _MMIO(0x9888), 0x03845555 },
	{ _MMIO(0x9888), 0x418000aa },
	{ _MMIO(0x9888), 0x438000aa },
	{ _MMIO(0x9888), 0x458000aa },
	{ _MMIO(0x9888), 0x478000aa },
	{ _MMIO(0x9888), 0x4980018c },
	{ _MMIO(0x9888), 0x4b80014b },
	{ _MMIO(0x9888), 0x4d800128 },
	{ _MMIO(0x9888), 0x4f80012a },
	{ _MMIO(0x9888), 0x51800187 },
	{ _MMIO(0x9888), 0x5380014b },
	{ _MMIO(0x9888), 0x55800149 },
	{ _MMIO(0x9888), 0x5780010a },
	{ _MMIO(0x9888), 0x59800000 },
};
355
356static int
357get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
358 const struct i915_oa_reg **regs,
359 int *lens)
360{
361 int n = 0;
362
363 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
364 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
365
366 regs[n] = mux_config_render_pipe_profile;
367 lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
368 n++;
369
370 return n;
371}
372
/* B-counter register writes for the hdc_and_sf metric set
 * (autogenerated values -- do not hand-edit). */
static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x10800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000fff7 },
};

/* Flex EU counter register writes for hdc_and_sf. */
static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/* MUX programming for hdc_and_sf: every entry writes the 0x9888
 * signal-selection register. */
static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
	{ _MMIO(0x9888), 0x105c0232 },
	{ _MMIO(0x9888), 0x10580232 },
	{ _MMIO(0x9888), 0x10380232 },
	{ _MMIO(0x9888), 0x10dc0232 },
	{ _MMIO(0x9888), 0x10d80232 },
	{ _MMIO(0x9888), 0x10b80232 },
	{ _MMIO(0x9888), 0x118e4400 },
	{ _MMIO(0x9888), 0x025c6080 },
	{ _MMIO(0x9888), 0x045c004b },
	{ _MMIO(0x9888), 0x005c8000 },
	{ _MMIO(0x9888), 0x00582080 },
	{ _MMIO(0x9888), 0x0258004b },
	{ _MMIO(0x9888), 0x025b4000 },
	{ _MMIO(0x9888), 0x045b4000 },
	{ _MMIO(0x9888), 0x0c1fa000 },
	{ _MMIO(0x9888), 0x0e1f00aa },
	{ _MMIO(0x9888), 0x04386080 },
	{ _MMIO(0x9888), 0x0638404b },
	{ _MMIO(0x9888), 0x02384000 },
	{ _MMIO(0x9888), 0x08384000 },
	{ _MMIO(0x9888), 0x0a380000 },
	{ _MMIO(0x9888), 0x0c380000 },
	{ _MMIO(0x9888), 0x00398000 },
	{ _MMIO(0x9888), 0x0239a000 },
	{ _MMIO(0x9888), 0x0439a000 },
	{ _MMIO(0x9888), 0x06392000 },
	{ _MMIO(0x9888), 0x0cdc25c1 },
	{ _MMIO(0x9888), 0x0adcc000 },
	{ _MMIO(0x9888), 0x0ad825c1 },
	{ _MMIO(0x9888), 0x18db4000 },
	{ _MMIO(0x9888), 0x1adb0001 },
	{ _MMIO(0x9888), 0x0e9f8000 },
	{ _MMIO(0x9888), 0x109f02aa },
	{ _MMIO(0x9888), 0x0eb825c1 },
	{ _MMIO(0x9888), 0x18b80154 },
	{ _MMIO(0x9888), 0x0ab9a000 },
	{ _MMIO(0x9888), 0x0cb9a000 },
	{ _MMIO(0x9888), 0x0eb9a000 },
	{ _MMIO(0x9888), 0x0d88c000 },
	{ _MMIO(0x9888), 0x0f88000f },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x078a8000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x0d8a8000 },
	{ _MMIO(0x9888), 0x258baa05 },
	{ _MMIO(0x9888), 0x278b002a },
	{ _MMIO(0x9888), 0x238b2a80 },
	{ _MMIO(0x9888), 0x198c5400 },
	{ _MMIO(0x9888), 0x1b8c0015 },
	{ _MMIO(0x9888), 0x098dc000 },
	{ _MMIO(0x9888), 0x0b8da000 },
	{ _MMIO(0x9888), 0x0d8da000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x098e05c0 },
	{ _MMIO(0x9888), 0x058e0000 },
	{ _MMIO(0x9888), 0x198f0020 },
	{ _MMIO(0x9888), 0x2185aa0a },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x19835000 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x09848000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x19808000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x51800040 },
	{ _MMIO(0x9888), 0x43800400 },
	{ _MMIO(0x9888), 0x45800800 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47800c62 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f801042 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x418014a4 },
};
492
493static int
494get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
495 const struct i915_oa_reg **regs,
496 int *lens)
497{
498 int n = 0;
499
500 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
501 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
502
503 regs[n] = mux_config_hdc_and_sf;
504 lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
505 n++;
506
507 return n;
508}
509
/* B-counter register writes for the l3_1 metric set
 * (autogenerated values -- do not hand-edit). */
static const struct i915_oa_reg b_counter_config_l3_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};

/* Flex EU counter register writes for l3_1. */
static const struct i915_oa_reg flex_eu_config_l3_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/* MUX programming for l3_1: every entry writes the 0x9888
 * signal-selection register. */
static const struct i915_oa_reg mux_config_l3_1[] = {
	{ _MMIO(0x9888), 0x10bf03da },
	{ _MMIO(0x9888), 0x14bf0001 },
	{ _MMIO(0x9888), 0x12980340 },
	{ _MMIO(0x9888), 0x12990340 },
	{ _MMIO(0x9888), 0x0cbf1187 },
	{ _MMIO(0x9888), 0x0ebf1205 },
	{ _MMIO(0x9888), 0x00bf0500 },
	{ _MMIO(0x9888), 0x02bf042b },
	{ _MMIO(0x9888), 0x04bf002c },
	{ _MMIO(0x9888), 0x0cdac000 },
	{ _MMIO(0x9888), 0x0edac000 },
	{ _MMIO(0x9888), 0x00da8000 },
	{ _MMIO(0x9888), 0x02dac000 },
	{ _MMIO(0x9888), 0x04da4000 },
	{ _MMIO(0x9888), 0x04983400 },
	{ _MMIO(0x9888), 0x10980000 },
	{ _MMIO(0x9888), 0x06990034 },
	{ _MMIO(0x9888), 0x10990000 },
	{ _MMIO(0x9888), 0x0c9dc000 },
	{ _MMIO(0x9888), 0x0e9dc000 },
	{ _MMIO(0x9888), 0x009d8000 },
	{ _MMIO(0x9888), 0x029dc000 },
	{ _MMIO(0x9888), 0x049d4000 },
	{ _MMIO(0x9888), 0x109f02a8 },
	{ _MMIO(0x9888), 0x0c9fa000 },
	{ _MMIO(0x9888), 0x0e9f00ba },
	{ _MMIO(0x9888), 0x0cb88000 },
	{ _MMIO(0x9888), 0x0cb95000 },
	{ _MMIO(0x9888), 0x0eb95000 },
	{ _MMIO(0x9888), 0x00b94000 },
	{ _MMIO(0x9888), 0x02b95000 },
	{ _MMIO(0x9888), 0x04b91000 },
	{ _MMIO(0x9888), 0x06b92000 },
	{ _MMIO(0x9888), 0x0cba4000 },
	{ _MMIO(0x9888), 0x0f88000f },
	{ _MMIO(0x9888), 0x03888000 },
	{ _MMIO(0x9888), 0x05888000 },
	{ _MMIO(0x9888), 0x07888000 },
	{ _MMIO(0x9888), 0x09888000 },
	{ _MMIO(0x9888), 0x0b888000 },
	{ _MMIO(0x9888), 0x0d880400 },
	{ _MMIO(0x9888), 0x258b800a },
	{ _MMIO(0x9888), 0x278b002a },
	{ _MMIO(0x9888), 0x238b5500 },
	{ _MMIO(0x9888), 0x198c4000 },
	{ _MMIO(0x9888), 0x1b8c0015 },
	{ _MMIO(0x9888), 0x038c4000 },
	{ _MMIO(0x9888), 0x058c4000 },
	{ _MMIO(0x9888), 0x078c4000 },
	{ _MMIO(0x9888), 0x098c4000 },
	{ _MMIO(0x9888), 0x0b8c4000 },
	{ _MMIO(0x9888), 0x0d8c4000 },
	{ _MMIO(0x9888), 0x0d8da000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x018d8000 },
	{ _MMIO(0x9888), 0x038da000 },
	{ _MMIO(0x9888), 0x058da000 },
	{ _MMIO(0x9888), 0x078d2000 },
	{ _MMIO(0x9888), 0x2185800a },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x1b830154 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x47800000 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f800000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x41800060 },
};
636
637static int
638get_l3_1_mux_config(struct drm_i915_private *dev_priv,
639 const struct i915_oa_reg **regs,
640 int *lens)
641{
642 int n = 0;
643
644 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
645 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
646
647 regs[n] = mux_config_l3_1;
648 lens[n] = ARRAY_SIZE(mux_config_l3_1);
649 n++;
650
651 return n;
652}
653
/* B-counter register writes for the l3_2 metric set -- identical values
 * to b_counter_config_l3_1 (autogenerated -- do not hand-edit). */
static const struct i915_oa_reg b_counter_config_l3_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};

/* Flex EU counter register writes for l3_2. */
static const struct i915_oa_reg flex_eu_config_l3_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/* MUX programming for l3_2: every entry writes the 0x9888
 * signal-selection register. */
static const struct i915_oa_reg mux_config_l3_2[] = {
	{ _MMIO(0x9888), 0x103f03da },
	{ _MMIO(0x9888), 0x143f0001 },
	{ _MMIO(0x9888), 0x12180340 },
	{ _MMIO(0x9888), 0x12190340 },
	{ _MMIO(0x9888), 0x0c3f1187 },
	{ _MMIO(0x9888), 0x0e3f1205 },
	{ _MMIO(0x9888), 0x003f0500 },
	{ _MMIO(0x9888), 0x023f042b },
	{ _MMIO(0x9888), 0x043f002c },
	{ _MMIO(0x9888), 0x0c5ac000 },
	{ _MMIO(0x9888), 0x0e5ac000 },
	{ _MMIO(0x9888), 0x005a8000 },
	{ _MMIO(0x9888), 0x025ac000 },
	{ _MMIO(0x9888), 0x045a4000 },
	{ _MMIO(0x9888), 0x04183400 },
	{ _MMIO(0x9888), 0x10180000 },
	{ _MMIO(0x9888), 0x06190034 },
	{ _MMIO(0x9888), 0x10190000 },
	{ _MMIO(0x9888), 0x0c1dc000 },
	{ _MMIO(0x9888), 0x0e1dc000 },
	{ _MMIO(0x9888), 0x001d8000 },
	{ _MMIO(0x9888), 0x021dc000 },
	{ _MMIO(0x9888), 0x041d4000 },
	{ _MMIO(0x9888), 0x101f02a8 },
	{ _MMIO(0x9888), 0x0c1fa000 },
	{ _MMIO(0x9888), 0x0e1f00ba },
	{ _MMIO(0x9888), 0x0c388000 },
	{ _MMIO(0x9888), 0x0c395000 },
	{ _MMIO(0x9888), 0x0e395000 },
	{ _MMIO(0x9888), 0x00394000 },
	{ _MMIO(0x9888), 0x02395000 },
	{ _MMIO(0x9888), 0x04391000 },
	{ _MMIO(0x9888), 0x06392000 },
	{ _MMIO(0x9888), 0x0c3a4000 },
	{ _MMIO(0x9888), 0x1b8aa800 },
	{ _MMIO(0x9888), 0x1d8a0002 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x078a8000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x0d8a8000 },
	{ _MMIO(0x9888), 0x258b4005 },
	{ _MMIO(0x9888), 0x278b0015 },
	{ _MMIO(0x9888), 0x238b2a80 },
	{ _MMIO(0x9888), 0x2185800a },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x1b830154 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x45800000 },
	{ _MMIO(0x9888), 0x47800000 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f800000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x41800060 },
};
767
768static int
769get_l3_2_mux_config(struct drm_i915_private *dev_priv,
770 const struct i915_oa_reg **regs,
771 int *lens)
772{
773 int n = 0;
774
775 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
776 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
777
778 regs[n] = mux_config_l3_2;
779 lens[n] = ARRAY_SIZE(mux_config_l3_2);
780 n++;
781
782 return n;
783}
784
/*
 * Boolean (B) counter configuration for the "l3_3" metric set: (register,
 * value) pairs written verbatim to the OA unit (addresses 0x2710-0x27ac).
 * NOTE(review): table looks machine-generated from metric-set descriptions —
 * do not hand-edit individual values.
 */
static const struct i915_oa_reg b_counter_config_l3_3[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};
809
/*
 * Flexible EU counter configuration for the "l3_3" metric set: (register,
 * value) pairs for the 0xe4xx-0xe7xx range.  Identical across the metric
 * sets in this file; generated data — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_l3_3[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
819
/*
 * NOA mux programming for the "l3_3" metric set.  Every entry writes the
 * same MMIO address (0x9888), presumably the OA configuration/mux register
 * — confirm against the i915 OA documentation.  Generated data; do not
 * hand-edit individual values.
 */
static const struct i915_oa_reg mux_config_l3_3[] = {
	{ _MMIO(0x9888), 0x121b0340 },
	{ _MMIO(0x9888), 0x103f0274 },
	{ _MMIO(0x9888), 0x123f0000 },
	{ _MMIO(0x9888), 0x129b0340 },
	{ _MMIO(0x9888), 0x10bf0274 },
	{ _MMIO(0x9888), 0x12bf0000 },
	{ _MMIO(0x9888), 0x041b3400 },
	{ _MMIO(0x9888), 0x101b0000 },
	{ _MMIO(0x9888), 0x045c8000 },
	{ _MMIO(0x9888), 0x0a3d4000 },
	{ _MMIO(0x9888), 0x003f0080 },
	{ _MMIO(0x9888), 0x023f0793 },
	{ _MMIO(0x9888), 0x043f0014 },
	{ _MMIO(0x9888), 0x04588000 },
	{ _MMIO(0x9888), 0x005a8000 },
	{ _MMIO(0x9888), 0x025ac000 },
	{ _MMIO(0x9888), 0x045a4000 },
	{ _MMIO(0x9888), 0x0a5b4000 },
	{ _MMIO(0x9888), 0x001d8000 },
	{ _MMIO(0x9888), 0x021dc000 },
	{ _MMIO(0x9888), 0x041d4000 },
	{ _MMIO(0x9888), 0x0c1fa000 },
	{ _MMIO(0x9888), 0x0e1f002a },
	{ _MMIO(0x9888), 0x0a384000 },
	{ _MMIO(0x9888), 0x00394000 },
	{ _MMIO(0x9888), 0x02395000 },
	{ _MMIO(0x9888), 0x04399000 },
	{ _MMIO(0x9888), 0x069b0034 },
	{ _MMIO(0x9888), 0x109b0000 },
	{ _MMIO(0x9888), 0x06dc4000 },
	{ _MMIO(0x9888), 0x0cbd4000 },
	{ _MMIO(0x9888), 0x0cbf0981 },
	{ _MMIO(0x9888), 0x0ebf0a0f },
	{ _MMIO(0x9888), 0x06d84000 },
	{ _MMIO(0x9888), 0x0cdac000 },
	{ _MMIO(0x9888), 0x0edac000 },
	{ _MMIO(0x9888), 0x0cdb4000 },
	{ _MMIO(0x9888), 0x0c9dc000 },
	{ _MMIO(0x9888), 0x0e9dc000 },
	{ _MMIO(0x9888), 0x109f02a8 },
	{ _MMIO(0x9888), 0x0e9f0080 },
	{ _MMIO(0x9888), 0x0cb84000 },
	{ _MMIO(0x9888), 0x0cb95000 },
	{ _MMIO(0x9888), 0x0eb95000 },
	{ _MMIO(0x9888), 0x06b92000 },
	{ _MMIO(0x9888), 0x0f88000f },
	{ _MMIO(0x9888), 0x0d880400 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x078a8000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x258b8009 },
	{ _MMIO(0x9888), 0x278b002a },
	{ _MMIO(0x9888), 0x238b2a80 },
	{ _MMIO(0x9888), 0x198c4000 },
	{ _MMIO(0x9888), 0x1b8c0015 },
	{ _MMIO(0x9888), 0x0d8c4000 },
	{ _MMIO(0x9888), 0x0d8da000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x078d2000 },
	{ _MMIO(0x9888), 0x2185800a },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x1b830154 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x45800c00 },
	{ _MMIO(0x9888), 0x47800c63 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f8014a5 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x41800045 },
};
914
915static int
916get_l3_3_mux_config(struct drm_i915_private *dev_priv,
917 const struct i915_oa_reg **regs,
918 int *lens)
919{
920 int n = 0;
921
922 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
923 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
924
925 regs[n] = mux_config_l3_3;
926 lens[n] = ARRAY_SIZE(mux_config_l3_3);
927 n++;
928
929 return n;
930}
931
/*
 * Boolean (B) counter configuration for the "l3_4" metric set: (register,
 * value) pairs for OA addresses 0x2710-0x27ac.  Values match the l3_3 set;
 * generated data — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_l3_4[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};
956
/*
 * Flexible EU counter configuration for the "l3_4" metric set: (register,
 * value) pairs for the 0xe4xx-0xe7xx range.  Identical across the metric
 * sets in this file; generated data — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_l3_4[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
966
/*
 * NOA mux programming for the "l3_4" metric set.  Every entry writes MMIO
 * address 0x9888 (presumably the OA configuration/mux register — confirm
 * against the i915 OA documentation).  Generated data; do not hand-edit.
 */
static const struct i915_oa_reg mux_config_l3_4[] = {
	{ _MMIO(0x9888), 0x121a0340 },
	{ _MMIO(0x9888), 0x103f0017 },
	{ _MMIO(0x9888), 0x123f0020 },
	{ _MMIO(0x9888), 0x129a0340 },
	{ _MMIO(0x9888), 0x10bf0017 },
	{ _MMIO(0x9888), 0x12bf0020 },
	{ _MMIO(0x9888), 0x041a3400 },
	{ _MMIO(0x9888), 0x101a0000 },
	{ _MMIO(0x9888), 0x043b8000 },
	{ _MMIO(0x9888), 0x0a3e0010 },
	{ _MMIO(0x9888), 0x003f0200 },
	{ _MMIO(0x9888), 0x023f0113 },
	{ _MMIO(0x9888), 0x043f0014 },
	{ _MMIO(0x9888), 0x02592000 },
	{ _MMIO(0x9888), 0x005a8000 },
	{ _MMIO(0x9888), 0x025ac000 },
	{ _MMIO(0x9888), 0x045a4000 },
	{ _MMIO(0x9888), 0x0a1c8000 },
	{ _MMIO(0x9888), 0x001d8000 },
	{ _MMIO(0x9888), 0x021dc000 },
	{ _MMIO(0x9888), 0x041d4000 },
	{ _MMIO(0x9888), 0x0a1e8000 },
	{ _MMIO(0x9888), 0x0c1fa000 },
	{ _MMIO(0x9888), 0x0e1f001a },
	{ _MMIO(0x9888), 0x00394000 },
	{ _MMIO(0x9888), 0x02395000 },
	{ _MMIO(0x9888), 0x04391000 },
	{ _MMIO(0x9888), 0x069a0034 },
	{ _MMIO(0x9888), 0x109a0000 },
	{ _MMIO(0x9888), 0x06bb4000 },
	{ _MMIO(0x9888), 0x0abe0040 },
	{ _MMIO(0x9888), 0x0cbf0984 },
	{ _MMIO(0x9888), 0x0ebf0a02 },
	{ _MMIO(0x9888), 0x02d94000 },
	{ _MMIO(0x9888), 0x0cdac000 },
	{ _MMIO(0x9888), 0x0edac000 },
	{ _MMIO(0x9888), 0x0c9c0400 },
	{ _MMIO(0x9888), 0x0c9dc000 },
	{ _MMIO(0x9888), 0x0e9dc000 },
	{ _MMIO(0x9888), 0x0c9e0400 },
	{ _MMIO(0x9888), 0x109f02a8 },
	{ _MMIO(0x9888), 0x0e9f0040 },
	{ _MMIO(0x9888), 0x0cb95000 },
	{ _MMIO(0x9888), 0x0eb95000 },
	{ _MMIO(0x9888), 0x0f88000f },
	{ _MMIO(0x9888), 0x0d880400 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x078a8000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x258b8009 },
	{ _MMIO(0x9888), 0x278b002a },
	{ _MMIO(0x9888), 0x238b2a80 },
	{ _MMIO(0x9888), 0x198c4000 },
	{ _MMIO(0x9888), 0x1b8c0015 },
	{ _MMIO(0x9888), 0x0d8c4000 },
	{ _MMIO(0x9888), 0x0d8da000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x078d2000 },
	{ _MMIO(0x9888), 0x2185800a },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x1b830154 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x45800800 },
	{ _MMIO(0x9888), 0x47800842 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f801084 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x41800044 },
};
1060
1061static int
1062get_l3_4_mux_config(struct drm_i915_private *dev_priv,
1063 const struct i915_oa_reg **regs,
1064 int *lens)
1065{
1066 int n = 0;
1067
1068 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1069 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1070
1071 regs[n] = mux_config_l3_4;
1072 lens[n] = ARRAY_SIZE(mux_config_l3_4);
1073 n++;
1074
1075 return n;
1076}
1077
/*
 * Boolean (B) counter configuration for the "rasterizer_and_pixel_backend"
 * metric set: (register, value) pairs for OA addresses 0x2710-0x279c.
 * Generated data — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00006000 },
	{ _MMIO(0x2774), 0x0000f3ff },
	{ _MMIO(0x2778), 0x00001800 },
	{ _MMIO(0x277c), 0x0000fcff },
	{ _MMIO(0x2780), 0x00000600 },
	{ _MMIO(0x2784), 0x0000ff3f },
	{ _MMIO(0x2788), 0x00000180 },
	{ _MMIO(0x278c), 0x0000ffcf },
	{ _MMIO(0x2790), 0x00000060 },
	{ _MMIO(0x2794), 0x0000fff3 },
	{ _MMIO(0x2798), 0x00000018 },
	{ _MMIO(0x279c), 0x0000fffc },
};
1098
/*
 * Flexible EU counter configuration for the "rasterizer_and_pixel_backend"
 * metric set: (register, value) pairs for the 0xe4xx-0xe7xx range.
 * Identical across the metric sets in this file; generated data.
 */
static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1108
/*
 * NOA mux programming for the "rasterizer_and_pixel_backend" metric set.
 * Every entry writes MMIO address 0x9888 (presumably the OA configuration/
 * mux register — confirm against the i915 OA documentation).  Generated
 * data; do not hand-edit.
 */
static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x9888), 0x143b000e },
	{ _MMIO(0x9888), 0x043c55c0 },
	{ _MMIO(0x9888), 0x0a1e0280 },
	{ _MMIO(0x9888), 0x0c1e0408 },
	{ _MMIO(0x9888), 0x10390000 },
	{ _MMIO(0x9888), 0x12397a1f },
	{ _MMIO(0x9888), 0x14bb000e },
	{ _MMIO(0x9888), 0x04bc5000 },
	{ _MMIO(0x9888), 0x0a9e0296 },
	{ _MMIO(0x9888), 0x0c9e0008 },
	{ _MMIO(0x9888), 0x10b90000 },
	{ _MMIO(0x9888), 0x12b97a1f },
	{ _MMIO(0x9888), 0x063b0042 },
	{ _MMIO(0x9888), 0x103b0000 },
	{ _MMIO(0x9888), 0x083c0000 },
	{ _MMIO(0x9888), 0x0a3e0040 },
	{ _MMIO(0x9888), 0x043f8000 },
	{ _MMIO(0x9888), 0x02594000 },
	{ _MMIO(0x9888), 0x045a8000 },
	{ _MMIO(0x9888), 0x0c1c0400 },
	{ _MMIO(0x9888), 0x041d8000 },
	{ _MMIO(0x9888), 0x081e02c0 },
	{ _MMIO(0x9888), 0x0e1e0000 },
	{ _MMIO(0x9888), 0x0c1fa800 },
	{ _MMIO(0x9888), 0x0e1f0260 },
	{ _MMIO(0x9888), 0x101f0014 },
	{ _MMIO(0x9888), 0x003905e0 },
	{ _MMIO(0x9888), 0x06390bc0 },
	{ _MMIO(0x9888), 0x02390018 },
	{ _MMIO(0x9888), 0x04394000 },
	{ _MMIO(0x9888), 0x04bb0042 },
	{ _MMIO(0x9888), 0x10bb0000 },
	{ _MMIO(0x9888), 0x02bc05c0 },
	{ _MMIO(0x9888), 0x08bc0000 },
	{ _MMIO(0x9888), 0x0abe0004 },
	{ _MMIO(0x9888), 0x02bf8000 },
	{ _MMIO(0x9888), 0x02d91000 },
	{ _MMIO(0x9888), 0x02da8000 },
	{ _MMIO(0x9888), 0x089c8000 },
	{ _MMIO(0x9888), 0x029d8000 },
	{ _MMIO(0x9888), 0x089e8000 },
	{ _MMIO(0x9888), 0x0e9e0000 },
	{ _MMIO(0x9888), 0x0e9fa806 },
	{ _MMIO(0x9888), 0x109f0142 },
	{ _MMIO(0x9888), 0x08b90617 },
	{ _MMIO(0x9888), 0x0ab90be0 },
	{ _MMIO(0x9888), 0x02b94000 },
	{ _MMIO(0x9888), 0x0d88f000 },
	{ _MMIO(0x9888), 0x0f88000c },
	{ _MMIO(0x9888), 0x07888000 },
	{ _MMIO(0x9888), 0x09888000 },
	{ _MMIO(0x9888), 0x018a8000 },
	{ _MMIO(0x9888), 0x0f8a8000 },
	{ _MMIO(0x9888), 0x1b8a2800 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x0d8a8000 },
	{ _MMIO(0x9888), 0x238b52a0 },
	{ _MMIO(0x9888), 0x258b6a95 },
	{ _MMIO(0x9888), 0x278b0029 },
	{ _MMIO(0x9888), 0x178c2000 },
	{ _MMIO(0x9888), 0x198c1500 },
	{ _MMIO(0x9888), 0x1b8c0014 },
	{ _MMIO(0x9888), 0x078c4000 },
	{ _MMIO(0x9888), 0x098c4000 },
	{ _MMIO(0x9888), 0x098da000 },
	{ _MMIO(0x9888), 0x0b8da000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x038d8000 },
	{ _MMIO(0x9888), 0x058d2000 },
	{ _MMIO(0x9888), 0x1f85aa80 },
	{ _MMIO(0x9888), 0x2185aaaa },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x01834000 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0184c000 },
	{ _MMIO(0x9888), 0x0784c000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x1180c000 },
	{ _MMIO(0x9888), 0x1780c000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x4d800444 },
	{ _MMIO(0x9888), 0x3d800000 },
	{ _MMIO(0x9888), 0x4f804000 },
	{ _MMIO(0x9888), 0x43801080 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45800084 },
	{ _MMIO(0x9888), 0x53800044 },
	{ _MMIO(0x9888), 0x47801080 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x3f800000 },
	{ _MMIO(0x9888), 0x41800840 },
};
1223
1224static int
1225get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
1226 const struct i915_oa_reg **regs,
1227 int *lens)
1228{
1229 int n = 0;
1230
1231 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1232 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1233
1234 regs[n] = mux_config_rasterizer_and_pixel_backend;
1235 lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
1236 n++;
1237
1238 return n;
1239}
1240
/*
 * Boolean (B) counter configuration for the "sampler_1" metric set:
 * (register, value) pairs for OA addresses 0x2710-0x2784.  Generated data
 * — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_sampler_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x70800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x0000c000 },
	{ _MMIO(0x2774), 0x0000e7ff },
	{ _MMIO(0x2778), 0x00003000 },
	{ _MMIO(0x277c), 0x0000f9ff },
	{ _MMIO(0x2780), 0x00000c00 },
	{ _MMIO(0x2784), 0x0000fe7f },
};
1255
/*
 * Flexible EU counter configuration for the "sampler_1" metric set:
 * (register, value) pairs for the 0xe4xx-0xe7xx range.  Identical across
 * the metric sets in this file; generated data.
 */
static const struct i915_oa_reg flex_eu_config_sampler_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1265
/*
 * NOA mux programming for the "sampler_1" metric set.  Every entry writes
 * MMIO address 0x9888 (presumably the OA configuration/mux register —
 * confirm against the i915 OA documentation).  Generated data; do not
 * hand-edit.
 */
static const struct i915_oa_reg mux_config_sampler_1[] = {
	{ _MMIO(0x9888), 0x18921400 },
	{ _MMIO(0x9888), 0x149500ab },
	{ _MMIO(0x9888), 0x18b21400 },
	{ _MMIO(0x9888), 0x14b500ab },
	{ _MMIO(0x9888), 0x18d21400 },
	{ _MMIO(0x9888), 0x14d500ab },
	{ _MMIO(0x9888), 0x0cdc8000 },
	{ _MMIO(0x9888), 0x0edc4000 },
	{ _MMIO(0x9888), 0x02dcc000 },
	{ _MMIO(0x9888), 0x04dcc000 },
	{ _MMIO(0x9888), 0x1abd00a0 },
	{ _MMIO(0x9888), 0x0abd8000 },
	{ _MMIO(0x9888), 0x0cd88000 },
	{ _MMIO(0x9888), 0x0ed84000 },
	{ _MMIO(0x9888), 0x04d88000 },
	{ _MMIO(0x9888), 0x1adb0050 },
	{ _MMIO(0x9888), 0x04db8000 },
	{ _MMIO(0x9888), 0x06db8000 },
	{ _MMIO(0x9888), 0x08db8000 },
	{ _MMIO(0x9888), 0x0adb4000 },
	{ _MMIO(0x9888), 0x109f02a0 },
	{ _MMIO(0x9888), 0x0c9fa000 },
	{ _MMIO(0x9888), 0x0e9f00aa },
	{ _MMIO(0x9888), 0x18b82500 },
	{ _MMIO(0x9888), 0x02b88000 },
	{ _MMIO(0x9888), 0x04b84000 },
	{ _MMIO(0x9888), 0x06b84000 },
	{ _MMIO(0x9888), 0x08b84000 },
	{ _MMIO(0x9888), 0x0ab84000 },
	{ _MMIO(0x9888), 0x0cb88000 },
	{ _MMIO(0x9888), 0x0cb98000 },
	{ _MMIO(0x9888), 0x0eb9a000 },
	{ _MMIO(0x9888), 0x00b98000 },
	{ _MMIO(0x9888), 0x02b9a000 },
	{ _MMIO(0x9888), 0x04b9a000 },
	{ _MMIO(0x9888), 0x06b92000 },
	{ _MMIO(0x9888), 0x1aba0200 },
	{ _MMIO(0x9888), 0x02ba8000 },
	{ _MMIO(0x9888), 0x0cba8000 },
	{ _MMIO(0x9888), 0x04908000 },
	{ _MMIO(0x9888), 0x04918000 },
	{ _MMIO(0x9888), 0x04927300 },
	{ _MMIO(0x9888), 0x10920000 },
	{ _MMIO(0x9888), 0x1893000a },
	{ _MMIO(0x9888), 0x0a934000 },
	{ _MMIO(0x9888), 0x0a946000 },
	{ _MMIO(0x9888), 0x0c959000 },
	{ _MMIO(0x9888), 0x0e950098 },
	{ _MMIO(0x9888), 0x10950000 },
	{ _MMIO(0x9888), 0x04b04000 },
	{ _MMIO(0x9888), 0x04b14000 },
	{ _MMIO(0x9888), 0x04b20073 },
	{ _MMIO(0x9888), 0x10b20000 },
	{ _MMIO(0x9888), 0x04b38000 },
	{ _MMIO(0x9888), 0x06b38000 },
	{ _MMIO(0x9888), 0x08b34000 },
	{ _MMIO(0x9888), 0x04b4c000 },
	{ _MMIO(0x9888), 0x02b59890 },
	{ _MMIO(0x9888), 0x10b50000 },
	{ _MMIO(0x9888), 0x06d04000 },
	{ _MMIO(0x9888), 0x06d14000 },
	{ _MMIO(0x9888), 0x06d20073 },
	{ _MMIO(0x9888), 0x10d20000 },
	{ _MMIO(0x9888), 0x18d30020 },
	{ _MMIO(0x9888), 0x02d38000 },
	{ _MMIO(0x9888), 0x0cd34000 },
	{ _MMIO(0x9888), 0x0ad48000 },
	{ _MMIO(0x9888), 0x04d42000 },
	{ _MMIO(0x9888), 0x0ed59000 },
	{ _MMIO(0x9888), 0x00d59800 },
	{ _MMIO(0x9888), 0x10d50000 },
	{ _MMIO(0x9888), 0x0f88000e },
	{ _MMIO(0x9888), 0x03888000 },
	{ _MMIO(0x9888), 0x05888000 },
	{ _MMIO(0x9888), 0x07888000 },
	{ _MMIO(0x9888), 0x09888000 },
	{ _MMIO(0x9888), 0x0b888000 },
	{ _MMIO(0x9888), 0x0d880400 },
	{ _MMIO(0x9888), 0x278b002a },
	{ _MMIO(0x9888), 0x238b5500 },
	{ _MMIO(0x9888), 0x258b000a },
	{ _MMIO(0x9888), 0x1b8c0015 },
	{ _MMIO(0x9888), 0x038c4000 },
	{ _MMIO(0x9888), 0x058c4000 },
	{ _MMIO(0x9888), 0x078c4000 },
	{ _MMIO(0x9888), 0x098c4000 },
	{ _MMIO(0x9888), 0x0b8c4000 },
	{ _MMIO(0x9888), 0x0d8c4000 },
	{ _MMIO(0x9888), 0x0d8d8000 },
	{ _MMIO(0x9888), 0x0f8da000 },
	{ _MMIO(0x9888), 0x018d8000 },
	{ _MMIO(0x9888), 0x038da000 },
	{ _MMIO(0x9888), 0x058da000 },
	{ _MMIO(0x9888), 0x078d2000 },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x2185000a },
	{ _MMIO(0x9888), 0x1b830150 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0d848000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x1d808000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47801021 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f800c64 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x41800c02 },
};
1392
1393static int
1394get_sampler_1_mux_config(struct drm_i915_private *dev_priv,
1395 const struct i915_oa_reg **regs,
1396 int *lens)
1397{
1398 int n = 0;
1399
1400 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1401 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1402
1403 regs[n] = mux_config_sampler_1;
1404 lens[n] = ARRAY_SIZE(mux_config_sampler_1);
1405 n++;
1406
1407 return n;
1408}
1409
/*
 * Boolean (B) counter configuration for the "sampler_2" metric set:
 * (register, value) pairs for OA addresses 0x2710-0x2784.  Values match
 * the sampler_1 set; generated data — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_sampler_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x70800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x0000c000 },
	{ _MMIO(0x2774), 0x0000e7ff },
	{ _MMIO(0x2778), 0x00003000 },
	{ _MMIO(0x277c), 0x0000f9ff },
	{ _MMIO(0x2780), 0x00000c00 },
	{ _MMIO(0x2784), 0x0000fe7f },
};
1424
/*
 * Flexible EU counter configuration for the "sampler_2" metric set:
 * (register, value) pairs for the 0xe4xx-0xe7xx range.  Identical across
 * the metric sets in this file; generated data.
 */
static const struct i915_oa_reg flex_eu_config_sampler_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1434
/*
 * NOA mux programming for the "sampler_2" metric set.  Every entry writes
 * MMIO address 0x9888 (presumably the OA configuration/mux register —
 * confirm against the i915 OA documentation).  Generated data; do not
 * hand-edit.
 */
static const struct i915_oa_reg mux_config_sampler_2[] = {
	{ _MMIO(0x9888), 0x18121400 },
	{ _MMIO(0x9888), 0x141500ab },
	{ _MMIO(0x9888), 0x18321400 },
	{ _MMIO(0x9888), 0x143500ab },
	{ _MMIO(0x9888), 0x18521400 },
	{ _MMIO(0x9888), 0x145500ab },
	{ _MMIO(0x9888), 0x0c5c8000 },
	{ _MMIO(0x9888), 0x0e5c4000 },
	{ _MMIO(0x9888), 0x025cc000 },
	{ _MMIO(0x9888), 0x045cc000 },
	{ _MMIO(0x9888), 0x1a3d00a0 },
	{ _MMIO(0x9888), 0x0a3d8000 },
	{ _MMIO(0x9888), 0x0c588000 },
	{ _MMIO(0x9888), 0x0e584000 },
	{ _MMIO(0x9888), 0x04588000 },
	{ _MMIO(0x9888), 0x1a5b0050 },
	{ _MMIO(0x9888), 0x045b8000 },
	{ _MMIO(0x9888), 0x065b8000 },
	{ _MMIO(0x9888), 0x085b8000 },
	{ _MMIO(0x9888), 0x0a5b4000 },
	{ _MMIO(0x9888), 0x101f02a0 },
	{ _MMIO(0x9888), 0x0c1fa000 },
	{ _MMIO(0x9888), 0x0e1f00aa },
	{ _MMIO(0x9888), 0x18382500 },
	{ _MMIO(0x9888), 0x02388000 },
	{ _MMIO(0x9888), 0x04384000 },
	{ _MMIO(0x9888), 0x06384000 },
	{ _MMIO(0x9888), 0x08384000 },
	{ _MMIO(0x9888), 0x0a384000 },
	{ _MMIO(0x9888), 0x0c388000 },
	{ _MMIO(0x9888), 0x0c398000 },
	{ _MMIO(0x9888), 0x0e39a000 },
	{ _MMIO(0x9888), 0x00398000 },
	{ _MMIO(0x9888), 0x0239a000 },
	{ _MMIO(0x9888), 0x0439a000 },
	{ _MMIO(0x9888), 0x06392000 },
	{ _MMIO(0x9888), 0x1a3a0200 },
	{ _MMIO(0x9888), 0x023a8000 },
	{ _MMIO(0x9888), 0x0c3a8000 },
	{ _MMIO(0x9888), 0x04108000 },
	{ _MMIO(0x9888), 0x04118000 },
	{ _MMIO(0x9888), 0x04127300 },
	{ _MMIO(0x9888), 0x10120000 },
	{ _MMIO(0x9888), 0x1813000a },
	{ _MMIO(0x9888), 0x0a134000 },
	{ _MMIO(0x9888), 0x0a146000 },
	{ _MMIO(0x9888), 0x0c159000 },
	{ _MMIO(0x9888), 0x0e150098 },
	{ _MMIO(0x9888), 0x10150000 },
	{ _MMIO(0x9888), 0x04304000 },
	{ _MMIO(0x9888), 0x04314000 },
	{ _MMIO(0x9888), 0x04320073 },
	{ _MMIO(0x9888), 0x10320000 },
	{ _MMIO(0x9888), 0x04338000 },
	{ _MMIO(0x9888), 0x06338000 },
	{ _MMIO(0x9888), 0x08334000 },
	{ _MMIO(0x9888), 0x0434c000 },
	{ _MMIO(0x9888), 0x02359890 },
	{ _MMIO(0x9888), 0x10350000 },
	{ _MMIO(0x9888), 0x06504000 },
	{ _MMIO(0x9888), 0x06514000 },
	{ _MMIO(0x9888), 0x06520073 },
	{ _MMIO(0x9888), 0x10520000 },
	{ _MMIO(0x9888), 0x18530020 },
	{ _MMIO(0x9888), 0x02538000 },
	{ _MMIO(0x9888), 0x0c534000 },
	{ _MMIO(0x9888), 0x0a548000 },
	{ _MMIO(0x9888), 0x04542000 },
	{ _MMIO(0x9888), 0x0e559000 },
	{ _MMIO(0x9888), 0x00559800 },
	{ _MMIO(0x9888), 0x10550000 },
	{ _MMIO(0x9888), 0x1b8aa000 },
	{ _MMIO(0x9888), 0x1d8a0002 },
	{ _MMIO(0x9888), 0x038a8000 },
	{ _MMIO(0x9888), 0x058a8000 },
	{ _MMIO(0x9888), 0x078a8000 },
	{ _MMIO(0x9888), 0x098a8000 },
	{ _MMIO(0x9888), 0x0b8a8000 },
	{ _MMIO(0x9888), 0x0d8a8000 },
	{ _MMIO(0x9888), 0x278b0015 },
	{ _MMIO(0x9888), 0x238b2a80 },
	{ _MMIO(0x9888), 0x258b0005 },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x2185000a },
	{ _MMIO(0x9888), 0x1b830150 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0d848000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x07844000 },
	{ _MMIO(0x9888), 0x1d808000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x17804000 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x47801021 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f800c64 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x41800c02 },
};
1549
1550static int
1551get_sampler_2_mux_config(struct drm_i915_private *dev_priv,
1552 const struct i915_oa_reg **regs,
1553 int *lens)
1554{
1555 int n = 0;
1556
1557 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1558 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1559
1560 regs[n] = mux_config_sampler_2;
1561 lens[n] = ARRAY_SIZE(mux_config_sampler_2);
1562 n++;
1563
1564 return n;
1565}
1566
/*
 * Boolean (B) counter configuration for the "tdl_1" metric set: (register,
 * value) pairs for OA addresses 0x2710-0x279c.  Generated data — do not
 * hand-edit.
 */
static const struct i915_oa_reg b_counter_config_tdl_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000fdff },
	{ _MMIO(0x2778), 0x00000000 },
	{ _MMIO(0x277c), 0x0000fe7f },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000ffbf },
	{ _MMIO(0x2788), 0x00000000 },
	{ _MMIO(0x278c), 0x0000ffcf },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000fff7 },
	{ _MMIO(0x2798), 0x00000000 },
	{ _MMIO(0x279c), 0x0000fff9 },
};
1587
/*
 * Flexible EU counter configuration for the "tdl_1" metric set: (register,
 * value) pairs for the 0xe4xx-0xe7xx range.  Identical across the metric
 * sets in this file; generated data.
 */
static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1597
1598static const struct i915_oa_reg mux_config_tdl_1[] = {
1599 { _MMIO(0x9888), 0x16154d60 },
1600 { _MMIO(0x9888), 0x16352e60 },
1601 { _MMIO(0x9888), 0x16554d60 },
1602 { _MMIO(0x9888), 0x16950000 },
1603 { _MMIO(0x9888), 0x16b50000 },
1604 { _MMIO(0x9888), 0x16d50000 },
1605 { _MMIO(0x9888), 0x005c8000 },
1606 { _MMIO(0x9888), 0x045cc000 },
1607 { _MMIO(0x9888), 0x065c4000 },
1608 { _MMIO(0x9888), 0x083d8000 },
1609 { _MMIO(0x9888), 0x0a3d8000 },
1610 { _MMIO(0x9888), 0x0458c000 },
1611 { _MMIO(0x9888), 0x025b8000 },
1612 { _MMIO(0x9888), 0x085b4000 },
1613 { _MMIO(0x9888), 0x0a5b4000 },
1614 { _MMIO(0x9888), 0x0c5b8000 },
1615 { _MMIO(0x9888), 0x0c1fa000 },
1616 { _MMIO(0x9888), 0x0e1f00aa },
1617 { _MMIO(0x9888), 0x02384000 },
1618 { _MMIO(0x9888), 0x04388000 },
1619 { _MMIO(0x9888), 0x06388000 },
1620 { _MMIO(0x9888), 0x08384000 },
1621 { _MMIO(0x9888), 0x0a384000 },
1622 { _MMIO(0x9888), 0x0c384000 },
1623 { _MMIO(0x9888), 0x00398000 },
1624 { _MMIO(0x9888), 0x0239a000 },
1625 { _MMIO(0x9888), 0x0439a000 },
1626 { _MMIO(0x9888), 0x06392000 },
1627 { _MMIO(0x9888), 0x043a8000 },
1628 { _MMIO(0x9888), 0x063a8000 },
1629 { _MMIO(0x9888), 0x08138000 },
1630 { _MMIO(0x9888), 0x0a138000 },
1631 { _MMIO(0x9888), 0x06143000 },
1632 { _MMIO(0x9888), 0x0415cfc7 },
1633 { _MMIO(0x9888), 0x10150000 },
1634 { _MMIO(0x9888), 0x02338000 },
1635 { _MMIO(0x9888), 0x0c338000 },
1636 { _MMIO(0x9888), 0x04342000 },
1637 { _MMIO(0x9888), 0x06344000 },
1638 { _MMIO(0x9888), 0x0035c700 },
1639 { _MMIO(0x9888), 0x063500cf },
1640 { _MMIO(0x9888), 0x10350000 },
1641 { _MMIO(0x9888), 0x04538000 },
1642 { _MMIO(0x9888), 0x06538000 },
1643 { _MMIO(0x9888), 0x0454c000 },
1644 { _MMIO(0x9888), 0x0255cfc7 },
1645 { _MMIO(0x9888), 0x10550000 },
1646 { _MMIO(0x9888), 0x06dc8000 },
1647 { _MMIO(0x9888), 0x08dc4000 },
1648 { _MMIO(0x9888), 0x0cdcc000 },
1649 { _MMIO(0x9888), 0x0edcc000 },
1650 { _MMIO(0x9888), 0x1abd00a8 },
1651 { _MMIO(0x9888), 0x0cd8c000 },
1652 { _MMIO(0x9888), 0x0ed84000 },
1653 { _MMIO(0x9888), 0x0edb8000 },
1654 { _MMIO(0x9888), 0x18db0800 },
1655 { _MMIO(0x9888), 0x1adb0254 },
1656 { _MMIO(0x9888), 0x0e9faa00 },
1657 { _MMIO(0x9888), 0x109f02aa },
1658 { _MMIO(0x9888), 0x0eb84000 },
1659 { _MMIO(0x9888), 0x16b84000 },
1660 { _MMIO(0x9888), 0x18b8156a },
1661 { _MMIO(0x9888), 0x06b98000 },
1662 { _MMIO(0x9888), 0x08b9a000 },
1663 { _MMIO(0x9888), 0x0ab9a000 },
1664 { _MMIO(0x9888), 0x0cb9a000 },
1665 { _MMIO(0x9888), 0x0eb9a000 },
1666 { _MMIO(0x9888), 0x18baa000 },
1667 { _MMIO(0x9888), 0x1aba0002 },
1668 { _MMIO(0x9888), 0x16934000 },
1669 { _MMIO(0x9888), 0x1893000a },
1670 { _MMIO(0x9888), 0x0a947000 },
1671 { _MMIO(0x9888), 0x0c95c5c1 },
1672 { _MMIO(0x9888), 0x0e9500c3 },
1673 { _MMIO(0x9888), 0x10950000 },
1674 { _MMIO(0x9888), 0x0eb38000 },
1675 { _MMIO(0x9888), 0x16b30040 },
1676 { _MMIO(0x9888), 0x18b30020 },
1677 { _MMIO(0x9888), 0x06b48000 },
1678 { _MMIO(0x9888), 0x08b41000 },
1679 { _MMIO(0x9888), 0x0ab48000 },
1680 { _MMIO(0x9888), 0x06b5c500 },
1681 { _MMIO(0x9888), 0x08b500c3 },
1682 { _MMIO(0x9888), 0x0eb5c100 },
1683 { _MMIO(0x9888), 0x10b50000 },
1684 { _MMIO(0x9888), 0x16d31500 },
1685 { _MMIO(0x9888), 0x08d4e000 },
1686 { _MMIO(0x9888), 0x08d5c100 },
1687 { _MMIO(0x9888), 0x0ad5c3c5 },
1688 { _MMIO(0x9888), 0x10d50000 },
1689 { _MMIO(0x9888), 0x0d88f800 },
1690 { _MMIO(0x9888), 0x0f88000f },
1691 { _MMIO(0x9888), 0x038a8000 },
1692 { _MMIO(0x9888), 0x058a8000 },
1693 { _MMIO(0x9888), 0x078a8000 },
1694 { _MMIO(0x9888), 0x098a8000 },
1695 { _MMIO(0x9888), 0x0b8a8000 },
1696 { _MMIO(0x9888), 0x0d8a8000 },
1697 { _MMIO(0x9888), 0x258baaa5 },
1698 { _MMIO(0x9888), 0x278b002a },
1699 { _MMIO(0x9888), 0x238b2a80 },
1700 { _MMIO(0x9888), 0x0f8c4000 },
1701 { _MMIO(0x9888), 0x178c2000 },
1702 { _MMIO(0x9888), 0x198c5500 },
1703 { _MMIO(0x9888), 0x1b8c0015 },
1704 { _MMIO(0x9888), 0x078d8000 },
1705 { _MMIO(0x9888), 0x098da000 },
1706 { _MMIO(0x9888), 0x0b8da000 },
1707 { _MMIO(0x9888), 0x0d8da000 },
1708 { _MMIO(0x9888), 0x0f8da000 },
1709 { _MMIO(0x9888), 0x2185aaaa },
1710 { _MMIO(0x9888), 0x2385002a },
1711 { _MMIO(0x9888), 0x1f85aa00 },
1712 { _MMIO(0x9888), 0x0f834000 },
1713 { _MMIO(0x9888), 0x19835400 },
1714 { _MMIO(0x9888), 0x1b830155 },
1715 { _MMIO(0x9888), 0x03834000 },
1716 { _MMIO(0x9888), 0x05834000 },
1717 { _MMIO(0x9888), 0x07834000 },
1718 { _MMIO(0x9888), 0x09834000 },
1719 { _MMIO(0x9888), 0x0b834000 },
1720 { _MMIO(0x9888), 0x0d834000 },
1721 { _MMIO(0x9888), 0x0784c000 },
1722 { _MMIO(0x9888), 0x0984c000 },
1723 { _MMIO(0x9888), 0x0b84c000 },
1724 { _MMIO(0x9888), 0x0d84c000 },
1725 { _MMIO(0x9888), 0x0f84c000 },
1726 { _MMIO(0x9888), 0x01848000 },
1727 { _MMIO(0x9888), 0x0384c000 },
1728 { _MMIO(0x9888), 0x0584c000 },
1729 { _MMIO(0x9888), 0x1780c000 },
1730 { _MMIO(0x9888), 0x1980c000 },
1731 { _MMIO(0x9888), 0x1b80c000 },
1732 { _MMIO(0x9888), 0x1d80c000 },
1733 { _MMIO(0x9888), 0x1f80c000 },
1734 { _MMIO(0x9888), 0x11808000 },
1735 { _MMIO(0x9888), 0x1380c000 },
1736 { _MMIO(0x9888), 0x1580c000 },
1737 { _MMIO(0x9888), 0x4f800000 },
1738 { _MMIO(0x9888), 0x43800c42 },
1739 { _MMIO(0x9888), 0x51800000 },
1740 { _MMIO(0x9888), 0x45800063 },
1741 { _MMIO(0x9888), 0x53800000 },
1742 { _MMIO(0x9888), 0x47800800 },
1743 { _MMIO(0x9888), 0x21800000 },
1744 { _MMIO(0x9888), 0x31800000 },
1745 { _MMIO(0x9888), 0x4d800000 },
1746 { _MMIO(0x9888), 0x3f8014a4 },
1747 { _MMIO(0x9888), 0x41801042 },
1748};
1749
1750static int
1751get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
1752 const struct i915_oa_reg **regs,
1753 int *lens)
1754{
1755 int n = 0;
1756
1757 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1758 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1759
1760 regs[n] = mux_config_tdl_1;
1761 lens[n] = ARRAY_SIZE(mux_config_tdl_1);
1762 n++;
1763
1764 return n;
1765}
1766
/* Boolean-counter (OA B-counter) register/value pairs installed by
 * i915_oa_select_metric_set_chv() when the TDL_2 metric set is selected.
 */
static const struct i915_oa_reg b_counter_config_tdl_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000fdff },
	{ _MMIO(0x2778), 0x00000000 },
	{ _MMIO(0x277c), 0x0000fe7f },
	{ _MMIO(0x2780), 0x00000000 },
	{ _MMIO(0x2784), 0x0000ff9f },
	{ _MMIO(0x2788), 0x00000000 },
	{ _MMIO(0x278c), 0x0000ffe7 },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000fffb },
	{ _MMIO(0x2798), 0x00000002 },
	{ _MMIO(0x279c), 0x0000fffd },
};
1787
/* Flex-EU register/value pairs installed by i915_oa_select_metric_set_chv()
 * when the TDL_2 metric set is selected.
 */
static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1797
/* MUX register/value pairs for the TDL_2 metric set; handed to the caller
 * by get_tdl_2_mux_config(). All writes target register 0x9888.
 */
static const struct i915_oa_reg mux_config_tdl_2[] = {
	{ _MMIO(0x9888), 0x16150000 },
	{ _MMIO(0x9888), 0x16350000 },
	{ _MMIO(0x9888), 0x16550000 },
	{ _MMIO(0x9888), 0x16952e60 },
	{ _MMIO(0x9888), 0x16b54d60 },
	{ _MMIO(0x9888), 0x16d52e60 },
	{ _MMIO(0x9888), 0x065c8000 },
	{ _MMIO(0x9888), 0x085cc000 },
	{ _MMIO(0x9888), 0x0a5cc000 },
	{ _MMIO(0x9888), 0x0c5c4000 },
	{ _MMIO(0x9888), 0x0e3d8000 },
	{ _MMIO(0x9888), 0x183da000 },
	{ _MMIO(0x9888), 0x06588000 },
	{ _MMIO(0x9888), 0x08588000 },
	{ _MMIO(0x9888), 0x0a584000 },
	{ _MMIO(0x9888), 0x0e5b4000 },
	{ _MMIO(0x9888), 0x185b5800 },
	{ _MMIO(0x9888), 0x1a5b000a },
	{ _MMIO(0x9888), 0x0e1faa00 },
	{ _MMIO(0x9888), 0x101f02aa },
	{ _MMIO(0x9888), 0x0e384000 },
	{ _MMIO(0x9888), 0x16384000 },
	{ _MMIO(0x9888), 0x18382a55 },
	{ _MMIO(0x9888), 0x06398000 },
	{ _MMIO(0x9888), 0x0839a000 },
	{ _MMIO(0x9888), 0x0a39a000 },
	{ _MMIO(0x9888), 0x0c39a000 },
	{ _MMIO(0x9888), 0x0e39a000 },
	{ _MMIO(0x9888), 0x1a3a02a0 },
	{ _MMIO(0x9888), 0x0e138000 },
	{ _MMIO(0x9888), 0x16130500 },
	{ _MMIO(0x9888), 0x06148000 },
	{ _MMIO(0x9888), 0x08146000 },
	{ _MMIO(0x9888), 0x0615c100 },
	{ _MMIO(0x9888), 0x0815c500 },
	{ _MMIO(0x9888), 0x0a1500c3 },
	{ _MMIO(0x9888), 0x10150000 },
	{ _MMIO(0x9888), 0x16335040 },
	{ _MMIO(0x9888), 0x08349000 },
	{ _MMIO(0x9888), 0x0a341000 },
	{ _MMIO(0x9888), 0x083500c1 },
	{ _MMIO(0x9888), 0x0a35c500 },
	{ _MMIO(0x9888), 0x0c3500c3 },
	{ _MMIO(0x9888), 0x10350000 },
	{ _MMIO(0x9888), 0x1853002a },
	{ _MMIO(0x9888), 0x0a54e000 },
	{ _MMIO(0x9888), 0x0c55c500 },
	{ _MMIO(0x9888), 0x0e55c1c3 },
	{ _MMIO(0x9888), 0x10550000 },
	{ _MMIO(0x9888), 0x00dc8000 },
	{ _MMIO(0x9888), 0x02dcc000 },
	{ _MMIO(0x9888), 0x04dc4000 },
	{ _MMIO(0x9888), 0x04bd8000 },
	{ _MMIO(0x9888), 0x06bd8000 },
	{ _MMIO(0x9888), 0x02d8c000 },
	{ _MMIO(0x9888), 0x02db8000 },
	{ _MMIO(0x9888), 0x04db4000 },
	{ _MMIO(0x9888), 0x06db4000 },
	{ _MMIO(0x9888), 0x08db8000 },
	{ _MMIO(0x9888), 0x0c9fa000 },
	{ _MMIO(0x9888), 0x0e9f00aa },
	{ _MMIO(0x9888), 0x02b84000 },
	{ _MMIO(0x9888), 0x04b84000 },
	{ _MMIO(0x9888), 0x06b84000 },
	{ _MMIO(0x9888), 0x08b84000 },
	{ _MMIO(0x9888), 0x0ab88000 },
	{ _MMIO(0x9888), 0x0cb88000 },
	{ _MMIO(0x9888), 0x00b98000 },
	{ _MMIO(0x9888), 0x02b9a000 },
	{ _MMIO(0x9888), 0x04b9a000 },
	{ _MMIO(0x9888), 0x06b92000 },
	{ _MMIO(0x9888), 0x0aba8000 },
	{ _MMIO(0x9888), 0x0cba8000 },
	{ _MMIO(0x9888), 0x04938000 },
	{ _MMIO(0x9888), 0x06938000 },
	{ _MMIO(0x9888), 0x0494c000 },
	{ _MMIO(0x9888), 0x0295cfc7 },
	{ _MMIO(0x9888), 0x10950000 },
	{ _MMIO(0x9888), 0x02b38000 },
	{ _MMIO(0x9888), 0x08b38000 },
	{ _MMIO(0x9888), 0x04b42000 },
	{ _MMIO(0x9888), 0x06b41000 },
	{ _MMIO(0x9888), 0x00b5c700 },
	{ _MMIO(0x9888), 0x04b500cf },
	{ _MMIO(0x9888), 0x10b50000 },
	{ _MMIO(0x9888), 0x0ad38000 },
	{ _MMIO(0x9888), 0x0cd38000 },
	{ _MMIO(0x9888), 0x06d46000 },
	{ _MMIO(0x9888), 0x04d5c700 },
	{ _MMIO(0x9888), 0x06d500cf },
	{ _MMIO(0x9888), 0x10d50000 },
	{ _MMIO(0x9888), 0x03888000 },
	{ _MMIO(0x9888), 0x05888000 },
	{ _MMIO(0x9888), 0x07888000 },
	{ _MMIO(0x9888), 0x09888000 },
	{ _MMIO(0x9888), 0x0b888000 },
	{ _MMIO(0x9888), 0x0d880400 },
	{ _MMIO(0x9888), 0x0f8a8000 },
	{ _MMIO(0x9888), 0x198a8000 },
	{ _MMIO(0x9888), 0x1b8aaaa0 },
	{ _MMIO(0x9888), 0x1d8a0002 },
	{ _MMIO(0x9888), 0x258b555a },
	{ _MMIO(0x9888), 0x278b0015 },
	{ _MMIO(0x9888), 0x238b5500 },
	{ _MMIO(0x9888), 0x038c4000 },
	{ _MMIO(0x9888), 0x058c4000 },
	{ _MMIO(0x9888), 0x078c4000 },
	{ _MMIO(0x9888), 0x098c4000 },
	{ _MMIO(0x9888), 0x0b8c4000 },
	{ _MMIO(0x9888), 0x0d8c4000 },
	{ _MMIO(0x9888), 0x018d8000 },
	{ _MMIO(0x9888), 0x038da000 },
	{ _MMIO(0x9888), 0x058da000 },
	{ _MMIO(0x9888), 0x078d2000 },
	{ _MMIO(0x9888), 0x2185aaaa },
	{ _MMIO(0x9888), 0x2385002a },
	{ _MMIO(0x9888), 0x1f85aa00 },
	{ _MMIO(0x9888), 0x0f834000 },
	{ _MMIO(0x9888), 0x19835400 },
	{ _MMIO(0x9888), 0x1b830155 },
	{ _MMIO(0x9888), 0x03834000 },
	{ _MMIO(0x9888), 0x05834000 },
	{ _MMIO(0x9888), 0x07834000 },
	{ _MMIO(0x9888), 0x09834000 },
	{ _MMIO(0x9888), 0x0b834000 },
	{ _MMIO(0x9888), 0x0d834000 },
	{ _MMIO(0x9888), 0x0784c000 },
	{ _MMIO(0x9888), 0x0984c000 },
	{ _MMIO(0x9888), 0x0b84c000 },
	{ _MMIO(0x9888), 0x0d84c000 },
	{ _MMIO(0x9888), 0x0f84c000 },
	{ _MMIO(0x9888), 0x01848000 },
	{ _MMIO(0x9888), 0x0384c000 },
	{ _MMIO(0x9888), 0x0584c000 },
	{ _MMIO(0x9888), 0x1780c000 },
	{ _MMIO(0x9888), 0x1980c000 },
	{ _MMIO(0x9888), 0x1b80c000 },
	{ _MMIO(0x9888), 0x1d80c000 },
	{ _MMIO(0x9888), 0x1f80c000 },
	{ _MMIO(0x9888), 0x11808000 },
	{ _MMIO(0x9888), 0x1380c000 },
	{ _MMIO(0x9888), 0x1580c000 },
	{ _MMIO(0x9888), 0x4f800000 },
	{ _MMIO(0x9888), 0x43800882 },
	{ _MMIO(0x9888), 0x51800000 },
	{ _MMIO(0x9888), 0x45801082 },
	{ _MMIO(0x9888), 0x53800000 },
	{ _MMIO(0x9888), 0x478014a5 },
	{ _MMIO(0x9888), 0x21800000 },
	{ _MMIO(0x9888), 0x31800000 },
	{ _MMIO(0x9888), 0x4d800000 },
	{ _MMIO(0x9888), 0x3f800002 },
	{ _MMIO(0x9888), 0x41800c62 },
};
1953
1954static int
1955get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
1956 const struct i915_oa_reg **regs,
1957 int *lens)
1958{
1959 int n = 0;
1960
1961 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1962 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1963
1964 regs[n] = mux_config_tdl_2;
1965 lens[n] = ARRAY_SIZE(mux_config_tdl_2);
1966 n++;
1967
1968 return n;
1969}
1970
/* Boolean-counter (OA B-counter) register/value pairs installed by
 * i915_oa_select_metric_set_chv() when the TEST_OA metric set is selected.
 */
static const struct i915_oa_reg b_counter_config_test_oa[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2770), 0x00000004 },
	{ _MMIO(0x2774), 0x00000000 },
	{ _MMIO(0x2778), 0x00000003 },
	{ _MMIO(0x277c), 0x00000000 },
	{ _MMIO(0x2780), 0x00000007 },
	{ _MMIO(0x2784), 0x00000000 },
	{ _MMIO(0x2788), 0x00100002 },
	{ _MMIO(0x278c), 0x0000fff7 },
	{ _MMIO(0x2790), 0x00100002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00100082 },
	{ _MMIO(0x279c), 0x0000ffef },
	{ _MMIO(0x27a0), 0x001000c2 },
	{ _MMIO(0x27a4), 0x0000ffe7 },
	{ _MMIO(0x27a8), 0x00100001 },
	{ _MMIO(0x27ac), 0x0000ffe7 },
};
1995
/* The TEST_OA metric set programs no flex-EU registers; kept so the
 * selection code can treat every metric set uniformly.
 */
static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
1998
/* MUX register/value pairs for the TEST_OA metric set; handed to the
 * caller by get_test_oa_mux_config(). Note the one non-0x9888 write
 * (register 0x1823a4) in the middle of the sequence.
 */
static const struct i915_oa_reg mux_config_test_oa[] = {
	{ _MMIO(0x9888), 0x59800000 },
	{ _MMIO(0x9888), 0x59800001 },
	{ _MMIO(0x9888), 0x338b0000 },
	{ _MMIO(0x9888), 0x258b0066 },
	{ _MMIO(0x9888), 0x058b0000 },
	{ _MMIO(0x9888), 0x038b0000 },
	{ _MMIO(0x9888), 0x03844000 },
	{ _MMIO(0x9888), 0x47800080 },
	{ _MMIO(0x9888), 0x57800000 },
	{ _MMIO(0x1823a4), 0x00000000 },
	{ _MMIO(0x9888), 0x59800000 },
};
2012
2013static int
2014get_test_oa_mux_config(struct drm_i915_private *dev_priv,
2015 const struct i915_oa_reg **regs,
2016 int *lens)
2017{
2018 int n = 0;
2019
2020 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
2021 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
2022
2023 regs[n] = mux_config_test_oa;
2024 lens[n] = ARRAY_SIZE(mux_config_test_oa);
2025 n++;
2026
2027 return n;
2028}
2029
2030int i915_oa_select_metric_set_chv(struct drm_i915_private *dev_priv)
2031{
2032 dev_priv->perf.oa.n_mux_configs = 0;
2033 dev_priv->perf.oa.b_counter_regs = NULL;
2034 dev_priv->perf.oa.b_counter_regs_len = 0;
2035 dev_priv->perf.oa.flex_regs = NULL;
2036 dev_priv->perf.oa.flex_regs_len = 0;
2037
2038 switch (dev_priv->perf.oa.metrics_set) {
2039 case METRIC_SET_ID_RENDER_BASIC:
2040 dev_priv->perf.oa.n_mux_configs =
2041 get_render_basic_mux_config(dev_priv,
2042 dev_priv->perf.oa.mux_regs,
2043 dev_priv->perf.oa.mux_regs_lens);
2044 if (dev_priv->perf.oa.n_mux_configs == 0) {
2045 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
2046
2047 /* EINVAL because *_register_sysfs already checked this
2048 * and so it wouldn't have been advertised to userspace and
2049 * so shouldn't have been requested
2050 */
2051 return -EINVAL;
2052 }
2053
2054 dev_priv->perf.oa.b_counter_regs =
2055 b_counter_config_render_basic;
2056 dev_priv->perf.oa.b_counter_regs_len =
2057 ARRAY_SIZE(b_counter_config_render_basic);
2058
2059 dev_priv->perf.oa.flex_regs =
2060 flex_eu_config_render_basic;
2061 dev_priv->perf.oa.flex_regs_len =
2062 ARRAY_SIZE(flex_eu_config_render_basic);
2063
2064 return 0;
2065 case METRIC_SET_ID_COMPUTE_BASIC:
2066 dev_priv->perf.oa.n_mux_configs =
2067 get_compute_basic_mux_config(dev_priv,
2068 dev_priv->perf.oa.mux_regs,
2069 dev_priv->perf.oa.mux_regs_lens);
2070 if (dev_priv->perf.oa.n_mux_configs == 0) {
2071 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
2072
2073 /* EINVAL because *_register_sysfs already checked this
2074 * and so it wouldn't have been advertised to userspace and
2075 * so shouldn't have been requested
2076 */
2077 return -EINVAL;
2078 }
2079
2080 dev_priv->perf.oa.b_counter_regs =
2081 b_counter_config_compute_basic;
2082 dev_priv->perf.oa.b_counter_regs_len =
2083 ARRAY_SIZE(b_counter_config_compute_basic);
2084
2085 dev_priv->perf.oa.flex_regs =
2086 flex_eu_config_compute_basic;
2087 dev_priv->perf.oa.flex_regs_len =
2088 ARRAY_SIZE(flex_eu_config_compute_basic);
2089
2090 return 0;
2091 case METRIC_SET_ID_RENDER_PIPE_PROFILE:
2092 dev_priv->perf.oa.n_mux_configs =
2093 get_render_pipe_profile_mux_config(dev_priv,
2094 dev_priv->perf.oa.mux_regs,
2095 dev_priv->perf.oa.mux_regs_lens);
2096 if (dev_priv->perf.oa.n_mux_configs == 0) {
2097 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
2098
2099 /* EINVAL because *_register_sysfs already checked this
2100 * and so it wouldn't have been advertised to userspace and
2101 * so shouldn't have been requested
2102 */
2103 return -EINVAL;
2104 }
2105
2106 dev_priv->perf.oa.b_counter_regs =
2107 b_counter_config_render_pipe_profile;
2108 dev_priv->perf.oa.b_counter_regs_len =
2109 ARRAY_SIZE(b_counter_config_render_pipe_profile);
2110
2111 dev_priv->perf.oa.flex_regs =
2112 flex_eu_config_render_pipe_profile;
2113 dev_priv->perf.oa.flex_regs_len =
2114 ARRAY_SIZE(flex_eu_config_render_pipe_profile);
2115
2116 return 0;
2117 case METRIC_SET_ID_HDC_AND_SF:
2118 dev_priv->perf.oa.n_mux_configs =
2119 get_hdc_and_sf_mux_config(dev_priv,
2120 dev_priv->perf.oa.mux_regs,
2121 dev_priv->perf.oa.mux_regs_lens);
2122 if (dev_priv->perf.oa.n_mux_configs == 0) {
2123 DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
2124
2125 /* EINVAL because *_register_sysfs already checked this
2126 * and so it wouldn't have been advertised to userspace and
2127 * so shouldn't have been requested
2128 */
2129 return -EINVAL;
2130 }
2131
2132 dev_priv->perf.oa.b_counter_regs =
2133 b_counter_config_hdc_and_sf;
2134 dev_priv->perf.oa.b_counter_regs_len =
2135 ARRAY_SIZE(b_counter_config_hdc_and_sf);
2136
2137 dev_priv->perf.oa.flex_regs =
2138 flex_eu_config_hdc_and_sf;
2139 dev_priv->perf.oa.flex_regs_len =
2140 ARRAY_SIZE(flex_eu_config_hdc_and_sf);
2141
2142 return 0;
2143 case METRIC_SET_ID_L3_1:
2144 dev_priv->perf.oa.n_mux_configs =
2145 get_l3_1_mux_config(dev_priv,
2146 dev_priv->perf.oa.mux_regs,
2147 dev_priv->perf.oa.mux_regs_lens);
2148 if (dev_priv->perf.oa.n_mux_configs == 0) {
2149 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
2150
2151 /* EINVAL because *_register_sysfs already checked this
2152 * and so it wouldn't have been advertised to userspace and
2153 * so shouldn't have been requested
2154 */
2155 return -EINVAL;
2156 }
2157
2158 dev_priv->perf.oa.b_counter_regs =
2159 b_counter_config_l3_1;
2160 dev_priv->perf.oa.b_counter_regs_len =
2161 ARRAY_SIZE(b_counter_config_l3_1);
2162
2163 dev_priv->perf.oa.flex_regs =
2164 flex_eu_config_l3_1;
2165 dev_priv->perf.oa.flex_regs_len =
2166 ARRAY_SIZE(flex_eu_config_l3_1);
2167
2168 return 0;
2169 case METRIC_SET_ID_L3_2:
2170 dev_priv->perf.oa.n_mux_configs =
2171 get_l3_2_mux_config(dev_priv,
2172 dev_priv->perf.oa.mux_regs,
2173 dev_priv->perf.oa.mux_regs_lens);
2174 if (dev_priv->perf.oa.n_mux_configs == 0) {
2175 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
2176
2177 /* EINVAL because *_register_sysfs already checked this
2178 * and so it wouldn't have been advertised to userspace and
2179 * so shouldn't have been requested
2180 */
2181 return -EINVAL;
2182 }
2183
2184 dev_priv->perf.oa.b_counter_regs =
2185 b_counter_config_l3_2;
2186 dev_priv->perf.oa.b_counter_regs_len =
2187 ARRAY_SIZE(b_counter_config_l3_2);
2188
2189 dev_priv->perf.oa.flex_regs =
2190 flex_eu_config_l3_2;
2191 dev_priv->perf.oa.flex_regs_len =
2192 ARRAY_SIZE(flex_eu_config_l3_2);
2193
2194 return 0;
2195 case METRIC_SET_ID_L3_3:
2196 dev_priv->perf.oa.n_mux_configs =
2197 get_l3_3_mux_config(dev_priv,
2198 dev_priv->perf.oa.mux_regs,
2199 dev_priv->perf.oa.mux_regs_lens);
2200 if (dev_priv->perf.oa.n_mux_configs == 0) {
2201 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
2202
2203 /* EINVAL because *_register_sysfs already checked this
2204 * and so it wouldn't have been advertised to userspace and
2205 * so shouldn't have been requested
2206 */
2207 return -EINVAL;
2208 }
2209
2210 dev_priv->perf.oa.b_counter_regs =
2211 b_counter_config_l3_3;
2212 dev_priv->perf.oa.b_counter_regs_len =
2213 ARRAY_SIZE(b_counter_config_l3_3);
2214
2215 dev_priv->perf.oa.flex_regs =
2216 flex_eu_config_l3_3;
2217 dev_priv->perf.oa.flex_regs_len =
2218 ARRAY_SIZE(flex_eu_config_l3_3);
2219
2220 return 0;
2221 case METRIC_SET_ID_L3_4:
2222 dev_priv->perf.oa.n_mux_configs =
2223 get_l3_4_mux_config(dev_priv,
2224 dev_priv->perf.oa.mux_regs,
2225 dev_priv->perf.oa.mux_regs_lens);
2226 if (dev_priv->perf.oa.n_mux_configs == 0) {
2227 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_4\" metric set\n");
2228
2229 /* EINVAL because *_register_sysfs already checked this
2230 * and so it wouldn't have been advertised to userspace and
2231 * so shouldn't have been requested
2232 */
2233 return -EINVAL;
2234 }
2235
2236 dev_priv->perf.oa.b_counter_regs =
2237 b_counter_config_l3_4;
2238 dev_priv->perf.oa.b_counter_regs_len =
2239 ARRAY_SIZE(b_counter_config_l3_4);
2240
2241 dev_priv->perf.oa.flex_regs =
2242 flex_eu_config_l3_4;
2243 dev_priv->perf.oa.flex_regs_len =
2244 ARRAY_SIZE(flex_eu_config_l3_4);
2245
2246 return 0;
2247 case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
2248 dev_priv->perf.oa.n_mux_configs =
2249 get_rasterizer_and_pixel_backend_mux_config(dev_priv,
2250 dev_priv->perf.oa.mux_regs,
2251 dev_priv->perf.oa.mux_regs_lens);
2252 if (dev_priv->perf.oa.n_mux_configs == 0) {
2253 DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
2254
2255 /* EINVAL because *_register_sysfs already checked this
2256 * and so it wouldn't have been advertised to userspace and
2257 * so shouldn't have been requested
2258 */
2259 return -EINVAL;
2260 }
2261
2262 dev_priv->perf.oa.b_counter_regs =
2263 b_counter_config_rasterizer_and_pixel_backend;
2264 dev_priv->perf.oa.b_counter_regs_len =
2265 ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
2266
2267 dev_priv->perf.oa.flex_regs =
2268 flex_eu_config_rasterizer_and_pixel_backend;
2269 dev_priv->perf.oa.flex_regs_len =
2270 ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
2271
2272 return 0;
2273 case METRIC_SET_ID_SAMPLER_1:
2274 dev_priv->perf.oa.n_mux_configs =
2275 get_sampler_1_mux_config(dev_priv,
2276 dev_priv->perf.oa.mux_regs,
2277 dev_priv->perf.oa.mux_regs_lens);
2278 if (dev_priv->perf.oa.n_mux_configs == 0) {
2279 DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_1\" metric set\n");
2280
2281 /* EINVAL because *_register_sysfs already checked this
2282 * and so it wouldn't have been advertised to userspace and
2283 * so shouldn't have been requested
2284 */
2285 return -EINVAL;
2286 }
2287
2288 dev_priv->perf.oa.b_counter_regs =
2289 b_counter_config_sampler_1;
2290 dev_priv->perf.oa.b_counter_regs_len =
2291 ARRAY_SIZE(b_counter_config_sampler_1);
2292
2293 dev_priv->perf.oa.flex_regs =
2294 flex_eu_config_sampler_1;
2295 dev_priv->perf.oa.flex_regs_len =
2296 ARRAY_SIZE(flex_eu_config_sampler_1);
2297
2298 return 0;
2299 case METRIC_SET_ID_SAMPLER_2:
2300 dev_priv->perf.oa.n_mux_configs =
2301 get_sampler_2_mux_config(dev_priv,
2302 dev_priv->perf.oa.mux_regs,
2303 dev_priv->perf.oa.mux_regs_lens);
2304 if (dev_priv->perf.oa.n_mux_configs == 0) {
2305 DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_2\" metric set\n");
2306
2307 /* EINVAL because *_register_sysfs already checked this
2308 * and so it wouldn't have been advertised to userspace and
2309 * so shouldn't have been requested
2310 */
2311 return -EINVAL;
2312 }
2313
2314 dev_priv->perf.oa.b_counter_regs =
2315 b_counter_config_sampler_2;
2316 dev_priv->perf.oa.b_counter_regs_len =
2317 ARRAY_SIZE(b_counter_config_sampler_2);
2318
2319 dev_priv->perf.oa.flex_regs =
2320 flex_eu_config_sampler_2;
2321 dev_priv->perf.oa.flex_regs_len =
2322 ARRAY_SIZE(flex_eu_config_sampler_2);
2323
2324 return 0;
2325 case METRIC_SET_ID_TDL_1:
2326 dev_priv->perf.oa.n_mux_configs =
2327 get_tdl_1_mux_config(dev_priv,
2328 dev_priv->perf.oa.mux_regs,
2329 dev_priv->perf.oa.mux_regs_lens);
2330 if (dev_priv->perf.oa.n_mux_configs == 0) {
2331 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
2332
2333 /* EINVAL because *_register_sysfs already checked this
2334 * and so it wouldn't have been advertised to userspace and
2335 * so shouldn't have been requested
2336 */
2337 return -EINVAL;
2338 }
2339
2340 dev_priv->perf.oa.b_counter_regs =
2341 b_counter_config_tdl_1;
2342 dev_priv->perf.oa.b_counter_regs_len =
2343 ARRAY_SIZE(b_counter_config_tdl_1);
2344
2345 dev_priv->perf.oa.flex_regs =
2346 flex_eu_config_tdl_1;
2347 dev_priv->perf.oa.flex_regs_len =
2348 ARRAY_SIZE(flex_eu_config_tdl_1);
2349
2350 return 0;
2351 case METRIC_SET_ID_TDL_2:
2352 dev_priv->perf.oa.n_mux_configs =
2353 get_tdl_2_mux_config(dev_priv,
2354 dev_priv->perf.oa.mux_regs,
2355 dev_priv->perf.oa.mux_regs_lens);
2356 if (dev_priv->perf.oa.n_mux_configs == 0) {
2357 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
2358
2359 /* EINVAL because *_register_sysfs already checked this
2360 * and so it wouldn't have been advertised to userspace and
2361 * so shouldn't have been requested
2362 */
2363 return -EINVAL;
2364 }
2365
2366 dev_priv->perf.oa.b_counter_regs =
2367 b_counter_config_tdl_2;
2368 dev_priv->perf.oa.b_counter_regs_len =
2369 ARRAY_SIZE(b_counter_config_tdl_2);
2370
2371 dev_priv->perf.oa.flex_regs =
2372 flex_eu_config_tdl_2;
2373 dev_priv->perf.oa.flex_regs_len =
2374 ARRAY_SIZE(flex_eu_config_tdl_2);
2375
2376 return 0;
2377 case METRIC_SET_ID_TEST_OA:
2378 dev_priv->perf.oa.n_mux_configs =
2379 get_test_oa_mux_config(dev_priv,
2380 dev_priv->perf.oa.mux_regs,
2381 dev_priv->perf.oa.mux_regs_lens);
2382 if (dev_priv->perf.oa.n_mux_configs == 0) {
2383 DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
2384
2385 /* EINVAL because *_register_sysfs already checked this
2386 * and so it wouldn't have been advertised to userspace and
2387 * so shouldn't have been requested
2388 */
2389 return -EINVAL;
2390 }
2391
2392 dev_priv->perf.oa.b_counter_regs =
2393 b_counter_config_test_oa;
2394 dev_priv->perf.oa.b_counter_regs_len =
2395 ARRAY_SIZE(b_counter_config_test_oa);
2396
2397 dev_priv->perf.oa.flex_regs =
2398 flex_eu_config_test_oa;
2399 dev_priv->perf.oa.flex_regs_len =
2400 ARRAY_SIZE(flex_eu_config_test_oa);
2401
2402 return 0;
2403 default:
2404 return -ENODEV;
2405 }
2406}
2407
/* sysfs "id" read handler: reports METRIC_SET_ID_RENDER_BASIC, the value
 * i915_oa_select_metric_set_chv() switches on.
 */
static ssize_t
show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
}

/* Read-only "id" attribute file backed by the handler above. */
static struct device_attribute dev_attr_render_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_render_basic_id,
	.store = NULL,
};

static struct attribute *attrs_render_basic[] = {
	&dev_attr_render_basic_id.attr,
	NULL,
};

/* Attribute group named with this metric set's GUID. */
static struct attribute_group group_render_basic = {
	.name = "9d8a3af5-c02c-4a4a-b947-f1672469e0fb",
	.attrs = attrs_render_basic,
};
2429
/* sysfs "id" read handler: reports METRIC_SET_ID_COMPUTE_BASIC. */
static ssize_t
show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
}

/* Read-only "id" attribute file backed by the handler above. */
static struct device_attribute dev_attr_compute_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_basic_id,
	.store = NULL,
};

static struct attribute *attrs_compute_basic[] = {
	&dev_attr_compute_basic_id.attr,
	NULL,
};

/* Attribute group named with this metric set's GUID. */
static struct attribute_group group_compute_basic = {
	.name = "f522a89c-ecd1-4522-8331-3383c54af5f5",
	.attrs = attrs_compute_basic,
};
2451
/* sysfs "id" read handler: reports METRIC_SET_ID_RENDER_PIPE_PROFILE. */
static ssize_t
show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
}

/* Read-only "id" attribute file backed by the handler above. */
static struct device_attribute dev_attr_render_pipe_profile_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_render_pipe_profile_id,
	.store = NULL,
};

static struct attribute *attrs_render_pipe_profile[] = {
	&dev_attr_render_pipe_profile_id.attr,
	NULL,
};

/* Attribute group named with this metric set's GUID. */
static struct attribute_group group_render_pipe_profile = {
	.name = "a9ccc03d-a943-4e6b-9cd6-13e063075927",
	.attrs = attrs_render_pipe_profile,
};
2473
/* sysfs "id" read handler: reports METRIC_SET_ID_HDC_AND_SF. */
static ssize_t
show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
}

/* Read-only "id" attribute file backed by the handler above. */
static struct device_attribute dev_attr_hdc_and_sf_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_hdc_and_sf_id,
	.store = NULL,
};

static struct attribute *attrs_hdc_and_sf[] = {
	&dev_attr_hdc_and_sf_id.attr,
	NULL,
};

/* Attribute group named with this metric set's GUID. */
static struct attribute_group group_hdc_and_sf = {
	.name = "2cf0c064-68df-4fac-9b3f-57f51ca8a069",
	.attrs = attrs_hdc_and_sf,
};
2495
/* sysfs "id" read handler: reports METRIC_SET_ID_L3_1. */
static ssize_t
show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
}

/* Read-only "id" attribute file backed by the handler above. */
static struct device_attribute dev_attr_l3_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_1_id,
	.store = NULL,
};

static struct attribute *attrs_l3_1[] = {
	&dev_attr_l3_1_id.attr,
	NULL,
};

/* Attribute group named with this metric set's GUID. */
static struct attribute_group group_l3_1 = {
	.name = "78a87ff9-543a-49ce-95ea-26d86071ea93",
	.attrs = attrs_l3_1,
};
2517
/* sysfs "id" read handler: reports METRIC_SET_ID_L3_2. */
static ssize_t
show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
}

/* Read-only "id" attribute file backed by the handler above. */
static struct device_attribute dev_attr_l3_2_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_2_id,
	.store = NULL,
};

static struct attribute *attrs_l3_2[] = {
	&dev_attr_l3_2_id.attr,
	NULL,
};

/* Attribute group named with this metric set's GUID. */
static struct attribute_group group_l3_2 = {
	.name = "9f2cece5-7bfe-4320-ad66-8c7cc526bec5",
	.attrs = attrs_l3_2,
};
2539
/* sysfs "id" read handler: reports METRIC_SET_ID_L3_3. */
static ssize_t
show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
}

/* Read-only "id" attribute file backed by the handler above. */
static struct device_attribute dev_attr_l3_3_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_3_id,
	.store = NULL,
};

static struct attribute *attrs_l3_3[] = {
	&dev_attr_l3_3_id.attr,
	NULL,
};

/* Attribute group named with this metric set's GUID. */
static struct attribute_group group_l3_3 = {
	.name = "d890ef38-d309-47e4-b8b5-aa779bb19ab0",
	.attrs = attrs_l3_3,
};
2561
/* sysfs "id" read handler: reports METRIC_SET_ID_L3_4. */
static ssize_t
show_l3_4_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_4);
}

/* Read-only "id" attribute file backed by the handler above. */
static struct device_attribute dev_attr_l3_4_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_4_id,
	.store = NULL,
};

static struct attribute *attrs_l3_4[] = {
	&dev_attr_l3_4_id.attr,
	NULL,
};

/* Attribute group named with this metric set's GUID. */
static struct attribute_group group_l3_4 = {
	.name = "5fdff4a6-9dc8-45e1-bfda-ef54869fbdd4",
	.attrs = attrs_l3_4,
};
2583
/* sysfs "id" read handler: reports METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND. */
static ssize_t
show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
}

/* Read-only "id" attribute file backed by the handler above. */
static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_rasterizer_and_pixel_backend_id,
	.store = NULL,
};

static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
	&dev_attr_rasterizer_and_pixel_backend_id.attr,
	NULL,
};

/* Attribute group named with this metric set's GUID. */
static struct attribute_group group_rasterizer_and_pixel_backend = {
	.name = "2c0e45e1-7e2c-4a14-ae00-0b7ec868b8aa",
	.attrs = attrs_rasterizer_and_pixel_backend,
};
2605
/* sysfs "id" read handler: reports METRIC_SET_ID_SAMPLER_1. */
static ssize_t
show_sampler_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_1);
}

/* Read-only "id" attribute file backed by the handler above. */
static struct device_attribute dev_attr_sampler_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_sampler_1_id,
	.store = NULL,
};

static struct attribute *attrs_sampler_1[] = {
	&dev_attr_sampler_1_id.attr,
	NULL,
};

/* Attribute group named with this metric set's GUID. */
static struct attribute_group group_sampler_1 = {
	.name = "71148d78-baf5-474f-878a-e23158d0265d",
	.attrs = attrs_sampler_1,
};
2627
/* sysfs "id" read handler: reports METRIC_SET_ID_SAMPLER_2. */
static ssize_t
show_sampler_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_2);
}

/* Read-only "id" attribute file backed by the handler above. */
static struct device_attribute dev_attr_sampler_2_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_sampler_2_id,
	.store = NULL,
};

static struct attribute *attrs_sampler_2[] = {
	&dev_attr_sampler_2_id.attr,
	NULL,
};

/* Attribute group named with this metric set's GUID. */
static struct attribute_group group_sampler_2 = {
	.name = "b996a2b7-c59c-492d-877a-8cd54fd6df84",
	.attrs = attrs_sampler_2,
};
2649
/* sysfs "id" read handler: reports METRIC_SET_ID_TDL_1. */
static ssize_t
show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
}

/* Read-only "id" attribute file backed by the handler above. */
static struct device_attribute dev_attr_tdl_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_tdl_1_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_1[] = {
	&dev_attr_tdl_1_id.attr,
	NULL,
};

/* Attribute group named with this metric set's GUID. */
static struct attribute_group group_tdl_1 = {
	.name = "eb2fecba-b431-42e7-8261-fe9429a6e67a",
	.attrs = attrs_tdl_1,
};
2671
/* sysfs "id" read handler: reports METRIC_SET_ID_TDL_2. */
static ssize_t
show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
}

/* Read-only "id" attribute file backed by the handler above. */
static struct device_attribute dev_attr_tdl_2_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_tdl_2_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_2[] = {
	&dev_attr_tdl_2_id.attr,
	NULL,
};

/* Attribute group named with this metric set's GUID. */
static struct attribute_group group_tdl_2 = {
	.name = "60749470-a648-4a4b-9f10-dbfe1e36e44d",
	.attrs = attrs_tdl_2,
};
2693
2694static ssize_t
2695show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
2696{
2697 return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
2698}
2699
/* sysfs attribute exposing the TestOa metric set ID via show_test_oa_id(). */
static struct device_attribute dev_attr_test_oa_id = {
	.attr = { .name = "id", .mode = 0444 }, /* read-only file named "id" */
	.show = show_test_oa_id,
	.store = NULL, /* read-only: no store handler */
};
2705
/* NULL-terminated attribute list for the TestOa sysfs group. */
static struct attribute *attrs_test_oa[] = {
	&dev_attr_test_oa_id.attr,
	NULL,
};
2710
/* sysfs group for the TestOa set; directory named by the set's UUID string. */
static struct attribute_group group_test_oa = {
	.name = "4a534b07-cba3-414d-8d60-874830e883aa",
	.attrs = attrs_test_oa,
};
2715
/*
 * i915_perf_register_sysfs_chv - advertise the CHV OA metric sets in sysfs.
 *
 * For each metric set whose mux config is available (per its get_*() helper),
 * create an attribute group under perf.metrics_kobj.  Returns 0 on success;
 * on any sysfs_create_group() failure, removes every group created so far in
 * reverse order and returns the error.
 */
int
i915_perf_register_sysfs_chv(struct drm_i915_private *dev_priv)
{
	/*
	 * Scratch buffers: only the boolean availability result of the
	 * get_*_mux_config() calls is used here, not the configs themselves.
	 */
	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
	int ret = 0;

	/* Only advertise sets whose mux config resolves on this device. */
	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
		if (ret)
			goto error_render_basic;
	}
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
		if (ret)
			goto error_compute_basic;
	}
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
		if (ret)
			goto error_render_pipe_profile;
	}
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
		if (ret)
			goto error_hdc_and_sf;
	}
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
		if (ret)
			goto error_l3_1;
	}
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
		if (ret)
			goto error_l3_2;
	}
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
		if (ret)
			goto error_l3_3;
	}
	if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_4);
		if (ret)
			goto error_l3_4;
	}
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
		if (ret)
			goto error_rasterizer_and_pixel_backend;
	}
	if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
		if (ret)
			goto error_sampler_1;
	}
	if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
		if (ret)
			goto error_sampler_2;
	}
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
		if (ret)
			goto error_tdl_1;
	}
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
		if (ret)
			goto error_tdl_2;
	}
	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
		if (ret)
			goto error_test_oa;
	}

	return 0;

	/*
	 * Unwind: each label removes the groups registered before the one
	 * that failed, in strict reverse creation order.  The same get_*()
	 * availability checks gate removal so only created groups are removed.
	 */
error_test_oa:
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
error_tdl_2:
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
error_tdl_1:
	if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
error_sampler_2:
	if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
error_sampler_1:
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
error_rasterizer_and_pixel_backend:
	if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_4);
error_l3_4:
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
error_l3_3:
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
error_l3_2:
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
error_l3_1:
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
error_hdc_and_sf:
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
error_render_pipe_profile:
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
error_compute_basic:
	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
error_render_basic:
	return ret;
}
2838
/*
 * i915_perf_unregister_sysfs_chv - remove the CHV metric-set sysfs groups.
 *
 * Mirror of i915_perf_register_sysfs_chv(): the same get_*() availability
 * checks gate removal so only groups that were actually created (same
 * condition at registration time) are removed.
 */
void
i915_perf_unregister_sysfs_chv(struct drm_i915_private *dev_priv)
{
	/* Scratch buffers; only the availability result of get_*() is used. */
	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];

	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
	if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_4);
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
	if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
	if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
}
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/i915_oa_chv.h
new file mode 100644
index 000000000000..8b8bdc26d726
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_chv.h
@@ -0,0 +1,40 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
#ifndef __I915_OA_CHV_H__
#define __I915_OA_CHV_H__

/* Number of built-in CHV OA metric sets (defined in i915_oa_chv.c). */
extern int i915_oa_n_builtin_metric_sets_chv;

/* Select the CHV metric set requested via dev_priv->perf state. */
extern int i915_oa_select_metric_set_chv(struct drm_i915_private *dev_priv);

/* Create / remove the per-metric-set sysfs groups for CHV. */
extern int i915_perf_register_sysfs_chv(struct drm_i915_private *dev_priv);

extern void i915_perf_unregister_sysfs_chv(struct drm_i915_private *dev_priv);

#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/i915_oa_glk.c
new file mode 100644
index 000000000000..2f356d51bff8
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_glk.c
@@ -0,0 +1,2602 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#include <linux/sysfs.h>
30
31#include "i915_drv.h"
32#include "i915_oa_glk.h"
33
/*
 * Local IDs for the GLK metric sets exposed through sysfs.
 * Numbering is 1-based (first entry explicitly = 1), so 0 never names a set.
 */
enum metric_set_id {
	METRIC_SET_ID_RENDER_BASIC = 1,
	METRIC_SET_ID_COMPUTE_BASIC,
	METRIC_SET_ID_RENDER_PIPE_PROFILE,
	METRIC_SET_ID_MEMORY_READS,
	METRIC_SET_ID_MEMORY_WRITES,
	METRIC_SET_ID_COMPUTE_EXTENDED,
	METRIC_SET_ID_COMPUTE_L3_CACHE,
	METRIC_SET_ID_HDC_AND_SF,
	METRIC_SET_ID_L3_1,
	METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
	METRIC_SET_ID_SAMPLER,
	METRIC_SET_ID_TDL_1,
	METRIC_SET_ID_TDL_2,
	METRIC_SET_ID_COMPUTE_EXTRA,
	METRIC_SET_ID_TEST_OA,
};
51
/* Must equal the number of entries in enum metric_set_id above (15). */
int i915_oa_n_builtin_metric_sets_glk = 15;
53
/* Boolean (B) counter register programming for the RenderBasic set. */
static const struct i915_oa_reg b_counter_config_render_basic[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2740), 0x00000000 },
};
61
/* Flex EU counter register/value pairs for the RenderBasic set. */
static const struct i915_oa_reg flex_eu_config_render_basic[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
71
/*
 * Mux programming for the RenderBasic set: every entry writes register
 * 0x9888.  Autogenerated hardware data — do not edit by hand.
 */
static const struct i915_oa_reg mux_config_render_basic[] = {
	{ _MMIO(0x9888), 0x166c00f0 },
	{ _MMIO(0x9888), 0x12120280 },
	{ _MMIO(0x9888), 0x12320280 },
	{ _MMIO(0x9888), 0x11930317 },
	{ _MMIO(0x9888), 0x159303df },
	{ _MMIO(0x9888), 0x3f900c00 },
	{ _MMIO(0x9888), 0x419000a0 },
	{ _MMIO(0x9888), 0x002d1000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x082d5000 },
	{ _MMIO(0x9888), 0x0a2d1000 },
	{ _MMIO(0x9888), 0x0c2e0800 },
	{ _MMIO(0x9888), 0x0e2e5900 },
	{ _MMIO(0x9888), 0x0a4c8000 },
	{ _MMIO(0x9888), 0x0c4c8000 },
	{ _MMIO(0x9888), 0x0e4c4000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e2000 },
	{ _MMIO(0x9888), 0x1c4f0010 },
	{ _MMIO(0x9888), 0x0a6c0053 },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1a0fcc00 },
	{ _MMIO(0x9888), 0x1c0f0002 },
	{ _MMIO(0x9888), 0x1c2c0040 },
	{ _MMIO(0x9888), 0x00101000 },
	{ _MMIO(0x9888), 0x04101000 },
	{ _MMIO(0x9888), 0x00114000 },
	{ _MMIO(0x9888), 0x08114000 },
	{ _MMIO(0x9888), 0x00120020 },
	{ _MMIO(0x9888), 0x08120021 },
	{ _MMIO(0x9888), 0x00141000 },
	{ _MMIO(0x9888), 0x08141000 },
	{ _MMIO(0x9888), 0x02308000 },
	{ _MMIO(0x9888), 0x04302000 },
	{ _MMIO(0x9888), 0x06318000 },
	{ _MMIO(0x9888), 0x08318000 },
	{ _MMIO(0x9888), 0x06320800 },
	{ _MMIO(0x9888), 0x08320840 },
	{ _MMIO(0x9888), 0x00320000 },
	{ _MMIO(0x9888), 0x06344000 },
	{ _MMIO(0x9888), 0x08344000 },
	{ _MMIO(0x9888), 0x0d931831 },
	{ _MMIO(0x9888), 0x0f939f3f },
	{ _MMIO(0x9888), 0x01939e80 },
	{ _MMIO(0x9888), 0x039303bc },
	{ _MMIO(0x9888), 0x0593000e },
	{ _MMIO(0x9888), 0x1993002a },
	{ _MMIO(0x9888), 0x07930000 },
	{ _MMIO(0x9888), 0x09930000 },
	{ _MMIO(0x9888), 0x1d900177 },
	{ _MMIO(0x9888), 0x1f900187 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x13904000 },
	{ _MMIO(0x9888), 0x21904000 },
	{ _MMIO(0x9888), 0x23904000 },
	{ _MMIO(0x9888), 0x25904000 },
	{ _MMIO(0x9888), 0x27904000 },
	{ _MMIO(0x9888), 0x2b904000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x53901110 },
	{ _MMIO(0x9888), 0x43900423 },
	{ _MMIO(0x9888), 0x55900111 },
	{ _MMIO(0x9888), 0x47900c02 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900020 },
	{ _MMIO(0x9888), 0x59901111 },
	{ _MMIO(0x9888), 0x4b900421 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900001 },
	{ _MMIO(0x9888), 0x45900821 },
};
153
154static int
155get_render_basic_mux_config(struct drm_i915_private *dev_priv,
156 const struct i915_oa_reg **regs,
157 int *lens)
158{
159 int n = 0;
160
161 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
162 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
163
164 regs[n] = mux_config_render_basic;
165 lens[n] = ARRAY_SIZE(mux_config_render_basic);
166 n++;
167
168 return n;
169}
170
/* Boolean (B) counter register programming for the ComputeBasic set. */
static const struct i915_oa_reg b_counter_config_compute_basic[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2740), 0x00000000 },
};
178
/* Flex EU counter register/value pairs for the ComputeBasic set. */
static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};
188
/*
 * Mux programming for the ComputeBasic set: every entry writes register
 * 0x9888.  Autogenerated hardware data — do not edit by hand.
 */
static const struct i915_oa_reg mux_config_compute_basic[] = {
	{ _MMIO(0x9888), 0x104f00e0 },
	{ _MMIO(0x9888), 0x124f1c00 },
	{ _MMIO(0x9888), 0x39900340 },
	{ _MMIO(0x9888), 0x3f900c00 },
	{ _MMIO(0x9888), 0x41900000 },
	{ _MMIO(0x9888), 0x002d5000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x082d4000 },
	{ _MMIO(0x9888), 0x0a2d1000 },
	{ _MMIO(0x9888), 0x0c2d5000 },
	{ _MMIO(0x9888), 0x0e2d4000 },
	{ _MMIO(0x9888), 0x0c2e1400 },
	{ _MMIO(0x9888), 0x0e2e5100 },
	{ _MMIO(0x9888), 0x102e0114 },
	{ _MMIO(0x9888), 0x044cc000 },
	{ _MMIO(0x9888), 0x0a4c8000 },
	{ _MMIO(0x9888), 0x0c4c8000 },
	{ _MMIO(0x9888), 0x0e4c4000 },
	{ _MMIO(0x9888), 0x104c8000 },
	{ _MMIO(0x9888), 0x124c8000 },
	{ _MMIO(0x9888), 0x164c2000 },
	{ _MMIO(0x9888), 0x004ea000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e2000 },
	{ _MMIO(0x9888), 0x0c4ea000 },
	{ _MMIO(0x9888), 0x0e4e8000 },
	{ _MMIO(0x9888), 0x004f6b42 },
	{ _MMIO(0x9888), 0x064f6200 },
	{ _MMIO(0x9888), 0x084f4100 },
	{ _MMIO(0x9888), 0x0a4f0061 },
	{ _MMIO(0x9888), 0x0c4f6c4c },
	{ _MMIO(0x9888), 0x0e4f4b00 },
	{ _MMIO(0x9888), 0x1a4f0000 },
	{ _MMIO(0x9888), 0x1c4f0000 },
	{ _MMIO(0x9888), 0x180f5000 },
	{ _MMIO(0x9888), 0x1a0f8800 },
	{ _MMIO(0x9888), 0x1c0f08a2 },
	{ _MMIO(0x9888), 0x182c4000 },
	{ _MMIO(0x9888), 0x1c2c1451 },
	{ _MMIO(0x9888), 0x1e2c0001 },
	{ _MMIO(0x9888), 0x1a2c0010 },
	{ _MMIO(0x9888), 0x01938000 },
	{ _MMIO(0x9888), 0x0f938000 },
	{ _MMIO(0x9888), 0x19938a28 },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x19900177 },
	{ _MMIO(0x9888), 0x1b900178 },
	{ _MMIO(0x9888), 0x1d900125 },
	{ _MMIO(0x9888), 0x1f900123 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x13904000 },
	{ _MMIO(0x9888), 0x21904000 },
	{ _MMIO(0x9888), 0x25904000 },
	{ _MMIO(0x9888), 0x27904000 },
	{ _MMIO(0x9888), 0x2b904000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x53901000 },
	{ _MMIO(0x9888), 0x43900000 },
	{ _MMIO(0x9888), 0x55900111 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900000 },
	{ _MMIO(0x9888), 0x45900000 },
};
262
263static int
264get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
265 const struct i915_oa_reg **regs,
266 int *lens)
267{
268 int n = 0;
269
270 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
271 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
272
273 regs[n] = mux_config_compute_basic;
274 lens[n] = ARRAY_SIZE(mux_config_compute_basic);
275 n++;
276
277 return n;
278}
279
/* Boolean (B) counter register programming for the RenderPipeProfile set. */
static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007ffea },
	{ _MMIO(0x2774), 0x00007ffc },
	{ _MMIO(0x2778), 0x0007affa },
	{ _MMIO(0x277c), 0x0000f5fd },
	{ _MMIO(0x2780), 0x00079ffa },
	{ _MMIO(0x2784), 0x0000f3fb },
	{ _MMIO(0x2788), 0x0007bf7a },
	{ _MMIO(0x278c), 0x0000f7e7 },
	{ _MMIO(0x2790), 0x0007fefa },
	{ _MMIO(0x2794), 0x0000f7cf },
	{ _MMIO(0x2798), 0x00077ffa },
	{ _MMIO(0x279c), 0x0000efdf },
	{ _MMIO(0x27a0), 0x0006fffa },
	{ _MMIO(0x27a4), 0x0000cfbf },
	{ _MMIO(0x27a8), 0x0003fffa },
	{ _MMIO(0x27ac), 0x00005f7f },
};
303
/* Flex EU counter register/value pairs for the RenderPipeProfile set. */
static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
313
/*
 * Mux programming for the RenderPipeProfile set: every entry writes
 * register 0x9888.  Autogenerated hardware data — do not edit by hand.
 */
static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
	{ _MMIO(0x9888), 0x0c2e001f },
	{ _MMIO(0x9888), 0x0a2f0000 },
	{ _MMIO(0x9888), 0x10186800 },
	{ _MMIO(0x9888), 0x11810019 },
	{ _MMIO(0x9888), 0x15810013 },
	{ _MMIO(0x9888), 0x13820020 },
	{ _MMIO(0x9888), 0x11830020 },
	{ _MMIO(0x9888), 0x17840000 },
	{ _MMIO(0x9888), 0x11860007 },
	{ _MMIO(0x9888), 0x21860000 },
	{ _MMIO(0x9888), 0x178703e0 },
	{ _MMIO(0x9888), 0x0c2d8000 },
	{ _MMIO(0x9888), 0x042d4000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x022e5400 },
	{ _MMIO(0x9888), 0x002e0000 },
	{ _MMIO(0x9888), 0x0e2e0080 },
	{ _MMIO(0x9888), 0x082f0040 },
	{ _MMIO(0x9888), 0x002f0000 },
	{ _MMIO(0x9888), 0x06143000 },
	{ _MMIO(0x9888), 0x06174000 },
	{ _MMIO(0x9888), 0x06180012 },
	{ _MMIO(0x9888), 0x00180000 },
	{ _MMIO(0x9888), 0x0d804000 },
	{ _MMIO(0x9888), 0x0f804000 },
	{ _MMIO(0x9888), 0x05804000 },
	{ _MMIO(0x9888), 0x09810200 },
	{ _MMIO(0x9888), 0x0b810030 },
	{ _MMIO(0x9888), 0x03810003 },
	{ _MMIO(0x9888), 0x21819140 },
	{ _MMIO(0x9888), 0x23819050 },
	{ _MMIO(0x9888), 0x25810018 },
	{ _MMIO(0x9888), 0x0b820980 },
	{ _MMIO(0x9888), 0x03820d80 },
	{ _MMIO(0x9888), 0x11820000 },
	{ _MMIO(0x9888), 0x0182c000 },
	{ _MMIO(0x9888), 0x07828000 },
	{ _MMIO(0x9888), 0x09824000 },
	{ _MMIO(0x9888), 0x0f828000 },
	{ _MMIO(0x9888), 0x0d830004 },
	{ _MMIO(0x9888), 0x0583000c },
	{ _MMIO(0x9888), 0x0f831000 },
	{ _MMIO(0x9888), 0x01848072 },
	{ _MMIO(0x9888), 0x11840000 },
	{ _MMIO(0x9888), 0x07848000 },
	{ _MMIO(0x9888), 0x09844000 },
	{ _MMIO(0x9888), 0x0f848000 },
	{ _MMIO(0x9888), 0x07860000 },
	{ _MMIO(0x9888), 0x09860092 },
	{ _MMIO(0x9888), 0x0f860400 },
	{ _MMIO(0x9888), 0x01869100 },
	{ _MMIO(0x9888), 0x0f870065 },
	{ _MMIO(0x9888), 0x01870000 },
	{ _MMIO(0x9888), 0x19930800 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x1b952000 },
	{ _MMIO(0x9888), 0x1d955055 },
	{ _MMIO(0x9888), 0x1f951455 },
	{ _MMIO(0x9888), 0x0992a000 },
	{ _MMIO(0x9888), 0x0f928000 },
	{ _MMIO(0x9888), 0x1192a800 },
	{ _MMIO(0x9888), 0x1392028a },
	{ _MMIO(0x9888), 0x0b92a000 },
	{ _MMIO(0x9888), 0x0d922000 },
	{ _MMIO(0x9888), 0x13908000 },
	{ _MMIO(0x9888), 0x21908000 },
	{ _MMIO(0x9888), 0x23908000 },
	{ _MMIO(0x9888), 0x25908000 },
	{ _MMIO(0x9888), 0x27908000 },
	{ _MMIO(0x9888), 0x29908000 },
	{ _MMIO(0x9888), 0x2b908000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x2f908000 },
	{ _MMIO(0x9888), 0x31908000 },
	{ _MMIO(0x9888), 0x15908000 },
	{ _MMIO(0x9888), 0x17908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900c01 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900863 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900061 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900000 },
	{ _MMIO(0x9888), 0x45900c22 },
};
409
410static int
411get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
412 const struct i915_oa_reg **regs,
413 int *lens)
414{
415 int n = 0;
416
417 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
418 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
419
420 regs[n] = mux_config_render_pipe_profile;
421 lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
422 n++;
423
424 return n;
425}
426
/* Boolean (B) counter register programming for the MemoryReads set. */
static const struct i915_oa_reg b_counter_config_memory_reads[] = {
	{ _MMIO(0x272c), 0xffffffff },
	{ _MMIO(0x2728), 0xffffffff },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x271c), 0xffffffff },
	{ _MMIO(0x2718), 0xffffffff },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x86543210 },
	{ _MMIO(0x2748), 0x86543210 },
	{ _MMIO(0x2744), 0x00006667 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x86543210 },
	{ _MMIO(0x2758), 0x86543210 },
	{ _MMIO(0x2754), 0x00006465 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fe00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fe00 },
	{ _MMIO(0x2780), 0x0007f872 },
	{ _MMIO(0x2784), 0x0000fe00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fe00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fe00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fe00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fe00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fe00 },
};
461
/* Flex EU counter register/value pairs for the MemoryReads set. */
static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
471
/*
 * Mux programming for the MemoryReads set: every entry writes register
 * 0x9888.  Autogenerated hardware data — do not edit by hand.
 */
static const struct i915_oa_reg mux_config_memory_reads[] = {
	{ _MMIO(0x9888), 0x19800343 },
	{ _MMIO(0x9888), 0x39900340 },
	{ _MMIO(0x9888), 0x3f901000 },
	{ _MMIO(0x9888), 0x41900003 },
	{ _MMIO(0x9888), 0x03803180 },
	{ _MMIO(0x9888), 0x058035e2 },
	{ _MMIO(0x9888), 0x0780006a },
	{ _MMIO(0x9888), 0x11800000 },
	{ _MMIO(0x9888), 0x2181a000 },
	{ _MMIO(0x9888), 0x2381000a },
	{ _MMIO(0x9888), 0x1d950550 },
	{ _MMIO(0x9888), 0x0b928000 },
	{ _MMIO(0x9888), 0x0d92a000 },
	{ _MMIO(0x9888), 0x0f922000 },
	{ _MMIO(0x9888), 0x13900170 },
	{ _MMIO(0x9888), 0x21900171 },
	{ _MMIO(0x9888), 0x23900172 },
	{ _MMIO(0x9888), 0x25900173 },
	{ _MMIO(0x9888), 0x27900174 },
	{ _MMIO(0x9888), 0x29900175 },
	{ _MMIO(0x9888), 0x2b900176 },
	{ _MMIO(0x9888), 0x2d900177 },
	{ _MMIO(0x9888), 0x2f90017f },
	{ _MMIO(0x9888), 0x31900125 },
	{ _MMIO(0x9888), 0x15900123 },
	{ _MMIO(0x9888), 0x17900121 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43901084 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47901080 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49901084 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b901084 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900004 },
	{ _MMIO(0x9888), 0x45900000 },
};
517
518static int
519get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
520 const struct i915_oa_reg **regs,
521 int *lens)
522{
523 int n = 0;
524
525 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
526 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
527
528 regs[n] = mux_config_memory_reads;
529 lens[n] = ARRAY_SIZE(mux_config_memory_reads);
530 n++;
531
532 return n;
533}
534
/* Boolean (B) counter register programming for the MemoryWrites set. */
static const struct i915_oa_reg b_counter_config_memory_writes[] = {
	{ _MMIO(0x272c), 0xffffffff },
	{ _MMIO(0x2728), 0xffffffff },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x271c), 0xffffffff },
	{ _MMIO(0x2718), 0xffffffff },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x86543210 },
	{ _MMIO(0x2748), 0x86543210 },
	{ _MMIO(0x2744), 0x00006667 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x86543210 },
	{ _MMIO(0x2758), 0x86543210 },
	{ _MMIO(0x2754), 0x00006465 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fe00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fe00 },
	{ _MMIO(0x2780), 0x0007f822 },
	{ _MMIO(0x2784), 0x0000fe00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fe00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fe00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fe00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fe00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fe00 },
};
569
/* Flex EU counter register/value pairs for the MemoryWrites set. */
static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
579
/*
 * Mux programming for the MemoryWrites set: every entry writes register
 * 0x9888.  Autogenerated hardware data — do not edit by hand.
 */
static const struct i915_oa_reg mux_config_memory_writes[] = {
	{ _MMIO(0x9888), 0x19800343 },
	{ _MMIO(0x9888), 0x39900340 },
	{ _MMIO(0x9888), 0x3f900000 },
	{ _MMIO(0x9888), 0x41900080 },
	{ _MMIO(0x9888), 0x03803180 },
	{ _MMIO(0x9888), 0x058035e2 },
	{ _MMIO(0x9888), 0x0780006a },
	{ _MMIO(0x9888), 0x11800000 },
	{ _MMIO(0x9888), 0x2181a000 },
	{ _MMIO(0x9888), 0x2381000a },
	{ _MMIO(0x9888), 0x1d950550 },
	{ _MMIO(0x9888), 0x0b928000 },
	{ _MMIO(0x9888), 0x0d92a000 },
	{ _MMIO(0x9888), 0x0f922000 },
	{ _MMIO(0x9888), 0x13900180 },
	{ _MMIO(0x9888), 0x21900181 },
	{ _MMIO(0x9888), 0x23900182 },
	{ _MMIO(0x9888), 0x25900183 },
	{ _MMIO(0x9888), 0x27900184 },
	{ _MMIO(0x9888), 0x29900185 },
	{ _MMIO(0x9888), 0x2b900186 },
	{ _MMIO(0x9888), 0x2d900187 },
	{ _MMIO(0x9888), 0x2f900170 },
	{ _MMIO(0x9888), 0x31900125 },
	{ _MMIO(0x9888), 0x15900123 },
	{ _MMIO(0x9888), 0x17900121 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43901084 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47901080 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49901084 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b901084 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900004 },
	{ _MMIO(0x9888), 0x45900000 },
};
625
626static int
627get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
628 const struct i915_oa_reg **regs,
629 int *lens)
630{
631 int n = 0;
632
633 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
634 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
635
636 regs[n] = mux_config_memory_writes;
637 lens[n] = ARRAY_SIZE(mux_config_memory_writes);
638 n++;
639
640 return n;
641}
642
/* Boolean (B) counter register programming for the ComputeExtended set. */
static const struct i915_oa_reg b_counter_config_compute_extended[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fc2a },
	{ _MMIO(0x2774), 0x0000bf00 },
	{ _MMIO(0x2778), 0x0007fc6a },
	{ _MMIO(0x277c), 0x0000bf00 },
	{ _MMIO(0x2780), 0x0007fc92 },
	{ _MMIO(0x2784), 0x0000bf00 },
	{ _MMIO(0x2788), 0x0007fca2 },
	{ _MMIO(0x278c), 0x0000bf00 },
	{ _MMIO(0x2790), 0x0007fc32 },
	{ _MMIO(0x2794), 0x0000bf00 },
	{ _MMIO(0x2798), 0x0007fc9a },
	{ _MMIO(0x279c), 0x0000bf00 },
	{ _MMIO(0x27a0), 0x0007fe6a },
	{ _MMIO(0x27a4), 0x0000bf00 },
	{ _MMIO(0x27a8), 0x0007fe7a },
	{ _MMIO(0x27ac), 0x0000bf00 },
};
666
/* Flex EU counter register/value pairs for the ComputeExtended set. */
static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};
676
/*
 * MUX programming sequence (all writes target register 0x9888) for the
 * "compute_extended" metric set. Ordered hardware writes — do not
 * reorder or hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_extended[] = {
	{ _MMIO(0x9888), 0x104f00e0 },
	{ _MMIO(0x9888), 0x141c0160 },
	{ _MMIO(0x9888), 0x161c0015 },
	{ _MMIO(0x9888), 0x181c0120 },
	{ _MMIO(0x9888), 0x002d5000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x082d5000 },
	{ _MMIO(0x9888), 0x0a2d5000 },
	{ _MMIO(0x9888), 0x0c2d5000 },
	{ _MMIO(0x9888), 0x0e2d5000 },
	{ _MMIO(0x9888), 0x022d5000 },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x0c2e5400 },
	{ _MMIO(0x9888), 0x0e2e5515 },
	{ _MMIO(0x9888), 0x102e0155 },
	{ _MMIO(0x9888), 0x044cc000 },
	{ _MMIO(0x9888), 0x0a4c8000 },
	{ _MMIO(0x9888), 0x0c4cc000 },
	{ _MMIO(0x9888), 0x0e4cc000 },
	{ _MMIO(0x9888), 0x104c8000 },
	{ _MMIO(0x9888), 0x124c8000 },
	{ _MMIO(0x9888), 0x144c8000 },
	{ _MMIO(0x9888), 0x164c2000 },
	{ _MMIO(0x9888), 0x064cc000 },
	{ _MMIO(0x9888), 0x084cc000 },
	{ _MMIO(0x9888), 0x004ea000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084ea000 },
	{ _MMIO(0x9888), 0x0a4ea000 },
	{ _MMIO(0x9888), 0x0c4ea000 },
	{ _MMIO(0x9888), 0x0e4ea000 },
	{ _MMIO(0x9888), 0x024ea000 },
	{ _MMIO(0x9888), 0x044ea000 },
	{ _MMIO(0x9888), 0x0e4f4b41 },
	{ _MMIO(0x9888), 0x004f4200 },
	{ _MMIO(0x9888), 0x024f404c },
	{ _MMIO(0x9888), 0x1c4f0000 },
	{ _MMIO(0x9888), 0x1a4f0000 },
	{ _MMIO(0x9888), 0x001b4000 },
	{ _MMIO(0x9888), 0x061b8000 },
	{ _MMIO(0x9888), 0x081bc000 },
	{ _MMIO(0x9888), 0x0a1bc000 },
	{ _MMIO(0x9888), 0x0c1bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x001c0031 },
	{ _MMIO(0x9888), 0x061c1900 },
	{ _MMIO(0x9888), 0x081c1a33 },
	{ _MMIO(0x9888), 0x0a1c1b35 },
	{ _MMIO(0x9888), 0x0c1c3337 },
	{ _MMIO(0x9888), 0x041c31c7 },
	{ _MMIO(0x9888), 0x180f5000 },
	{ _MMIO(0x9888), 0x1a0fa8aa },
	{ _MMIO(0x9888), 0x1c0f0aaa },
	{ _MMIO(0x9888), 0x182c8000 },
	{ _MMIO(0x9888), 0x1c2c6aaa },
	{ _MMIO(0x9888), 0x1e2c0001 },
	{ _MMIO(0x9888), 0x1a2c2950 },
	{ _MMIO(0x9888), 0x01938000 },
	{ _MMIO(0x9888), 0x0f938000 },
	{ _MMIO(0x9888), 0x1993aaaa },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x05938000 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x13904000 },
	{ _MMIO(0x9888), 0x21904000 },
	{ _MMIO(0x9888), 0x23904000 },
	{ _MMIO(0x9888), 0x25904000 },
	{ _MMIO(0x9888), 0x27904000 },
	{ _MMIO(0x9888), 0x29904000 },
	{ _MMIO(0x9888), 0x2b904000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900420 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900400 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900001 },
	{ _MMIO(0x9888), 0x45900001 },
};
771
772static int
773get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
774 const struct i915_oa_reg **regs,
775 int *lens)
776{
777 int n = 0;
778
779 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
780 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
781
782 regs[n] = mux_config_compute_extended;
783 lens[n] = ARRAY_SIZE(mux_config_compute_extended);
784 n++;
785
786 return n;
787}
788
/*
 * Boolean (B) counter register/value writes for the "compute_l3_cache"
 * metric set. Opaque hardware programming — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x30800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fffa },
	{ _MMIO(0x2774), 0x0000fefe },
	{ _MMIO(0x2778), 0x0007fffa },
	{ _MMIO(0x277c), 0x0000fefd },
	{ _MMIO(0x2790), 0x0007fffa },
	{ _MMIO(0x2794), 0x0000fbef },
	{ _MMIO(0x2798), 0x0007fffa },
	{ _MMIO(0x279c), 0x0000fbdf },
};
804
/*
 * Flexible EU counter register writes (0xe458..0xe65c) for the
 * "compute_l3_cache" metric set.
 */
static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00101100 },
	{ _MMIO(0xe45c), 0x00201200 },
	{ _MMIO(0xe55c), 0x00301300 },
	{ _MMIO(0xe65c), 0x00401400 },
};
814
/*
 * MUX programming sequence (all writes target register 0x9888) for the
 * "compute_l3_cache" metric set. Ordered hardware writes — do not
 * reorder or hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
	{ _MMIO(0x9888), 0x166c03b0 },
	{ _MMIO(0x9888), 0x1593001e },
	{ _MMIO(0x9888), 0x3f900c00 },
	{ _MMIO(0x9888), 0x41900000 },
	{ _MMIO(0x9888), 0x002d1000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x082d5000 },
	{ _MMIO(0x9888), 0x0e2d5000 },
	{ _MMIO(0x9888), 0x0c2e0400 },
	{ _MMIO(0x9888), 0x0e2e1500 },
	{ _MMIO(0x9888), 0x102e0140 },
	{ _MMIO(0x9888), 0x044c4000 },
	{ _MMIO(0x9888), 0x0a4c8000 },
	{ _MMIO(0x9888), 0x0c4cc000 },
	{ _MMIO(0x9888), 0x144c8000 },
	{ _MMIO(0x9888), 0x164c2000 },
	{ _MMIO(0x9888), 0x004e2000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084ea000 },
	{ _MMIO(0x9888), 0x0e4ea000 },
	{ _MMIO(0x9888), 0x1a4f4001 },
	{ _MMIO(0x9888), 0x1c4f5005 },
	{ _MMIO(0x9888), 0x006c0051 },
	{ _MMIO(0x9888), 0x066c5000 },
	{ _MMIO(0x9888), 0x086c5c5d },
	{ _MMIO(0x9888), 0x0e6c5e5f },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x146c0000 },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x180f1000 },
	{ _MMIO(0x9888), 0x1a0fa800 },
	{ _MMIO(0x9888), 0x1c0f0a00 },
	{ _MMIO(0x9888), 0x182c4000 },
	{ _MMIO(0x9888), 0x1c2c4015 },
	{ _MMIO(0x9888), 0x1e2c0001 },
	{ _MMIO(0x9888), 0x03931980 },
	{ _MMIO(0x9888), 0x05930032 },
	{ _MMIO(0x9888), 0x11930000 },
	{ _MMIO(0x9888), 0x01938000 },
	{ _MMIO(0x9888), 0x0f938000 },
	{ _MMIO(0x9888), 0x1993a00a },
	{ _MMIO(0x9888), 0x07930000 },
	{ _MMIO(0x9888), 0x09930000 },
	{ _MMIO(0x9888), 0x1d900177 },
	{ _MMIO(0x9888), 0x1f900178 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x13904000 },
	{ _MMIO(0x9888), 0x21904000 },
	{ _MMIO(0x9888), 0x23904000 },
	{ _MMIO(0x9888), 0x25904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x53901000 },
	{ _MMIO(0x9888), 0x43900000 },
	{ _MMIO(0x9888), 0x55900111 },
	{ _MMIO(0x9888), 0x47900001 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x4d900000 },
	{ _MMIO(0x9888), 0x45900400 },
};
884
885static int
886get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
887 const struct i915_oa_reg **regs,
888 int *lens)
889{
890 int n = 0;
891
892 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
893 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
894
895 regs[n] = mux_config_compute_l3_cache;
896 lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
897 n++;
898
899 return n;
900}
901
/*
 * Boolean (B) counter register/value writes for the "hdc_and_sf"
 * metric set. Opaque hardware programming — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x10800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000fdff },
};
912
/*
 * Flexible EU counter register writes (0xe458..0xe65c) for the
 * "hdc_and_sf" metric set.
 */
static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
922
/*
 * MUX programming sequence (all writes target register 0x9888) for the
 * "hdc_and_sf" metric set. Ordered hardware writes — do not reorder or
 * hand-edit.
 */
static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
	{ _MMIO(0x9888), 0x104f0232 },
	{ _MMIO(0x9888), 0x124f4640 },
	{ _MMIO(0x9888), 0x11834400 },
	{ _MMIO(0x9888), 0x022d4000 },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x0e2e0055 },
	{ _MMIO(0x9888), 0x064c8000 },
	{ _MMIO(0x9888), 0x084cc000 },
	{ _MMIO(0x9888), 0x0a4c4000 },
	{ _MMIO(0x9888), 0x024e8000 },
	{ _MMIO(0x9888), 0x044ea000 },
	{ _MMIO(0x9888), 0x064e2000 },
	{ _MMIO(0x9888), 0x024f6100 },
	{ _MMIO(0x9888), 0x044f416b },
	{ _MMIO(0x9888), 0x064f004b },
	{ _MMIO(0x9888), 0x1a4f0000 },
	{ _MMIO(0x9888), 0x1a0f02a8 },
	{ _MMIO(0x9888), 0x1a2c5500 },
	{ _MMIO(0x9888), 0x0f808000 },
	{ _MMIO(0x9888), 0x25810020 },
	{ _MMIO(0x9888), 0x0f8305c0 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x1f951000 },
	{ _MMIO(0x9888), 0x13920200 },
	{ _MMIO(0x9888), 0x31908000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4d900003 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
966
967static int
968get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
969 const struct i915_oa_reg **regs,
970 int *lens)
971{
972 int n = 0;
973
974 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
975 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
976
977 regs[n] = mux_config_hdc_and_sf;
978 lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
979 n++;
980
981 return n;
982}
983
/*
 * Boolean (B) counter register/value writes for the "l3_1" metric set.
 * Opaque hardware programming — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_l3_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};
1008
/*
 * Flexible EU counter register writes (0xe458..0xe65c) for the "l3_1"
 * metric set.
 */
static const struct i915_oa_reg flex_eu_config_l3_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1018
/*
 * MUX programming sequence (all writes target register 0x9888) for the
 * "l3_1" metric set. Ordered hardware writes — do not reorder or
 * hand-edit.
 */
static const struct i915_oa_reg mux_config_l3_1[] = {
	{ _MMIO(0x9888), 0x12643400 },
	{ _MMIO(0x9888), 0x12653400 },
	{ _MMIO(0x9888), 0x106c6800 },
	{ _MMIO(0x9888), 0x126c001e },
	{ _MMIO(0x9888), 0x166c0010 },
	{ _MMIO(0x9888), 0x0c2d5000 },
	{ _MMIO(0x9888), 0x0e2d5000 },
	{ _MMIO(0x9888), 0x002d4000 },
	{ _MMIO(0x9888), 0x022d5000 },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x102e0154 },
	{ _MMIO(0x9888), 0x0c2e5000 },
	{ _MMIO(0x9888), 0x0e2e0055 },
	{ _MMIO(0x9888), 0x104c8000 },
	{ _MMIO(0x9888), 0x124c8000 },
	{ _MMIO(0x9888), 0x144c8000 },
	{ _MMIO(0x9888), 0x164c2000 },
	{ _MMIO(0x9888), 0x044c8000 },
	{ _MMIO(0x9888), 0x064cc000 },
	{ _MMIO(0x9888), 0x084cc000 },
	{ _MMIO(0x9888), 0x0a4c4000 },
	{ _MMIO(0x9888), 0x0c4ea000 },
	{ _MMIO(0x9888), 0x0e4ea000 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x024ea000 },
	{ _MMIO(0x9888), 0x044ea000 },
	{ _MMIO(0x9888), 0x064e2000 },
	{ _MMIO(0x9888), 0x1c4f5500 },
	{ _MMIO(0x9888), 0x1a4f1554 },
	{ _MMIO(0x9888), 0x0a640024 },
	{ _MMIO(0x9888), 0x10640000 },
	{ _MMIO(0x9888), 0x04640000 },
	{ _MMIO(0x9888), 0x0c650024 },
	{ _MMIO(0x9888), 0x10650000 },
	{ _MMIO(0x9888), 0x06650000 },
	{ _MMIO(0x9888), 0x0c6c5327 },
	{ _MMIO(0x9888), 0x0e6c5425 },
	{ _MMIO(0x9888), 0x006c2a00 },
	{ _MMIO(0x9888), 0x026c285b },
	{ _MMIO(0x9888), 0x046c005c },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1a6c0900 },
	{ _MMIO(0x9888), 0x1c0f0aa0 },
	{ _MMIO(0x9888), 0x180f4000 },
	{ _MMIO(0x9888), 0x1a0f02aa },
	{ _MMIO(0x9888), 0x1c2c5400 },
	{ _MMIO(0x9888), 0x1e2c0001 },
	{ _MMIO(0x9888), 0x1a2c5550 },
	{ _MMIO(0x9888), 0x1993aa00 },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x05938000 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x2b904000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900421 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900001 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900420 },
	{ _MMIO(0x9888), 0x45900021 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900000 },
};
1097
1098static int
1099get_l3_1_mux_config(struct drm_i915_private *dev_priv,
1100 const struct i915_oa_reg **regs,
1101 int *lens)
1102{
1103 int n = 0;
1104
1105 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1106 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1107
1108 regs[n] = mux_config_l3_1;
1109 lens[n] = ARRAY_SIZE(mux_config_l3_1);
1110 n++;
1111
1112 return n;
1113}
1114
/*
 * Boolean (B) counter register/value writes for the
 * "rasterizer_and_pixel_backend" metric set. Opaque hardware
 * programming — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x30800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000efff },
	{ _MMIO(0x2778), 0x00006000 },
	{ _MMIO(0x277c), 0x0000f3ff },
};
1127
/*
 * Flexible EU counter register writes (0xe458..0xe65c) for the
 * "rasterizer_and_pixel_backend" metric set.
 */
static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1137
/*
 * MUX programming sequence (all writes target register 0x9888) for the
 * "rasterizer_and_pixel_backend" metric set. Ordered hardware writes —
 * do not reorder or hand-edit.
 */
static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x9888), 0x102d7800 },
	{ _MMIO(0x9888), 0x122d79e0 },
	{ _MMIO(0x9888), 0x0c2f0004 },
	{ _MMIO(0x9888), 0x100e3800 },
	{ _MMIO(0x9888), 0x180f0005 },
	{ _MMIO(0x9888), 0x002d0940 },
	{ _MMIO(0x9888), 0x022d802f },
	{ _MMIO(0x9888), 0x042d4013 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x0e2e0050 },
	{ _MMIO(0x9888), 0x022f0010 },
	{ _MMIO(0x9888), 0x002f0000 },
	{ _MMIO(0x9888), 0x084c8000 },
	{ _MMIO(0x9888), 0x0a4c4000 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e2000 },
	{ _MMIO(0x9888), 0x040e0480 },
	{ _MMIO(0x9888), 0x000e0000 },
	{ _MMIO(0x9888), 0x060f0027 },
	{ _MMIO(0x9888), 0x100f0000 },
	{ _MMIO(0x9888), 0x1a0f0040 },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x05938000 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x439014a0 },
	{ _MMIO(0x9888), 0x459000a4 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900001 },
	{ _MMIO(0x9888), 0x33900000 },
};
1180
1181static int
1182get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
1183 const struct i915_oa_reg **regs,
1184 int *lens)
1185{
1186 int n = 0;
1187
1188 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1189 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1190
1191 regs[n] = mux_config_rasterizer_and_pixel_backend;
1192 lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
1193 n++;
1194
1195 return n;
1196}
1197
/*
 * Boolean (B) counter register/value writes for the "sampler" metric
 * set. Opaque hardware programming — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_sampler[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x70800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x0000c000 },
	{ _MMIO(0x2774), 0x0000e7ff },
	{ _MMIO(0x2778), 0x00003000 },
	{ _MMIO(0x277c), 0x0000f9ff },
	{ _MMIO(0x2780), 0x00000c00 },
	{ _MMIO(0x2784), 0x0000fe7f },
};
1212
/*
 * Flexible EU counter register writes (0xe458..0xe65c) for the
 * "sampler" metric set.
 */
static const struct i915_oa_reg flex_eu_config_sampler[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1222
/*
 * MUX programming sequence (all writes target register 0x9888) for the
 * "sampler" metric set. Ordered hardware writes — do not reorder or
 * hand-edit.
 */
static const struct i915_oa_reg mux_config_sampler[] = {
	{ _MMIO(0x9888), 0x121300a0 },
	{ _MMIO(0x9888), 0x141600ab },
	{ _MMIO(0x9888), 0x123300a0 },
	{ _MMIO(0x9888), 0x143600ab },
	{ _MMIO(0x9888), 0x125300a0 },
	{ _MMIO(0x9888), 0x145600ab },
	{ _MMIO(0x9888), 0x0c2d4000 },
	{ _MMIO(0x9888), 0x0e2d5000 },
	{ _MMIO(0x9888), 0x002d4000 },
	{ _MMIO(0x9888), 0x022d5000 },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x102e01a0 },
	{ _MMIO(0x9888), 0x0c2e5000 },
	{ _MMIO(0x9888), 0x0e2e0065 },
	{ _MMIO(0x9888), 0x164c2000 },
	{ _MMIO(0x9888), 0x044c8000 },
	{ _MMIO(0x9888), 0x064cc000 },
	{ _MMIO(0x9888), 0x084c4000 },
	{ _MMIO(0x9888), 0x0a4c4000 },
	{ _MMIO(0x9888), 0x0e4e8000 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x024ea000 },
	{ _MMIO(0x9888), 0x044e2000 },
	{ _MMIO(0x9888), 0x064e2000 },
	{ _MMIO(0x9888), 0x1c0f0800 },
	{ _MMIO(0x9888), 0x180f4000 },
	{ _MMIO(0x9888), 0x1a0f023f },
	{ _MMIO(0x9888), 0x1e2c0003 },
	{ _MMIO(0x9888), 0x1a2cc030 },
	{ _MMIO(0x9888), 0x04132180 },
	{ _MMIO(0x9888), 0x02130000 },
	{ _MMIO(0x9888), 0x0c148000 },
	{ _MMIO(0x9888), 0x0e142000 },
	{ _MMIO(0x9888), 0x04148000 },
	{ _MMIO(0x9888), 0x1e150140 },
	{ _MMIO(0x9888), 0x1c150040 },
	{ _MMIO(0x9888), 0x0c163000 },
	{ _MMIO(0x9888), 0x0e160068 },
	{ _MMIO(0x9888), 0x10160000 },
	{ _MMIO(0x9888), 0x18160000 },
	{ _MMIO(0x9888), 0x0a164000 },
	{ _MMIO(0x9888), 0x04330043 },
	{ _MMIO(0x9888), 0x02330000 },
	{ _MMIO(0x9888), 0x0234a000 },
	{ _MMIO(0x9888), 0x04342000 },
	{ _MMIO(0x9888), 0x1c350015 },
	{ _MMIO(0x9888), 0x02363460 },
	{ _MMIO(0x9888), 0x10360000 },
	{ _MMIO(0x9888), 0x04360000 },
	{ _MMIO(0x9888), 0x06360000 },
	{ _MMIO(0x9888), 0x08364000 },
	{ _MMIO(0x9888), 0x06530043 },
	{ _MMIO(0x9888), 0x02530000 },
	{ _MMIO(0x9888), 0x0e548000 },
	{ _MMIO(0x9888), 0x00548000 },
	{ _MMIO(0x9888), 0x06542000 },
	{ _MMIO(0x9888), 0x1e550400 },
	{ _MMIO(0x9888), 0x1a552000 },
	{ _MMIO(0x9888), 0x1c550100 },
	{ _MMIO(0x9888), 0x0e563000 },
	{ _MMIO(0x9888), 0x00563400 },
	{ _MMIO(0x9888), 0x10560000 },
	{ _MMIO(0x9888), 0x18560000 },
	{ _MMIO(0x9888), 0x02560000 },
	{ _MMIO(0x9888), 0x0c564000 },
	{ _MMIO(0x9888), 0x1993a800 },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x05938000 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b9014a0 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900001 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900820 },
	{ _MMIO(0x9888), 0x45901022 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900000 },
};
1317
1318static int
1319get_sampler_mux_config(struct drm_i915_private *dev_priv,
1320 const struct i915_oa_reg **regs,
1321 int *lens)
1322{
1323 int n = 0;
1324
1325 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1326 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1327
1328 regs[n] = mux_config_sampler;
1329 lens[n] = ARRAY_SIZE(mux_config_sampler);
1330 n++;
1331
1332 return n;
1333}
1334
/*
 * Boolean (B) counter register/value writes for the "tdl_1" metric
 * set. Opaque hardware programming — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_tdl_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x00007fff },
	{ _MMIO(0x2778), 0x00000000 },
	{ _MMIO(0x277c), 0x00009fff },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000efff },
	{ _MMIO(0x2788), 0x00000000 },
	{ _MMIO(0x278c), 0x0000f3ff },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000fdff },
	{ _MMIO(0x2798), 0x00000000 },
	{ _MMIO(0x279c), 0x0000fe7f },
};
1355
/*
 * Flexible EU counter register writes (0xe458..0xe65c) for the "tdl_1"
 * metric set.
 */
static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1365
/*
 * MUX programming sequence (all writes target register 0x9888) for the
 * "tdl_1" metric set. Ordered hardware writes — do not reorder or
 * hand-edit.
 */
static const struct i915_oa_reg mux_config_tdl_1[] = {
	{ _MMIO(0x9888), 0x141a0000 },
	{ _MMIO(0x9888), 0x143a0000 },
	{ _MMIO(0x9888), 0x145a0000 },
	{ _MMIO(0x9888), 0x0c2d4000 },
	{ _MMIO(0x9888), 0x0e2d5000 },
	{ _MMIO(0x9888), 0x002d4000 },
	{ _MMIO(0x9888), 0x022d5000 },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x102e0150 },
	{ _MMIO(0x9888), 0x0c2e5000 },
	{ _MMIO(0x9888), 0x0e2e006a },
	{ _MMIO(0x9888), 0x124c8000 },
	{ _MMIO(0x9888), 0x144c8000 },
	{ _MMIO(0x9888), 0x164c2000 },
	{ _MMIO(0x9888), 0x044c8000 },
	{ _MMIO(0x9888), 0x064c4000 },
	{ _MMIO(0x9888), 0x0a4c4000 },
	{ _MMIO(0x9888), 0x0c4e8000 },
	{ _MMIO(0x9888), 0x0e4ea000 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x024e2000 },
	{ _MMIO(0x9888), 0x064e2000 },
	{ _MMIO(0x9888), 0x1c0f0bc0 },
	{ _MMIO(0x9888), 0x180f4000 },
	{ _MMIO(0x9888), 0x1a0f0302 },
	{ _MMIO(0x9888), 0x1e2c0003 },
	{ _MMIO(0x9888), 0x1a2c00f0 },
	{ _MMIO(0x9888), 0x021a3080 },
	{ _MMIO(0x9888), 0x041a31e5 },
	{ _MMIO(0x9888), 0x02148000 },
	{ _MMIO(0x9888), 0x0414a000 },
	{ _MMIO(0x9888), 0x1c150054 },
	{ _MMIO(0x9888), 0x06168000 },
	{ _MMIO(0x9888), 0x08168000 },
	{ _MMIO(0x9888), 0x0a168000 },
	{ _MMIO(0x9888), 0x0c3a3280 },
	{ _MMIO(0x9888), 0x0e3a0063 },
	{ _MMIO(0x9888), 0x063a0061 },
	{ _MMIO(0x9888), 0x023a0000 },
	{ _MMIO(0x9888), 0x0c348000 },
	{ _MMIO(0x9888), 0x0e342000 },
	{ _MMIO(0x9888), 0x06342000 },
	{ _MMIO(0x9888), 0x1e350140 },
	{ _MMIO(0x9888), 0x1c350100 },
	{ _MMIO(0x9888), 0x18360028 },
	{ _MMIO(0x9888), 0x0c368000 },
	{ _MMIO(0x9888), 0x0e5a3080 },
	{ _MMIO(0x9888), 0x005a3280 },
	{ _MMIO(0x9888), 0x025a0063 },
	{ _MMIO(0x9888), 0x0e548000 },
	{ _MMIO(0x9888), 0x00548000 },
	{ _MMIO(0x9888), 0x02542000 },
	{ _MMIO(0x9888), 0x1e550400 },
	{ _MMIO(0x9888), 0x1a552000 },
	{ _MMIO(0x9888), 0x1c550001 },
	{ _MMIO(0x9888), 0x18560080 },
	{ _MMIO(0x9888), 0x02568000 },
	{ _MMIO(0x9888), 0x04568000 },
	{ _MMIO(0x9888), 0x1993a800 },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x05938000 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x2d904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900420 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4d900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900000 },
	{ _MMIO(0x9888), 0x45901084 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900001 },
};
1453
1454static int
1455get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
1456 const struct i915_oa_reg **regs,
1457 int *lens)
1458{
1459 int n = 0;
1460
1461 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1462 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1463
1464 regs[n] = mux_config_tdl_1;
1465 lens[n] = ARRAY_SIZE(mux_config_tdl_1);
1466 n++;
1467
1468 return n;
1469}
1470
/*
 * Boolean (B) counter register/value writes for the "tdl_2" metric
 * set. Opaque hardware programming — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_tdl_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};
1479
/*
 * Flexible EU counter register writes (0xe458..0xe65c) for the "tdl_2"
 * metric set.
 */
static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1489
/*
 * MUX programming sequence (all writes target register 0x9888) for the
 * "tdl_2" metric set. Ordered hardware writes — do not reorder or
 * hand-edit.
 */
static const struct i915_oa_reg mux_config_tdl_2[] = {
	{ _MMIO(0x9888), 0x141a026b },
	{ _MMIO(0x9888), 0x143a0173 },
	{ _MMIO(0x9888), 0x145a026b },
	{ _MMIO(0x9888), 0x002d4000 },
	{ _MMIO(0x9888), 0x022d5000 },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x0c2e5000 },
	{ _MMIO(0x9888), 0x0e2e0069 },
	{ _MMIO(0x9888), 0x044c8000 },
	{ _MMIO(0x9888), 0x064cc000 },
	{ _MMIO(0x9888), 0x0a4c4000 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x024ea000 },
	{ _MMIO(0x9888), 0x064e2000 },
	{ _MMIO(0x9888), 0x180f6000 },
	{ _MMIO(0x9888), 0x1a0f030a },
	{ _MMIO(0x9888), 0x1a2c03c0 },
	{ _MMIO(0x9888), 0x041a37e7 },
	{ _MMIO(0x9888), 0x021a0000 },
	{ _MMIO(0x9888), 0x0414a000 },
	{ _MMIO(0x9888), 0x1c150050 },
	{ _MMIO(0x9888), 0x08168000 },
	{ _MMIO(0x9888), 0x0a168000 },
	{ _MMIO(0x9888), 0x003a3380 },
	{ _MMIO(0x9888), 0x063a006f },
	{ _MMIO(0x9888), 0x023a0000 },
	{ _MMIO(0x9888), 0x00348000 },
	{ _MMIO(0x9888), 0x06342000 },
	{ _MMIO(0x9888), 0x1a352000 },
	{ _MMIO(0x9888), 0x1c350100 },
	{ _MMIO(0x9888), 0x02368000 },
	{ _MMIO(0x9888), 0x0c368000 },
	{ _MMIO(0x9888), 0x025a37e7 },
	{ _MMIO(0x9888), 0x0254a000 },
	{ _MMIO(0x9888), 0x1c550005 },
	{ _MMIO(0x9888), 0x04568000 },
	{ _MMIO(0x9888), 0x06568000 },
	{ _MMIO(0x9888), 0x03938000 },
	{ _MMIO(0x9888), 0x05938000 },
	{ _MMIO(0x9888), 0x07938000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17904000 },
	{ _MMIO(0x9888), 0x19904000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900020 },
	{ _MMIO(0x9888), 0x45901080 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900001 },
	{ _MMIO(0x9888), 0x33900000 },
};
1549
1550static int
1551get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
1552 const struct i915_oa_reg **regs,
1553 int *lens)
1554{
1555 int n = 0;
1556
1557 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1558 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1559
1560 regs[n] = mux_config_tdl_2;
1561 lens[n] = ARRAY_SIZE(mux_config_tdl_2);
1562 n++;
1563
1564 return n;
1565}
1566
/*
 * Boolean (B) counter register/value writes for the "compute_extra"
 * metric set. Opaque hardware programming — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_compute_extra[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};
1575
/*
 * Flexible EU counter register writes (0xe458..0xe65c) for the
 * "compute_extra" metric set.
 */
static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
	{ _MMIO(0xe458), 0x00001000 },
	{ _MMIO(0xe558), 0x00003002 },
	{ _MMIO(0xe658), 0x00005004 },
	{ _MMIO(0xe758), 0x00011010 },
	{ _MMIO(0xe45c), 0x00050012 },
	{ _MMIO(0xe55c), 0x00052051 },
	{ _MMIO(0xe65c), 0x00000008 },
};
1585
/*
 * MUX programming sequence (all writes target register 0x9888) for the
 * "compute_extra" metric set. Ordered hardware writes — do not reorder
 * or hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_extra[] = {
	{ _MMIO(0x9888), 0x141a001f },
	{ _MMIO(0x9888), 0x143a001f },
	{ _MMIO(0x9888), 0x145a001f },
	{ _MMIO(0x9888), 0x042d5000 },
	{ _MMIO(0x9888), 0x062d1000 },
	{ _MMIO(0x9888), 0x0e2e0094 },
	{ _MMIO(0x9888), 0x084cc000 },
	{ _MMIO(0x9888), 0x044ea000 },
	{ _MMIO(0x9888), 0x1a0f00e0 },
	{ _MMIO(0x9888), 0x1a2c0c00 },
	{ _MMIO(0x9888), 0x061a0063 },
	{ _MMIO(0x9888), 0x021a0000 },
	{ _MMIO(0x9888), 0x06142000 },
	{ _MMIO(0x9888), 0x1c150100 },
	{ _MMIO(0x9888), 0x0c168000 },
	{ _MMIO(0x9888), 0x043a3180 },
	{ _MMIO(0x9888), 0x023a0000 },
	{ _MMIO(0x9888), 0x04348000 },
	{ _MMIO(0x9888), 0x1c350040 },
	{ _MMIO(0x9888), 0x0a368000 },
	{ _MMIO(0x9888), 0x045a0063 },
	{ _MMIO(0x9888), 0x025a0000 },
	{ _MMIO(0x9888), 0x04542000 },
	{ _MMIO(0x9888), 0x1c550010 },
	{ _MMIO(0x9888), 0x08568000 },
	{ _MMIO(0x9888), 0x09938000 },
	{ _MMIO(0x9888), 0x0b938000 },
	{ _MMIO(0x9888), 0x0d938000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1d904000 },
	{ _MMIO(0x9888), 0x1f904000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900400 },
	{ _MMIO(0x9888), 0x47900004 },
	{ _MMIO(0x9888), 0x33900000 },
};
1624
1625static int
1626get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
1627 const struct i915_oa_reg **regs,
1628 int *lens)
1629{
1630 int n = 0;
1631
1632 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1633 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1634
1635 regs[n] = mux_config_compute_extra;
1636 lens[n] = ARRAY_SIZE(mux_config_compute_extra);
1637 n++;
1638
1639 return n;
1640}
1641
/*
 * Boolean (B) counter configuration for the TEST_OA metric set: raw
 * register/value pairs applied when the set is selected. Autogenerated —
 * register semantics are HW-specific, do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_test_oa[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2770), 0x00000004 },
	{ _MMIO(0x2774), 0x00000000 },
	{ _MMIO(0x2778), 0x00000003 },
	{ _MMIO(0x277c), 0x00000000 },
	{ _MMIO(0x2780), 0x00000007 },
	{ _MMIO(0x2784), 0x00000000 },
	{ _MMIO(0x2788), 0x00100002 },
	{ _MMIO(0x278c), 0x0000fff7 },
	{ _MMIO(0x2790), 0x00100002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00100082 },
	{ _MMIO(0x279c), 0x0000ffef },
	{ _MMIO(0x27a0), 0x001000c2 },
	{ _MMIO(0x27a4), 0x0000ffe7 },
	{ _MMIO(0x27a8), 0x00100001 },
	{ _MMIO(0x27ac), 0x0000ffe7 },
};
1666
/* TEST_OA programs no flex EU counters (flex_regs_len ends up 0). */
static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
1669
/*
 * MUX programming for the TEST_OA metric set (all writes target register
 * 0x9888). Autogenerated — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_test_oa[] = {
	{ _MMIO(0x9888), 0x19800000 },
	{ _MMIO(0x9888), 0x07800063 },
	{ _MMIO(0x9888), 0x11800000 },
	{ _MMIO(0x9888), 0x23810008 },
	{ _MMIO(0x9888), 0x1d950400 },
	{ _MMIO(0x9888), 0x0f922000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1683
1684static int
1685get_test_oa_mux_config(struct drm_i915_private *dev_priv,
1686 const struct i915_oa_reg **regs,
1687 int *lens)
1688{
1689 int n = 0;
1690
1691 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1692 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1693
1694 regs[n] = mux_config_test_oa;
1695 lens[n] = ARRAY_SIZE(mux_config_test_oa);
1696 n++;
1697
1698 return n;
1699}
1700
1701int i915_oa_select_metric_set_glk(struct drm_i915_private *dev_priv)
1702{
1703 dev_priv->perf.oa.n_mux_configs = 0;
1704 dev_priv->perf.oa.b_counter_regs = NULL;
1705 dev_priv->perf.oa.b_counter_regs_len = 0;
1706 dev_priv->perf.oa.flex_regs = NULL;
1707 dev_priv->perf.oa.flex_regs_len = 0;
1708
1709 switch (dev_priv->perf.oa.metrics_set) {
1710 case METRIC_SET_ID_RENDER_BASIC:
1711 dev_priv->perf.oa.n_mux_configs =
1712 get_render_basic_mux_config(dev_priv,
1713 dev_priv->perf.oa.mux_regs,
1714 dev_priv->perf.oa.mux_regs_lens);
1715 if (dev_priv->perf.oa.n_mux_configs == 0) {
1716 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
1717
1718 /* EINVAL because *_register_sysfs already checked this
1719 * and so it wouldn't have been advertised to userspace and
1720 * so shouldn't have been requested
1721 */
1722 return -EINVAL;
1723 }
1724
1725 dev_priv->perf.oa.b_counter_regs =
1726 b_counter_config_render_basic;
1727 dev_priv->perf.oa.b_counter_regs_len =
1728 ARRAY_SIZE(b_counter_config_render_basic);
1729
1730 dev_priv->perf.oa.flex_regs =
1731 flex_eu_config_render_basic;
1732 dev_priv->perf.oa.flex_regs_len =
1733 ARRAY_SIZE(flex_eu_config_render_basic);
1734
1735 return 0;
1736 case METRIC_SET_ID_COMPUTE_BASIC:
1737 dev_priv->perf.oa.n_mux_configs =
1738 get_compute_basic_mux_config(dev_priv,
1739 dev_priv->perf.oa.mux_regs,
1740 dev_priv->perf.oa.mux_regs_lens);
1741 if (dev_priv->perf.oa.n_mux_configs == 0) {
1742 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
1743
1744 /* EINVAL because *_register_sysfs already checked this
1745 * and so it wouldn't have been advertised to userspace and
1746 * so shouldn't have been requested
1747 */
1748 return -EINVAL;
1749 }
1750
1751 dev_priv->perf.oa.b_counter_regs =
1752 b_counter_config_compute_basic;
1753 dev_priv->perf.oa.b_counter_regs_len =
1754 ARRAY_SIZE(b_counter_config_compute_basic);
1755
1756 dev_priv->perf.oa.flex_regs =
1757 flex_eu_config_compute_basic;
1758 dev_priv->perf.oa.flex_regs_len =
1759 ARRAY_SIZE(flex_eu_config_compute_basic);
1760
1761 return 0;
1762 case METRIC_SET_ID_RENDER_PIPE_PROFILE:
1763 dev_priv->perf.oa.n_mux_configs =
1764 get_render_pipe_profile_mux_config(dev_priv,
1765 dev_priv->perf.oa.mux_regs,
1766 dev_priv->perf.oa.mux_regs_lens);
1767 if (dev_priv->perf.oa.n_mux_configs == 0) {
1768 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
1769
1770 /* EINVAL because *_register_sysfs already checked this
1771 * and so it wouldn't have been advertised to userspace and
1772 * so shouldn't have been requested
1773 */
1774 return -EINVAL;
1775 }
1776
1777 dev_priv->perf.oa.b_counter_regs =
1778 b_counter_config_render_pipe_profile;
1779 dev_priv->perf.oa.b_counter_regs_len =
1780 ARRAY_SIZE(b_counter_config_render_pipe_profile);
1781
1782 dev_priv->perf.oa.flex_regs =
1783 flex_eu_config_render_pipe_profile;
1784 dev_priv->perf.oa.flex_regs_len =
1785 ARRAY_SIZE(flex_eu_config_render_pipe_profile);
1786
1787 return 0;
1788 case METRIC_SET_ID_MEMORY_READS:
1789 dev_priv->perf.oa.n_mux_configs =
1790 get_memory_reads_mux_config(dev_priv,
1791 dev_priv->perf.oa.mux_regs,
1792 dev_priv->perf.oa.mux_regs_lens);
1793 if (dev_priv->perf.oa.n_mux_configs == 0) {
1794 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
1795
1796 /* EINVAL because *_register_sysfs already checked this
1797 * and so it wouldn't have been advertised to userspace and
1798 * so shouldn't have been requested
1799 */
1800 return -EINVAL;
1801 }
1802
1803 dev_priv->perf.oa.b_counter_regs =
1804 b_counter_config_memory_reads;
1805 dev_priv->perf.oa.b_counter_regs_len =
1806 ARRAY_SIZE(b_counter_config_memory_reads);
1807
1808 dev_priv->perf.oa.flex_regs =
1809 flex_eu_config_memory_reads;
1810 dev_priv->perf.oa.flex_regs_len =
1811 ARRAY_SIZE(flex_eu_config_memory_reads);
1812
1813 return 0;
1814 case METRIC_SET_ID_MEMORY_WRITES:
1815 dev_priv->perf.oa.n_mux_configs =
1816 get_memory_writes_mux_config(dev_priv,
1817 dev_priv->perf.oa.mux_regs,
1818 dev_priv->perf.oa.mux_regs_lens);
1819 if (dev_priv->perf.oa.n_mux_configs == 0) {
1820 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
1821
1822 /* EINVAL because *_register_sysfs already checked this
1823 * and so it wouldn't have been advertised to userspace and
1824 * so shouldn't have been requested
1825 */
1826 return -EINVAL;
1827 }
1828
1829 dev_priv->perf.oa.b_counter_regs =
1830 b_counter_config_memory_writes;
1831 dev_priv->perf.oa.b_counter_regs_len =
1832 ARRAY_SIZE(b_counter_config_memory_writes);
1833
1834 dev_priv->perf.oa.flex_regs =
1835 flex_eu_config_memory_writes;
1836 dev_priv->perf.oa.flex_regs_len =
1837 ARRAY_SIZE(flex_eu_config_memory_writes);
1838
1839 return 0;
1840 case METRIC_SET_ID_COMPUTE_EXTENDED:
1841 dev_priv->perf.oa.n_mux_configs =
1842 get_compute_extended_mux_config(dev_priv,
1843 dev_priv->perf.oa.mux_regs,
1844 dev_priv->perf.oa.mux_regs_lens);
1845 if (dev_priv->perf.oa.n_mux_configs == 0) {
1846 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
1847
1848 /* EINVAL because *_register_sysfs already checked this
1849 * and so it wouldn't have been advertised to userspace and
1850 * so shouldn't have been requested
1851 */
1852 return -EINVAL;
1853 }
1854
1855 dev_priv->perf.oa.b_counter_regs =
1856 b_counter_config_compute_extended;
1857 dev_priv->perf.oa.b_counter_regs_len =
1858 ARRAY_SIZE(b_counter_config_compute_extended);
1859
1860 dev_priv->perf.oa.flex_regs =
1861 flex_eu_config_compute_extended;
1862 dev_priv->perf.oa.flex_regs_len =
1863 ARRAY_SIZE(flex_eu_config_compute_extended);
1864
1865 return 0;
1866 case METRIC_SET_ID_COMPUTE_L3_CACHE:
1867 dev_priv->perf.oa.n_mux_configs =
1868 get_compute_l3_cache_mux_config(dev_priv,
1869 dev_priv->perf.oa.mux_regs,
1870 dev_priv->perf.oa.mux_regs_lens);
1871 if (dev_priv->perf.oa.n_mux_configs == 0) {
1872 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
1873
1874 /* EINVAL because *_register_sysfs already checked this
1875 * and so it wouldn't have been advertised to userspace and
1876 * so shouldn't have been requested
1877 */
1878 return -EINVAL;
1879 }
1880
1881 dev_priv->perf.oa.b_counter_regs =
1882 b_counter_config_compute_l3_cache;
1883 dev_priv->perf.oa.b_counter_regs_len =
1884 ARRAY_SIZE(b_counter_config_compute_l3_cache);
1885
1886 dev_priv->perf.oa.flex_regs =
1887 flex_eu_config_compute_l3_cache;
1888 dev_priv->perf.oa.flex_regs_len =
1889 ARRAY_SIZE(flex_eu_config_compute_l3_cache);
1890
1891 return 0;
1892 case METRIC_SET_ID_HDC_AND_SF:
1893 dev_priv->perf.oa.n_mux_configs =
1894 get_hdc_and_sf_mux_config(dev_priv,
1895 dev_priv->perf.oa.mux_regs,
1896 dev_priv->perf.oa.mux_regs_lens);
1897 if (dev_priv->perf.oa.n_mux_configs == 0) {
1898 DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
1899
1900 /* EINVAL because *_register_sysfs already checked this
1901 * and so it wouldn't have been advertised to userspace and
1902 * so shouldn't have been requested
1903 */
1904 return -EINVAL;
1905 }
1906
1907 dev_priv->perf.oa.b_counter_regs =
1908 b_counter_config_hdc_and_sf;
1909 dev_priv->perf.oa.b_counter_regs_len =
1910 ARRAY_SIZE(b_counter_config_hdc_and_sf);
1911
1912 dev_priv->perf.oa.flex_regs =
1913 flex_eu_config_hdc_and_sf;
1914 dev_priv->perf.oa.flex_regs_len =
1915 ARRAY_SIZE(flex_eu_config_hdc_and_sf);
1916
1917 return 0;
1918 case METRIC_SET_ID_L3_1:
1919 dev_priv->perf.oa.n_mux_configs =
1920 get_l3_1_mux_config(dev_priv,
1921 dev_priv->perf.oa.mux_regs,
1922 dev_priv->perf.oa.mux_regs_lens);
1923 if (dev_priv->perf.oa.n_mux_configs == 0) {
1924 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
1925
1926 /* EINVAL because *_register_sysfs already checked this
1927 * and so it wouldn't have been advertised to userspace and
1928 * so shouldn't have been requested
1929 */
1930 return -EINVAL;
1931 }
1932
1933 dev_priv->perf.oa.b_counter_regs =
1934 b_counter_config_l3_1;
1935 dev_priv->perf.oa.b_counter_regs_len =
1936 ARRAY_SIZE(b_counter_config_l3_1);
1937
1938 dev_priv->perf.oa.flex_regs =
1939 flex_eu_config_l3_1;
1940 dev_priv->perf.oa.flex_regs_len =
1941 ARRAY_SIZE(flex_eu_config_l3_1);
1942
1943 return 0;
1944 case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
1945 dev_priv->perf.oa.n_mux_configs =
1946 get_rasterizer_and_pixel_backend_mux_config(dev_priv,
1947 dev_priv->perf.oa.mux_regs,
1948 dev_priv->perf.oa.mux_regs_lens);
1949 if (dev_priv->perf.oa.n_mux_configs == 0) {
1950 DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
1951
1952 /* EINVAL because *_register_sysfs already checked this
1953 * and so it wouldn't have been advertised to userspace and
1954 * so shouldn't have been requested
1955 */
1956 return -EINVAL;
1957 }
1958
1959 dev_priv->perf.oa.b_counter_regs =
1960 b_counter_config_rasterizer_and_pixel_backend;
1961 dev_priv->perf.oa.b_counter_regs_len =
1962 ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
1963
1964 dev_priv->perf.oa.flex_regs =
1965 flex_eu_config_rasterizer_and_pixel_backend;
1966 dev_priv->perf.oa.flex_regs_len =
1967 ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
1968
1969 return 0;
1970 case METRIC_SET_ID_SAMPLER:
1971 dev_priv->perf.oa.n_mux_configs =
1972 get_sampler_mux_config(dev_priv,
1973 dev_priv->perf.oa.mux_regs,
1974 dev_priv->perf.oa.mux_regs_lens);
1975 if (dev_priv->perf.oa.n_mux_configs == 0) {
1976 DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
1977
1978 /* EINVAL because *_register_sysfs already checked this
1979 * and so it wouldn't have been advertised to userspace and
1980 * so shouldn't have been requested
1981 */
1982 return -EINVAL;
1983 }
1984
1985 dev_priv->perf.oa.b_counter_regs =
1986 b_counter_config_sampler;
1987 dev_priv->perf.oa.b_counter_regs_len =
1988 ARRAY_SIZE(b_counter_config_sampler);
1989
1990 dev_priv->perf.oa.flex_regs =
1991 flex_eu_config_sampler;
1992 dev_priv->perf.oa.flex_regs_len =
1993 ARRAY_SIZE(flex_eu_config_sampler);
1994
1995 return 0;
1996 case METRIC_SET_ID_TDL_1:
1997 dev_priv->perf.oa.n_mux_configs =
1998 get_tdl_1_mux_config(dev_priv,
1999 dev_priv->perf.oa.mux_regs,
2000 dev_priv->perf.oa.mux_regs_lens);
2001 if (dev_priv->perf.oa.n_mux_configs == 0) {
2002 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
2003
2004 /* EINVAL because *_register_sysfs already checked this
2005 * and so it wouldn't have been advertised to userspace and
2006 * so shouldn't have been requested
2007 */
2008 return -EINVAL;
2009 }
2010
2011 dev_priv->perf.oa.b_counter_regs =
2012 b_counter_config_tdl_1;
2013 dev_priv->perf.oa.b_counter_regs_len =
2014 ARRAY_SIZE(b_counter_config_tdl_1);
2015
2016 dev_priv->perf.oa.flex_regs =
2017 flex_eu_config_tdl_1;
2018 dev_priv->perf.oa.flex_regs_len =
2019 ARRAY_SIZE(flex_eu_config_tdl_1);
2020
2021 return 0;
2022 case METRIC_SET_ID_TDL_2:
2023 dev_priv->perf.oa.n_mux_configs =
2024 get_tdl_2_mux_config(dev_priv,
2025 dev_priv->perf.oa.mux_regs,
2026 dev_priv->perf.oa.mux_regs_lens);
2027 if (dev_priv->perf.oa.n_mux_configs == 0) {
2028 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
2029
2030 /* EINVAL because *_register_sysfs already checked this
2031 * and so it wouldn't have been advertised to userspace and
2032 * so shouldn't have been requested
2033 */
2034 return -EINVAL;
2035 }
2036
2037 dev_priv->perf.oa.b_counter_regs =
2038 b_counter_config_tdl_2;
2039 dev_priv->perf.oa.b_counter_regs_len =
2040 ARRAY_SIZE(b_counter_config_tdl_2);
2041
2042 dev_priv->perf.oa.flex_regs =
2043 flex_eu_config_tdl_2;
2044 dev_priv->perf.oa.flex_regs_len =
2045 ARRAY_SIZE(flex_eu_config_tdl_2);
2046
2047 return 0;
2048 case METRIC_SET_ID_COMPUTE_EXTRA:
2049 dev_priv->perf.oa.n_mux_configs =
2050 get_compute_extra_mux_config(dev_priv,
2051 dev_priv->perf.oa.mux_regs,
2052 dev_priv->perf.oa.mux_regs_lens);
2053 if (dev_priv->perf.oa.n_mux_configs == 0) {
2054 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
2055
2056 /* EINVAL because *_register_sysfs already checked this
2057 * and so it wouldn't have been advertised to userspace and
2058 * so shouldn't have been requested
2059 */
2060 return -EINVAL;
2061 }
2062
2063 dev_priv->perf.oa.b_counter_regs =
2064 b_counter_config_compute_extra;
2065 dev_priv->perf.oa.b_counter_regs_len =
2066 ARRAY_SIZE(b_counter_config_compute_extra);
2067
2068 dev_priv->perf.oa.flex_regs =
2069 flex_eu_config_compute_extra;
2070 dev_priv->perf.oa.flex_regs_len =
2071 ARRAY_SIZE(flex_eu_config_compute_extra);
2072
2073 return 0;
2074 case METRIC_SET_ID_TEST_OA:
2075 dev_priv->perf.oa.n_mux_configs =
2076 get_test_oa_mux_config(dev_priv,
2077 dev_priv->perf.oa.mux_regs,
2078 dev_priv->perf.oa.mux_regs_lens);
2079 if (dev_priv->perf.oa.n_mux_configs == 0) {
2080 DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
2081
2082 /* EINVAL because *_register_sysfs already checked this
2083 * and so it wouldn't have been advertised to userspace and
2084 * so shouldn't have been requested
2085 */
2086 return -EINVAL;
2087 }
2088
2089 dev_priv->perf.oa.b_counter_regs =
2090 b_counter_config_test_oa;
2091 dev_priv->perf.oa.b_counter_regs_len =
2092 ARRAY_SIZE(b_counter_config_test_oa);
2093
2094 dev_priv->perf.oa.flex_regs =
2095 flex_eu_config_test_oa;
2096 dev_priv->perf.oa.flex_regs_len =
2097 ARRAY_SIZE(flex_eu_config_test_oa);
2098
2099 return 0;
2100 default:
2101 return -ENODEV;
2102 }
2103}
2104
/*
 * sysfs plumbing for RENDER_BASIC: a group directory named by the set's
 * UUID containing a read-only "id" file that prints the metric set ID.
 */
static ssize_t
show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
}

static struct device_attribute dev_attr_render_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_render_basic_id,
	.store = NULL,
};

static struct attribute *attrs_render_basic[] = {
	&dev_attr_render_basic_id.attr,
	NULL,
};

static struct attribute_group group_render_basic = {
	.name = "d72df5c7-5b4a-4274-a43f-00b0fd51fc68",
	.attrs = attrs_render_basic,
};
2126
/* sysfs "id" attribute + UUID-named group for COMPUTE_BASIC. */
static ssize_t
show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
}

static struct device_attribute dev_attr_compute_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_basic_id,
	.store = NULL,
};

static struct attribute *attrs_compute_basic[] = {
	&dev_attr_compute_basic_id.attr,
	NULL,
};

static struct attribute_group group_compute_basic = {
	.name = "814285f6-354d-41d2-ba49-e24e622714a0",
	.attrs = attrs_compute_basic,
};
2148
/* sysfs "id" attribute + UUID-named group for RENDER_PIPE_PROFILE. */
static ssize_t
show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
}

static struct device_attribute dev_attr_render_pipe_profile_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_render_pipe_profile_id,
	.store = NULL,
};

static struct attribute *attrs_render_pipe_profile[] = {
	&dev_attr_render_pipe_profile_id.attr,
	NULL,
};

static struct attribute_group group_render_pipe_profile = {
	.name = "07d397a6-b3e6-49f6-9433-a4f293d55978",
	.attrs = attrs_render_pipe_profile,
};
2170
/* sysfs "id" attribute + UUID-named group for MEMORY_READS. */
static ssize_t
show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
}

static struct device_attribute dev_attr_memory_reads_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_memory_reads_id,
	.store = NULL,
};

static struct attribute *attrs_memory_reads[] = {
	&dev_attr_memory_reads_id.attr,
	NULL,
};

static struct attribute_group group_memory_reads = {
	.name = "1a356946-5428-450b-a2f0-89f8783a302d",
	.attrs = attrs_memory_reads,
};
2192
/* sysfs "id" attribute + UUID-named group for MEMORY_WRITES. */
static ssize_t
show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
}

static struct device_attribute dev_attr_memory_writes_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_memory_writes_id,
	.store = NULL,
};

static struct attribute *attrs_memory_writes[] = {
	&dev_attr_memory_writes_id.attr,
	NULL,
};

static struct attribute_group group_memory_writes = {
	.name = "5299be9d-7a61-4c99-9f81-f87e6c5aaca9",
	.attrs = attrs_memory_writes,
};
2214
/* sysfs "id" attribute + UUID-named group for COMPUTE_EXTENDED. */
static ssize_t
show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
}

static struct device_attribute dev_attr_compute_extended_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_extended_id,
	.store = NULL,
};

static struct attribute *attrs_compute_extended[] = {
	&dev_attr_compute_extended_id.attr,
	NULL,
};

static struct attribute_group group_compute_extended = {
	.name = "bc9bcff2-459a-4cbc-986d-a84b077153f3",
	.attrs = attrs_compute_extended,
};
2236
/* sysfs "id" attribute + UUID-named group for COMPUTE_L3_CACHE. */
static ssize_t
show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
}

static struct device_attribute dev_attr_compute_l3_cache_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_l3_cache_id,
	.store = NULL,
};

static struct attribute *attrs_compute_l3_cache[] = {
	&dev_attr_compute_l3_cache_id.attr,
	NULL,
};

static struct attribute_group group_compute_l3_cache = {
	.name = "88ec931f-5b4a-453a-9db6-a61232b6143d",
	.attrs = attrs_compute_l3_cache,
};
2258
/* sysfs "id" attribute + UUID-named group for HDC_AND_SF. */
static ssize_t
show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
}

static struct device_attribute dev_attr_hdc_and_sf_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_hdc_and_sf_id,
	.store = NULL,
};

static struct attribute *attrs_hdc_and_sf[] = {
	&dev_attr_hdc_and_sf_id.attr,
	NULL,
};

static struct attribute_group group_hdc_and_sf = {
	.name = "530d176d-2a18-4014-adf8-1500c6c60835",
	.attrs = attrs_hdc_and_sf,
};
2280
/* sysfs "id" attribute + UUID-named group for L3_1. */
static ssize_t
show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
}

static struct device_attribute dev_attr_l3_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_1_id,
	.store = NULL,
};

static struct attribute *attrs_l3_1[] = {
	&dev_attr_l3_1_id.attr,
	NULL,
};

static struct attribute_group group_l3_1 = {
	.name = "fdee5a5a-f23c-43d1-aa73-f6257c71671d",
	.attrs = attrs_l3_1,
};
2302
/* sysfs "id" attribute + UUID-named group for RASTERIZER_AND_PIXEL_BACKEND. */
static ssize_t
show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
}

static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_rasterizer_and_pixel_backend_id,
	.store = NULL,
};

static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
	&dev_attr_rasterizer_and_pixel_backend_id.attr,
	NULL,
};

static struct attribute_group group_rasterizer_and_pixel_backend = {
	.name = "6617623e-ca73-4791-b2b7-ddedd0846a0c",
	.attrs = attrs_rasterizer_and_pixel_backend,
};
2324
/* sysfs "id" attribute + UUID-named group for SAMPLER. */
static ssize_t
show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
}

static struct device_attribute dev_attr_sampler_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_sampler_id,
	.store = NULL,
};

static struct attribute *attrs_sampler[] = {
	&dev_attr_sampler_id.attr,
	NULL,
};

static struct attribute_group group_sampler = {
	.name = "f3b2ea63-e82e-4234-b418-44dd20dd34d0",
	.attrs = attrs_sampler,
};
2346
/* sysfs "id" attribute + UUID-named group for TDL_1. */
static ssize_t
show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
}

static struct device_attribute dev_attr_tdl_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_tdl_1_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_1[] = {
	&dev_attr_tdl_1_id.attr,
	NULL,
};

static struct attribute_group group_tdl_1 = {
	.name = "14411d35-cbf6-4f5e-b68b-190faf9a1a83",
	.attrs = attrs_tdl_1,
};
2368
/* sysfs "id" attribute + UUID-named group for TDL_2. */
static ssize_t
show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
}

static struct device_attribute dev_attr_tdl_2_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_tdl_2_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_2[] = {
	&dev_attr_tdl_2_id.attr,
	NULL,
};

static struct attribute_group group_tdl_2 = {
	.name = "ffa3f263-0478-4724-8c9f-c911c5ec0f1d",
	.attrs = attrs_tdl_2,
};
2390
/* sysfs "id" attribute + UUID-named group for COMPUTE_EXTRA. */
static ssize_t
show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
}

static struct device_attribute dev_attr_compute_extra_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_extra_id,
	.store = NULL,
};

static struct attribute *attrs_compute_extra[] = {
	&dev_attr_compute_extra_id.attr,
	NULL,
};

static struct attribute_group group_compute_extra = {
	.name = "15274c82-27d2-4819-876a-7cb1a2c59ba4",
	.attrs = attrs_compute_extra,
};
2412
/* sysfs "id" attribute + UUID-named group for TEST_OA. */
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
}

static struct device_attribute dev_attr_test_oa_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_test_oa_id,
	.store = NULL,
};

static struct attribute *attrs_test_oa[] = {
	&dev_attr_test_oa_id.attr,
	NULL,
};

static struct attribute_group group_test_oa = {
	.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf",
	.attrs = attrs_test_oa,
};
2434
/*
 * i915_perf_register_sysfs_glk - publish every applicable GLK metric set
 * @dev_priv: i915 device instance
 *
 * For each metric set whose get_*_mux_config() reports at least one MUX
 * config, create its UUID-named attribute group under metrics_kobj.
 * The mux_regs/mux_lens outputs are scratch here; only the returned
 * count (used as availability) matters.
 *
 * Returns 0 on success. On failure, unwinds every previously created
 * group (in reverse creation order) and returns the sysfs error.
 */
int
i915_perf_register_sysfs_glk(struct drm_i915_private *dev_priv)
{
	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
	int ret = 0;

	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
		if (ret)
			goto error_render_basic;
	}
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
		if (ret)
			goto error_compute_basic;
	}
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
		if (ret)
			goto error_render_pipe_profile;
	}
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
		if (ret)
			goto error_memory_reads;
	}
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
		if (ret)
			goto error_memory_writes;
	}
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
		if (ret)
			goto error_compute_extended;
	}
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
		if (ret)
			goto error_compute_l3_cache;
	}
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
		if (ret)
			goto error_hdc_and_sf;
	}
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
		if (ret)
			goto error_l3_1;
	}
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
		if (ret)
			goto error_rasterizer_and_pixel_backend;
	}
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
		if (ret)
			goto error_sampler;
	}
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
		if (ret)
			goto error_tdl_1;
	}
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
		if (ret)
			goto error_tdl_2;
	}
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
		if (ret)
			goto error_compute_extra;
	}
	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
		if (ret)
			goto error_test_oa;
	}

	return 0;

	/*
	 * Error unwind: each label removes the groups created before the
	 * failing step, re-checking availability the same way as above so
	 * only groups that were actually created are removed.
	 */
error_test_oa:
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
error_compute_extra:
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
error_tdl_2:
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
error_tdl_1:
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
error_sampler:
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
error_rasterizer_and_pixel_backend:
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
error_l3_1:
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
error_hdc_and_sf:
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
error_compute_l3_cache:
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
error_compute_extended:
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
error_memory_writes:
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
error_memory_reads:
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
error_render_pipe_profile:
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
error_compute_basic:
	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
error_render_basic:
	return ret;
}
2565
/*
 * i915_perf_unregister_sysfs_glk - remove every advertised GLK metric set
 * @dev_priv: i915 device instance
 *
 * Mirror of i915_perf_register_sysfs_glk(): removes each metric set's
 * UUID-named group, gated by the same availability checks so only groups
 * that were created are removed. mux_regs/mux_lens are scratch outputs.
 */
void
i915_perf_unregister_sysfs_glk(struct drm_i915_private *dev_priv)
{
	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];

	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
}
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/i915_oa_glk.h
new file mode 100644
index 000000000000..5511bb1cecf7
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_glk.h
@@ -0,0 +1,40 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#ifndef __I915_OA_GLK_H__
30#define __I915_OA_GLK_H__
31
32extern int i915_oa_n_builtin_metric_sets_glk;
33
34extern int i915_oa_select_metric_set_glk(struct drm_i915_private *dev_priv);
35
36extern int i915_perf_register_sysfs_glk(struct drm_i915_private *dev_priv);
37
38extern void i915_perf_unregister_sysfs_glk(struct drm_i915_private *dev_priv);
39
40#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c
index 4ddf756add31..10f169f683b7 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Autogenerated file, DO NOT EDIT manually! 2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
3 * 5 *
4 * Copyright (c) 2015 Intel Corporation 6 * Copyright (c) 2015 Intel Corporation
5 * 7 *
@@ -47,6 +49,9 @@ static const struct i915_oa_reg b_counter_config_render_basic[] = {
47 { _MMIO(0x2710), 0x00000000 }, 49 { _MMIO(0x2710), 0x00000000 },
48}; 50};
49 51
52static const struct i915_oa_reg flex_eu_config_render_basic[] = {
53};
54
50static const struct i915_oa_reg mux_config_render_basic[] = { 55static const struct i915_oa_reg mux_config_render_basic[] = {
51 { _MMIO(0x253a4), 0x01600000 }, 56 { _MMIO(0x253a4), 0x01600000 },
52 { _MMIO(0x25440), 0x00100000 }, 57 { _MMIO(0x25440), 0x00100000 },
@@ -109,12 +114,21 @@ static const struct i915_oa_reg mux_config_render_basic[] = {
109 { _MMIO(0x25428), 0x00042049 }, 114 { _MMIO(0x25428), 0x00042049 },
110}; 115};
111 116
112static const struct i915_oa_reg * 117static int
113get_render_basic_mux_config(struct drm_i915_private *dev_priv, 118get_render_basic_mux_config(struct drm_i915_private *dev_priv,
114 int *len) 119 const struct i915_oa_reg **regs,
120 int *lens)
115{ 121{
116 *len = ARRAY_SIZE(mux_config_render_basic); 122 int n = 0;
117 return mux_config_render_basic; 123
124 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
125 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
126
127 regs[n] = mux_config_render_basic;
128 lens[n] = ARRAY_SIZE(mux_config_render_basic);
129 n++;
130
131 return n;
118} 132}
119 133
120static const struct i915_oa_reg b_counter_config_compute_basic[] = { 134static const struct i915_oa_reg b_counter_config_compute_basic[] = {
@@ -137,6 +151,9 @@ static const struct i915_oa_reg b_counter_config_compute_basic[] = {
137 { _MMIO(0x236c), 0x00000000 }, 151 { _MMIO(0x236c), 0x00000000 },
138}; 152};
139 153
154static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
155};
156
140static const struct i915_oa_reg mux_config_compute_basic[] = { 157static const struct i915_oa_reg mux_config_compute_basic[] = {
141 { _MMIO(0x253a4), 0x00000000 }, 158 { _MMIO(0x253a4), 0x00000000 },
142 { _MMIO(0x2681c), 0x01f00800 }, 159 { _MMIO(0x2681c), 0x01f00800 },
@@ -172,12 +189,21 @@ static const struct i915_oa_reg mux_config_compute_basic[] = {
172 { _MMIO(0x25428), 0x00000c03 }, 189 { _MMIO(0x25428), 0x00000c03 },
173}; 190};
174 191
175static const struct i915_oa_reg * 192static int
176get_compute_basic_mux_config(struct drm_i915_private *dev_priv, 193get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
177 int *len) 194 const struct i915_oa_reg **regs,
195 int *lens)
178{ 196{
179 *len = ARRAY_SIZE(mux_config_compute_basic); 197 int n = 0;
180 return mux_config_compute_basic; 198
199 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
200 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
201
202 regs[n] = mux_config_compute_basic;
203 lens[n] = ARRAY_SIZE(mux_config_compute_basic);
204 n++;
205
206 return n;
181} 207}
182 208
183static const struct i915_oa_reg b_counter_config_compute_extended[] = { 209static const struct i915_oa_reg b_counter_config_compute_extended[] = {
@@ -203,6 +229,9 @@ static const struct i915_oa_reg b_counter_config_compute_extended[] = {
203 { _MMIO(0x27ac), 0x0000fffe }, 229 { _MMIO(0x27ac), 0x0000fffe },
204}; 230};
205 231
232static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
233};
234
206static const struct i915_oa_reg mux_config_compute_extended[] = { 235static const struct i915_oa_reg mux_config_compute_extended[] = {
207 { _MMIO(0x2681c), 0x3eb00800 }, 236 { _MMIO(0x2681c), 0x3eb00800 },
208 { _MMIO(0x26820), 0x00900000 }, 237 { _MMIO(0x26820), 0x00900000 },
@@ -221,12 +250,21 @@ static const struct i915_oa_reg mux_config_compute_extended[] = {
221 { _MMIO(0x25428), 0x00000000 }, 250 { _MMIO(0x25428), 0x00000000 },
222}; 251};
223 252
224static const struct i915_oa_reg * 253static int
225get_compute_extended_mux_config(struct drm_i915_private *dev_priv, 254get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
226 int *len) 255 const struct i915_oa_reg **regs,
256 int *lens)
227{ 257{
228 *len = ARRAY_SIZE(mux_config_compute_extended); 258 int n = 0;
229 return mux_config_compute_extended; 259
260 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
261 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
262
263 regs[n] = mux_config_compute_extended;
264 lens[n] = ARRAY_SIZE(mux_config_compute_extended);
265 n++;
266
267 return n;
230} 268}
231 269
232static const struct i915_oa_reg b_counter_config_memory_reads[] = { 270static const struct i915_oa_reg b_counter_config_memory_reads[] = {
@@ -260,6 +298,9 @@ static const struct i915_oa_reg b_counter_config_memory_reads[] = {
260 { _MMIO(0x27ac), 0x0000fc00 }, 298 { _MMIO(0x27ac), 0x0000fc00 },
261}; 299};
262 300
301static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
302};
303
263static const struct i915_oa_reg mux_config_memory_reads[] = { 304static const struct i915_oa_reg mux_config_memory_reads[] = {
264 { _MMIO(0x253a4), 0x34300000 }, 305 { _MMIO(0x253a4), 0x34300000 },
265 { _MMIO(0x25440), 0x2d800000 }, 306 { _MMIO(0x25440), 0x2d800000 },
@@ -281,12 +322,21 @@ static const struct i915_oa_reg mux_config_memory_reads[] = {
281 { _MMIO(0x25428), 0x00000000 }, 322 { _MMIO(0x25428), 0x00000000 },
282}; 323};
283 324
284static const struct i915_oa_reg * 325static int
285get_memory_reads_mux_config(struct drm_i915_private *dev_priv, 326get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
286 int *len) 327 const struct i915_oa_reg **regs,
328 int *lens)
287{ 329{
288 *len = ARRAY_SIZE(mux_config_memory_reads); 330 int n = 0;
289 return mux_config_memory_reads; 331
332 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
333 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
334
335 regs[n] = mux_config_memory_reads;
336 lens[n] = ARRAY_SIZE(mux_config_memory_reads);
337 n++;
338
339 return n;
290} 340}
291 341
292static const struct i915_oa_reg b_counter_config_memory_writes[] = { 342static const struct i915_oa_reg b_counter_config_memory_writes[] = {
@@ -320,6 +370,9 @@ static const struct i915_oa_reg b_counter_config_memory_writes[] = {
320 { _MMIO(0x27ac), 0x0000fc00 }, 370 { _MMIO(0x27ac), 0x0000fc00 },
321}; 371};
322 372
373static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
374};
375
323static const struct i915_oa_reg mux_config_memory_writes[] = { 376static const struct i915_oa_reg mux_config_memory_writes[] = {
324 { _MMIO(0x253a4), 0x34300000 }, 377 { _MMIO(0x253a4), 0x34300000 },
325 { _MMIO(0x25440), 0x01500000 }, 378 { _MMIO(0x25440), 0x01500000 },
@@ -341,12 +394,21 @@ static const struct i915_oa_reg mux_config_memory_writes[] = {
341 { _MMIO(0x25428), 0x00000000 }, 394 { _MMIO(0x25428), 0x00000000 },
342}; 395};
343 396
344static const struct i915_oa_reg * 397static int
345get_memory_writes_mux_config(struct drm_i915_private *dev_priv, 398get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
346 int *len) 399 const struct i915_oa_reg **regs,
400 int *lens)
347{ 401{
348 *len = ARRAY_SIZE(mux_config_memory_writes); 402 int n = 0;
349 return mux_config_memory_writes; 403
404 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
405 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
406
407 regs[n] = mux_config_memory_writes;
408 lens[n] = ARRAY_SIZE(mux_config_memory_writes);
409 n++;
410
411 return n;
350} 412}
351 413
352static const struct i915_oa_reg b_counter_config_sampler_balance[] = { 414static const struct i915_oa_reg b_counter_config_sampler_balance[] = {
@@ -358,6 +420,9 @@ static const struct i915_oa_reg b_counter_config_sampler_balance[] = {
358 { _MMIO(0x2724), 0x00800000 }, 420 { _MMIO(0x2724), 0x00800000 },
359}; 421};
360 422
423static const struct i915_oa_reg flex_eu_config_sampler_balance[] = {
424};
425
361static const struct i915_oa_reg mux_config_sampler_balance[] = { 426static const struct i915_oa_reg mux_config_sampler_balance[] = {
362 { _MMIO(0x2eb9c), 0x01906400 }, 427 { _MMIO(0x2eb9c), 0x01906400 },
363 { _MMIO(0x2fb9c), 0x01906400 }, 428 { _MMIO(0x2fb9c), 0x01906400 },
@@ -401,31 +466,40 @@ static const struct i915_oa_reg mux_config_sampler_balance[] = {
401 { _MMIO(0x25428), 0x0004a54a }, 466 { _MMIO(0x25428), 0x0004a54a },
402}; 467};
403 468
404static const struct i915_oa_reg * 469static int
405get_sampler_balance_mux_config(struct drm_i915_private *dev_priv, 470get_sampler_balance_mux_config(struct drm_i915_private *dev_priv,
406 int *len) 471 const struct i915_oa_reg **regs,
472 int *lens)
407{ 473{
408 *len = ARRAY_SIZE(mux_config_sampler_balance); 474 int n = 0;
409 return mux_config_sampler_balance; 475
476 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
477 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
478
479 regs[n] = mux_config_sampler_balance;
480 lens[n] = ARRAY_SIZE(mux_config_sampler_balance);
481 n++;
482
483 return n;
410} 484}
411 485
412int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv) 486int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
413{ 487{
414 dev_priv->perf.oa.mux_regs = NULL; 488 dev_priv->perf.oa.n_mux_configs = 0;
415 dev_priv->perf.oa.mux_regs_len = 0;
416 dev_priv->perf.oa.b_counter_regs = NULL; 489 dev_priv->perf.oa.b_counter_regs = NULL;
417 dev_priv->perf.oa.b_counter_regs_len = 0; 490 dev_priv->perf.oa.b_counter_regs_len = 0;
418 491
419 switch (dev_priv->perf.oa.metrics_set) { 492 switch (dev_priv->perf.oa.metrics_set) {
420 case METRIC_SET_ID_RENDER_BASIC: 493 case METRIC_SET_ID_RENDER_BASIC:
421 dev_priv->perf.oa.mux_regs = 494 dev_priv->perf.oa.n_mux_configs =
422 get_render_basic_mux_config(dev_priv, 495 get_render_basic_mux_config(dev_priv,
423 &dev_priv->perf.oa.mux_regs_len); 496 dev_priv->perf.oa.mux_regs,
424 if (!dev_priv->perf.oa.mux_regs) { 497 dev_priv->perf.oa.mux_regs_lens);
425 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set"); 498 if (dev_priv->perf.oa.n_mux_configs == 0) {
499 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
426 500
427 /* EINVAL because *_register_sysfs already checked this 501 /* EINVAL because *_register_sysfs already checked this
428 * and so it wouldn't have been advertised so userspace and 502 * and so it wouldn't have been advertised to userspace and
429 * so shouldn't have been requested 503 * so shouldn't have been requested
430 */ 504 */
431 return -EINVAL; 505 return -EINVAL;
@@ -436,16 +510,22 @@ int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
436 dev_priv->perf.oa.b_counter_regs_len = 510 dev_priv->perf.oa.b_counter_regs_len =
437 ARRAY_SIZE(b_counter_config_render_basic); 511 ARRAY_SIZE(b_counter_config_render_basic);
438 512
513 dev_priv->perf.oa.flex_regs =
514 flex_eu_config_render_basic;
515 dev_priv->perf.oa.flex_regs_len =
516 ARRAY_SIZE(flex_eu_config_render_basic);
517
439 return 0; 518 return 0;
440 case METRIC_SET_ID_COMPUTE_BASIC: 519 case METRIC_SET_ID_COMPUTE_BASIC:
441 dev_priv->perf.oa.mux_regs = 520 dev_priv->perf.oa.n_mux_configs =
442 get_compute_basic_mux_config(dev_priv, 521 get_compute_basic_mux_config(dev_priv,
443 &dev_priv->perf.oa.mux_regs_len); 522 dev_priv->perf.oa.mux_regs,
444 if (!dev_priv->perf.oa.mux_regs) { 523 dev_priv->perf.oa.mux_regs_lens);
445 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set"); 524 if (dev_priv->perf.oa.n_mux_configs == 0) {
525 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
446 526
447 /* EINVAL because *_register_sysfs already checked this 527 /* EINVAL because *_register_sysfs already checked this
448 * and so it wouldn't have been advertised so userspace and 528 * and so it wouldn't have been advertised to userspace and
449 * so shouldn't have been requested 529 * so shouldn't have been requested
450 */ 530 */
451 return -EINVAL; 531 return -EINVAL;
@@ -456,16 +536,22 @@ int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
456 dev_priv->perf.oa.b_counter_regs_len = 536 dev_priv->perf.oa.b_counter_regs_len =
457 ARRAY_SIZE(b_counter_config_compute_basic); 537 ARRAY_SIZE(b_counter_config_compute_basic);
458 538
539 dev_priv->perf.oa.flex_regs =
540 flex_eu_config_compute_basic;
541 dev_priv->perf.oa.flex_regs_len =
542 ARRAY_SIZE(flex_eu_config_compute_basic);
543
459 return 0; 544 return 0;
460 case METRIC_SET_ID_COMPUTE_EXTENDED: 545 case METRIC_SET_ID_COMPUTE_EXTENDED:
461 dev_priv->perf.oa.mux_regs = 546 dev_priv->perf.oa.n_mux_configs =
462 get_compute_extended_mux_config(dev_priv, 547 get_compute_extended_mux_config(dev_priv,
463 &dev_priv->perf.oa.mux_regs_len); 548 dev_priv->perf.oa.mux_regs,
464 if (!dev_priv->perf.oa.mux_regs) { 549 dev_priv->perf.oa.mux_regs_lens);
465 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set"); 550 if (dev_priv->perf.oa.n_mux_configs == 0) {
551 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
466 552
467 /* EINVAL because *_register_sysfs already checked this 553 /* EINVAL because *_register_sysfs already checked this
468 * and so it wouldn't have been advertised so userspace and 554 * and so it wouldn't have been advertised to userspace and
469 * so shouldn't have been requested 555 * so shouldn't have been requested
470 */ 556 */
471 return -EINVAL; 557 return -EINVAL;
@@ -476,16 +562,22 @@ int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
476 dev_priv->perf.oa.b_counter_regs_len = 562 dev_priv->perf.oa.b_counter_regs_len =
477 ARRAY_SIZE(b_counter_config_compute_extended); 563 ARRAY_SIZE(b_counter_config_compute_extended);
478 564
565 dev_priv->perf.oa.flex_regs =
566 flex_eu_config_compute_extended;
567 dev_priv->perf.oa.flex_regs_len =
568 ARRAY_SIZE(flex_eu_config_compute_extended);
569
479 return 0; 570 return 0;
480 case METRIC_SET_ID_MEMORY_READS: 571 case METRIC_SET_ID_MEMORY_READS:
481 dev_priv->perf.oa.mux_regs = 572 dev_priv->perf.oa.n_mux_configs =
482 get_memory_reads_mux_config(dev_priv, 573 get_memory_reads_mux_config(dev_priv,
483 &dev_priv->perf.oa.mux_regs_len); 574 dev_priv->perf.oa.mux_regs,
484 if (!dev_priv->perf.oa.mux_regs) { 575 dev_priv->perf.oa.mux_regs_lens);
485 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set"); 576 if (dev_priv->perf.oa.n_mux_configs == 0) {
577 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
486 578
487 /* EINVAL because *_register_sysfs already checked this 579 /* EINVAL because *_register_sysfs already checked this
488 * and so it wouldn't have been advertised so userspace and 580 * and so it wouldn't have been advertised to userspace and
489 * so shouldn't have been requested 581 * so shouldn't have been requested
490 */ 582 */
491 return -EINVAL; 583 return -EINVAL;
@@ -496,16 +588,22 @@ int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
496 dev_priv->perf.oa.b_counter_regs_len = 588 dev_priv->perf.oa.b_counter_regs_len =
497 ARRAY_SIZE(b_counter_config_memory_reads); 589 ARRAY_SIZE(b_counter_config_memory_reads);
498 590
591 dev_priv->perf.oa.flex_regs =
592 flex_eu_config_memory_reads;
593 dev_priv->perf.oa.flex_regs_len =
594 ARRAY_SIZE(flex_eu_config_memory_reads);
595
499 return 0; 596 return 0;
500 case METRIC_SET_ID_MEMORY_WRITES: 597 case METRIC_SET_ID_MEMORY_WRITES:
501 dev_priv->perf.oa.mux_regs = 598 dev_priv->perf.oa.n_mux_configs =
502 get_memory_writes_mux_config(dev_priv, 599 get_memory_writes_mux_config(dev_priv,
503 &dev_priv->perf.oa.mux_regs_len); 600 dev_priv->perf.oa.mux_regs,
504 if (!dev_priv->perf.oa.mux_regs) { 601 dev_priv->perf.oa.mux_regs_lens);
505 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set"); 602 if (dev_priv->perf.oa.n_mux_configs == 0) {
603 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
506 604
507 /* EINVAL because *_register_sysfs already checked this 605 /* EINVAL because *_register_sysfs already checked this
508 * and so it wouldn't have been advertised so userspace and 606 * and so it wouldn't have been advertised to userspace and
509 * so shouldn't have been requested 607 * so shouldn't have been requested
510 */ 608 */
511 return -EINVAL; 609 return -EINVAL;
@@ -516,16 +614,22 @@ int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
516 dev_priv->perf.oa.b_counter_regs_len = 614 dev_priv->perf.oa.b_counter_regs_len =
517 ARRAY_SIZE(b_counter_config_memory_writes); 615 ARRAY_SIZE(b_counter_config_memory_writes);
518 616
617 dev_priv->perf.oa.flex_regs =
618 flex_eu_config_memory_writes;
619 dev_priv->perf.oa.flex_regs_len =
620 ARRAY_SIZE(flex_eu_config_memory_writes);
621
519 return 0; 622 return 0;
520 case METRIC_SET_ID_SAMPLER_BALANCE: 623 case METRIC_SET_ID_SAMPLER_BALANCE:
521 dev_priv->perf.oa.mux_regs = 624 dev_priv->perf.oa.n_mux_configs =
522 get_sampler_balance_mux_config(dev_priv, 625 get_sampler_balance_mux_config(dev_priv,
523 &dev_priv->perf.oa.mux_regs_len); 626 dev_priv->perf.oa.mux_regs,
524 if (!dev_priv->perf.oa.mux_regs) { 627 dev_priv->perf.oa.mux_regs_lens);
525 DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_BALANCE\" metric set"); 628 if (dev_priv->perf.oa.n_mux_configs == 0) {
629 DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_BALANCE\" metric set\n");
526 630
527 /* EINVAL because *_register_sysfs already checked this 631 /* EINVAL because *_register_sysfs already checked this
528 * and so it wouldn't have been advertised so userspace and 632 * and so it wouldn't have been advertised to userspace and
529 * so shouldn't have been requested 633 * so shouldn't have been requested
530 */ 634 */
531 return -EINVAL; 635 return -EINVAL;
@@ -536,6 +640,11 @@ int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
536 dev_priv->perf.oa.b_counter_regs_len = 640 dev_priv->perf.oa.b_counter_regs_len =
537 ARRAY_SIZE(b_counter_config_sampler_balance); 641 ARRAY_SIZE(b_counter_config_sampler_balance);
538 642
643 dev_priv->perf.oa.flex_regs =
644 flex_eu_config_sampler_balance;
645 dev_priv->perf.oa.flex_regs_len =
646 ARRAY_SIZE(flex_eu_config_sampler_balance);
647
539 return 0; 648 return 0;
540 default: 649 default:
541 return -ENODEV; 650 return -ENODEV;
@@ -677,35 +786,36 @@ static struct attribute_group group_sampler_balance = {
677int 786int
678i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv) 787i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv)
679{ 788{
680 int mux_len; 789 const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
790 int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
681 int ret = 0; 791 int ret = 0;
682 792
683 if (get_render_basic_mux_config(dev_priv, &mux_len)) { 793 if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
684 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic); 794 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
685 if (ret) 795 if (ret)
686 goto error_render_basic; 796 goto error_render_basic;
687 } 797 }
688 if (get_compute_basic_mux_config(dev_priv, &mux_len)) { 798 if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
689 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic); 799 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
690 if (ret) 800 if (ret)
691 goto error_compute_basic; 801 goto error_compute_basic;
692 } 802 }
693 if (get_compute_extended_mux_config(dev_priv, &mux_len)) { 803 if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
694 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended); 804 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
695 if (ret) 805 if (ret)
696 goto error_compute_extended; 806 goto error_compute_extended;
697 } 807 }
698 if (get_memory_reads_mux_config(dev_priv, &mux_len)) { 808 if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
699 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads); 809 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
700 if (ret) 810 if (ret)
701 goto error_memory_reads; 811 goto error_memory_reads;
702 } 812 }
703 if (get_memory_writes_mux_config(dev_priv, &mux_len)) { 813 if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
704 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes); 814 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
705 if (ret) 815 if (ret)
706 goto error_memory_writes; 816 goto error_memory_writes;
707 } 817 }
708 if (get_sampler_balance_mux_config(dev_priv, &mux_len)) { 818 if (get_sampler_balance_mux_config(dev_priv, mux_regs, mux_lens)) {
709 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_balance); 819 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_balance);
710 if (ret) 820 if (ret)
711 goto error_sampler_balance; 821 goto error_sampler_balance;
@@ -714,19 +824,19 @@ i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv)
714 return 0; 824 return 0;
715 825
716error_sampler_balance: 826error_sampler_balance:
717 if (get_sampler_balance_mux_config(dev_priv, &mux_len)) 827 if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
718 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes); 828 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
719error_memory_writes: 829error_memory_writes:
720 if (get_sampler_balance_mux_config(dev_priv, &mux_len)) 830 if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
721 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads); 831 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
722error_memory_reads: 832error_memory_reads:
723 if (get_sampler_balance_mux_config(dev_priv, &mux_len)) 833 if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
724 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended); 834 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
725error_compute_extended: 835error_compute_extended:
726 if (get_sampler_balance_mux_config(dev_priv, &mux_len)) 836 if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
727 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic); 837 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
728error_compute_basic: 838error_compute_basic:
729 if (get_sampler_balance_mux_config(dev_priv, &mux_len)) 839 if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
730 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic); 840 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
731error_render_basic: 841error_render_basic:
732 return ret; 842 return ret;
@@ -735,18 +845,19 @@ error_render_basic:
735void 845void
736i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv) 846i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv)
737{ 847{
738 int mux_len; 848 const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
849 int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
739 850
740 if (get_render_basic_mux_config(dev_priv, &mux_len)) 851 if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
741 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic); 852 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
742 if (get_compute_basic_mux_config(dev_priv, &mux_len)) 853 if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
743 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic); 854 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
744 if (get_compute_extended_mux_config(dev_priv, &mux_len)) 855 if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
745 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended); 856 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
746 if (get_memory_reads_mux_config(dev_priv, &mux_len)) 857 if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
747 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads); 858 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
748 if (get_memory_writes_mux_config(dev_priv, &mux_len)) 859 if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
749 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes); 860 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
750 if (get_sampler_balance_mux_config(dev_priv, &mux_len)) 861 if (get_sampler_balance_mux_config(dev_priv, mux_regs, mux_lens))
751 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_balance); 862 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_balance);
752} 863}
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h
index 429a229b5158..6fe7e0690ef3 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.h
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.h
@@ -1,5 +1,7 @@
1/* 1/*
2 * Autogenerated file, DO NOT EDIT manually! 2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
3 * 5 *
4 * Copyright (c) 2015 Intel Corporation 6 * Copyright (c) 2015 Intel Corporation
5 * 7 *
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
new file mode 100644
index 000000000000..87dbd0a0b076
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
@@ -0,0 +1,2991 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#include <linux/sysfs.h>
30
31#include "i915_drv.h"
32#include "i915_oa_kblgt2.h"
33
34enum metric_set_id {
35 METRIC_SET_ID_RENDER_BASIC = 1,
36 METRIC_SET_ID_COMPUTE_BASIC,
37 METRIC_SET_ID_RENDER_PIPE_PROFILE,
38 METRIC_SET_ID_MEMORY_READS,
39 METRIC_SET_ID_MEMORY_WRITES,
40 METRIC_SET_ID_COMPUTE_EXTENDED,
41 METRIC_SET_ID_COMPUTE_L3_CACHE,
42 METRIC_SET_ID_HDC_AND_SF,
43 METRIC_SET_ID_L3_1,
44 METRIC_SET_ID_L3_2,
45 METRIC_SET_ID_L3_3,
46 METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
47 METRIC_SET_ID_SAMPLER,
48 METRIC_SET_ID_TDL_1,
49 METRIC_SET_ID_TDL_2,
50 METRIC_SET_ID_COMPUTE_EXTRA,
51 METRIC_SET_ID_VME_PIPE,
52 METRIC_SET_ID_TEST_OA,
53};
54
55int i915_oa_n_builtin_metric_sets_kblgt2 = 18;
56
57static const struct i915_oa_reg b_counter_config_render_basic[] = {
58 { _MMIO(0x2710), 0x00000000 },
59 { _MMIO(0x2714), 0x00800000 },
60 { _MMIO(0x2720), 0x00000000 },
61 { _MMIO(0x2724), 0x00800000 },
62 { _MMIO(0x2740), 0x00000000 },
63};
64
65static const struct i915_oa_reg flex_eu_config_render_basic[] = {
66 { _MMIO(0xe458), 0x00005004 },
67 { _MMIO(0xe558), 0x00010003 },
68 { _MMIO(0xe658), 0x00012011 },
69 { _MMIO(0xe758), 0x00015014 },
70 { _MMIO(0xe45c), 0x00051050 },
71 { _MMIO(0xe55c), 0x00053052 },
72 { _MMIO(0xe65c), 0x00055054 },
73};
74
75static const struct i915_oa_reg mux_config_render_basic[] = {
76 { _MMIO(0x9888), 0x166c01e0 },
77 { _MMIO(0x9888), 0x12170280 },
78 { _MMIO(0x9888), 0x12370280 },
79 { _MMIO(0x9888), 0x11930317 },
80 { _MMIO(0x9888), 0x159303df },
81 { _MMIO(0x9888), 0x3f900003 },
82 { _MMIO(0x9888), 0x1a4e0080 },
83 { _MMIO(0x9888), 0x0a6c0053 },
84 { _MMIO(0x9888), 0x106c0000 },
85 { _MMIO(0x9888), 0x1c6c0000 },
86 { _MMIO(0x9888), 0x0a1b4000 },
87 { _MMIO(0x9888), 0x1c1c0001 },
88 { _MMIO(0x9888), 0x002f1000 },
89 { _MMIO(0x9888), 0x042f1000 },
90 { _MMIO(0x9888), 0x004c4000 },
91 { _MMIO(0x9888), 0x0a4c8400 },
92 { _MMIO(0x9888), 0x000d2000 },
93 { _MMIO(0x9888), 0x060d8000 },
94 { _MMIO(0x9888), 0x080da000 },
95 { _MMIO(0x9888), 0x0a0d2000 },
96 { _MMIO(0x9888), 0x0c0f0400 },
97 { _MMIO(0x9888), 0x0e0f6600 },
98 { _MMIO(0x9888), 0x002c8000 },
99 { _MMIO(0x9888), 0x162c2200 },
100 { _MMIO(0x9888), 0x062d8000 },
101 { _MMIO(0x9888), 0x082d8000 },
102 { _MMIO(0x9888), 0x00133000 },
103 { _MMIO(0x9888), 0x08133000 },
104 { _MMIO(0x9888), 0x00170020 },
105 { _MMIO(0x9888), 0x08170021 },
106 { _MMIO(0x9888), 0x10170000 },
107 { _MMIO(0x9888), 0x0633c000 },
108 { _MMIO(0x9888), 0x0833c000 },
109 { _MMIO(0x9888), 0x06370800 },
110 { _MMIO(0x9888), 0x08370840 },
111 { _MMIO(0x9888), 0x10370000 },
112 { _MMIO(0x9888), 0x0d933031 },
113 { _MMIO(0x9888), 0x0f933e3f },
114 { _MMIO(0x9888), 0x01933d00 },
115 { _MMIO(0x9888), 0x0393073c },
116 { _MMIO(0x9888), 0x0593000e },
117 { _MMIO(0x9888), 0x1d930000 },
118 { _MMIO(0x9888), 0x19930000 },
119 { _MMIO(0x9888), 0x1b930000 },
120 { _MMIO(0x9888), 0x1d900157 },
121 { _MMIO(0x9888), 0x1f900158 },
122 { _MMIO(0x9888), 0x35900000 },
123 { _MMIO(0x9888), 0x2b908000 },
124 { _MMIO(0x9888), 0x2d908000 },
125 { _MMIO(0x9888), 0x2f908000 },
126 { _MMIO(0x9888), 0x31908000 },
127 { _MMIO(0x9888), 0x15908000 },
128 { _MMIO(0x9888), 0x17908000 },
129 { _MMIO(0x9888), 0x19908000 },
130 { _MMIO(0x9888), 0x1b908000 },
131 { _MMIO(0x9888), 0x1190001f },
132 { _MMIO(0x9888), 0x51904400 },
133 { _MMIO(0x9888), 0x41900020 },
134 { _MMIO(0x9888), 0x55900000 },
135 { _MMIO(0x9888), 0x45900c21 },
136 { _MMIO(0x9888), 0x47900061 },
137 { _MMIO(0x9888), 0x57904440 },
138 { _MMIO(0x9888), 0x49900000 },
139 { _MMIO(0x9888), 0x37900000 },
140 { _MMIO(0x9888), 0x33900000 },
141 { _MMIO(0x9888), 0x4b900000 },
142 { _MMIO(0x9888), 0x59900004 },
143 { _MMIO(0x9888), 0x43900000 },
144 { _MMIO(0x9888), 0x53904444 },
145};
146
147static int
148get_render_basic_mux_config(struct drm_i915_private *dev_priv,
149 const struct i915_oa_reg **regs,
150 int *lens)
151{
152 int n = 0;
153
154 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
155 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
156
157 regs[n] = mux_config_render_basic;
158 lens[n] = ARRAY_SIZE(mux_config_render_basic);
159 n++;
160
161 return n;
162}
163
164static const struct i915_oa_reg b_counter_config_compute_basic[] = {
165 { _MMIO(0x2710), 0x00000000 },
166 { _MMIO(0x2714), 0x00800000 },
167 { _MMIO(0x2720), 0x00000000 },
168 { _MMIO(0x2724), 0x00800000 },
169 { _MMIO(0x2740), 0x00000000 },
170};
171
172static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
173 { _MMIO(0xe458), 0x00005004 },
174 { _MMIO(0xe558), 0x00000003 },
175 { _MMIO(0xe658), 0x00002001 },
176 { _MMIO(0xe758), 0x00778008 },
177 { _MMIO(0xe45c), 0x00088078 },
178 { _MMIO(0xe55c), 0x00808708 },
179 { _MMIO(0xe65c), 0x00a08908 },
180};
181
182static const struct i915_oa_reg mux_config_compute_basic[] = {
183 { _MMIO(0x9888), 0x104f00e0 },
184 { _MMIO(0x9888), 0x124f1c00 },
185 { _MMIO(0x9888), 0x106c00e0 },
186 { _MMIO(0x9888), 0x37906800 },
187 { _MMIO(0x9888), 0x3f900003 },
188 { _MMIO(0x9888), 0x004e8000 },
189 { _MMIO(0x9888), 0x1a4e0820 },
190 { _MMIO(0x9888), 0x1c4e0002 },
191 { _MMIO(0x9888), 0x064f0900 },
192 { _MMIO(0x9888), 0x084f0032 },
193 { _MMIO(0x9888), 0x0a4f1891 },
194 { _MMIO(0x9888), 0x0c4f0e00 },
195 { _MMIO(0x9888), 0x0e4f003c },
196 { _MMIO(0x9888), 0x004f0d80 },
197 { _MMIO(0x9888), 0x024f003b },
198 { _MMIO(0x9888), 0x006c0002 },
199 { _MMIO(0x9888), 0x086c0100 },
200 { _MMIO(0x9888), 0x0c6c000c },
201 { _MMIO(0x9888), 0x0e6c0b00 },
202 { _MMIO(0x9888), 0x186c0000 },
203 { _MMIO(0x9888), 0x1c6c0000 },
204 { _MMIO(0x9888), 0x1e6c0000 },
205 { _MMIO(0x9888), 0x001b4000 },
206 { _MMIO(0x9888), 0x081b8000 },
207 { _MMIO(0x9888), 0x0c1b4000 },
208 { _MMIO(0x9888), 0x0e1b8000 },
209 { _MMIO(0x9888), 0x101c8000 },
210 { _MMIO(0x9888), 0x1a1c8000 },
211 { _MMIO(0x9888), 0x1c1c0024 },
212 { _MMIO(0x9888), 0x065b8000 },
213 { _MMIO(0x9888), 0x085b4000 },
214 { _MMIO(0x9888), 0x0a5bc000 },
215 { _MMIO(0x9888), 0x0c5b8000 },
216 { _MMIO(0x9888), 0x0e5b4000 },
217 { _MMIO(0x9888), 0x005b8000 },
218 { _MMIO(0x9888), 0x025b4000 },
219 { _MMIO(0x9888), 0x1a5c6000 },
220 { _MMIO(0x9888), 0x1c5c001b },
221 { _MMIO(0x9888), 0x125c8000 },
222 { _MMIO(0x9888), 0x145c8000 },
223 { _MMIO(0x9888), 0x004c8000 },
224 { _MMIO(0x9888), 0x0a4c2000 },
225 { _MMIO(0x9888), 0x0c4c0208 },
226 { _MMIO(0x9888), 0x000da000 },
227 { _MMIO(0x9888), 0x060d8000 },
228 { _MMIO(0x9888), 0x080da000 },
229 { _MMIO(0x9888), 0x0a0da000 },
230 { _MMIO(0x9888), 0x0c0da000 },
231 { _MMIO(0x9888), 0x0e0da000 },
232 { _MMIO(0x9888), 0x020d2000 },
233 { _MMIO(0x9888), 0x0c0f5400 },
234 { _MMIO(0x9888), 0x0e0f5500 },
235 { _MMIO(0x9888), 0x100f0155 },
236 { _MMIO(0x9888), 0x002c8000 },
237 { _MMIO(0x9888), 0x0e2cc000 },
238 { _MMIO(0x9888), 0x162cfb00 },
239 { _MMIO(0x9888), 0x182c00be },
240 { _MMIO(0x9888), 0x022cc000 },
241 { _MMIO(0x9888), 0x042cc000 },
242 { _MMIO(0x9888), 0x19900157 },
243 { _MMIO(0x9888), 0x1b900158 },
244 { _MMIO(0x9888), 0x1d900105 },
245 { _MMIO(0x9888), 0x1f900103 },
246 { _MMIO(0x9888), 0x35900000 },
247 { _MMIO(0x9888), 0x11900fff },
248 { _MMIO(0x9888), 0x51900000 },
249 { _MMIO(0x9888), 0x41900800 },
250 { _MMIO(0x9888), 0x55900000 },
251 { _MMIO(0x9888), 0x45900821 },
252 { _MMIO(0x9888), 0x47900802 },
253 { _MMIO(0x9888), 0x57900000 },
254 { _MMIO(0x9888), 0x49900802 },
255 { _MMIO(0x9888), 0x33900000 },
256 { _MMIO(0x9888), 0x4b900002 },
257 { _MMIO(0x9888), 0x59900000 },
258 { _MMIO(0x9888), 0x43900422 },
259 { _MMIO(0x9888), 0x53904444 },
260};
261
262static int
263get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
264 const struct i915_oa_reg **regs,
265 int *lens)
266{
267 int n = 0;
268
269 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
270 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
271
272 regs[n] = mux_config_compute_basic;
273 lens[n] = ARRAY_SIZE(mux_config_compute_basic);
274 n++;
275
276 return n;
277}
278
279static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
280 { _MMIO(0x2724), 0xf0800000 },
281 { _MMIO(0x2720), 0x00000000 },
282 { _MMIO(0x2714), 0xf0800000 },
283 { _MMIO(0x2710), 0x00000000 },
284 { _MMIO(0x2740), 0x00000000 },
285 { _MMIO(0x2770), 0x0007ffea },
286 { _MMIO(0x2774), 0x00007ffc },
287 { _MMIO(0x2778), 0x0007affa },
288 { _MMIO(0x277c), 0x0000f5fd },
289 { _MMIO(0x2780), 0x00079ffa },
290 { _MMIO(0x2784), 0x0000f3fb },
291 { _MMIO(0x2788), 0x0007bf7a },
292 { _MMIO(0x278c), 0x0000f7e7 },
293 { _MMIO(0x2790), 0x0007fefa },
294 { _MMIO(0x2794), 0x0000f7cf },
295 { _MMIO(0x2798), 0x00077ffa },
296 { _MMIO(0x279c), 0x0000efdf },
297 { _MMIO(0x27a0), 0x0006fffa },
298 { _MMIO(0x27a4), 0x0000cfbf },
299 { _MMIO(0x27a8), 0x0003fffa },
300 { _MMIO(0x27ac), 0x00005f7f },
301};
302
303static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
304 { _MMIO(0xe458), 0x00005004 },
305 { _MMIO(0xe558), 0x00015014 },
306 { _MMIO(0xe658), 0x00025024 },
307 { _MMIO(0xe758), 0x00035034 },
308 { _MMIO(0xe45c), 0x00045044 },
309 { _MMIO(0xe55c), 0x00055054 },
310 { _MMIO(0xe65c), 0x00065064 },
311};
312
313static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
314 { _MMIO(0x9888), 0x0c0e001f },
315 { _MMIO(0x9888), 0x0a0f0000 },
316 { _MMIO(0x9888), 0x10116800 },
317 { _MMIO(0x9888), 0x178a03e0 },
318 { _MMIO(0x9888), 0x11824c00 },
319 { _MMIO(0x9888), 0x11830020 },
320 { _MMIO(0x9888), 0x13840020 },
321 { _MMIO(0x9888), 0x11850019 },
322 { _MMIO(0x9888), 0x11860007 },
323 { _MMIO(0x9888), 0x01870c40 },
324 { _MMIO(0x9888), 0x17880000 },
325 { _MMIO(0x9888), 0x022f4000 },
326 { _MMIO(0x9888), 0x0a4c0040 },
327 { _MMIO(0x9888), 0x0c0d8000 },
328 { _MMIO(0x9888), 0x040d4000 },
329 { _MMIO(0x9888), 0x060d2000 },
330 { _MMIO(0x9888), 0x020e5400 },
331 { _MMIO(0x9888), 0x000e0000 },
332 { _MMIO(0x9888), 0x080f0040 },
333 { _MMIO(0x9888), 0x000f0000 },
334 { _MMIO(0x9888), 0x100f0000 },
335 { _MMIO(0x9888), 0x0e0f0040 },
336 { _MMIO(0x9888), 0x0c2c8000 },
337 { _MMIO(0x9888), 0x06104000 },
338 { _MMIO(0x9888), 0x06110012 },
339 { _MMIO(0x9888), 0x06131000 },
340 { _MMIO(0x9888), 0x01898000 },
341 { _MMIO(0x9888), 0x0d890100 },
342 { _MMIO(0x9888), 0x03898000 },
343 { _MMIO(0x9888), 0x09808000 },
344 { _MMIO(0x9888), 0x0b808000 },
345 { _MMIO(0x9888), 0x0380c000 },
346 { _MMIO(0x9888), 0x0f8a0075 },
347 { _MMIO(0x9888), 0x1d8a0000 },
348 { _MMIO(0x9888), 0x118a8000 },
349 { _MMIO(0x9888), 0x1b8a4000 },
350 { _MMIO(0x9888), 0x138a8000 },
351 { _MMIO(0x9888), 0x1d81a000 },
352 { _MMIO(0x9888), 0x15818000 },
353 { _MMIO(0x9888), 0x17818000 },
354 { _MMIO(0x9888), 0x0b820030 },
355 { _MMIO(0x9888), 0x07828000 },
356 { _MMIO(0x9888), 0x0d824000 },
357 { _MMIO(0x9888), 0x0f828000 },
358 { _MMIO(0x9888), 0x05824000 },
359 { _MMIO(0x9888), 0x0d830003 },
360 { _MMIO(0x9888), 0x0583000c },
361 { _MMIO(0x9888), 0x09830000 },
362 { _MMIO(0x9888), 0x03838000 },
363 { _MMIO(0x9888), 0x07838000 },
364 { _MMIO(0x9888), 0x0b840980 },
365 { _MMIO(0x9888), 0x03844d80 },
366 { _MMIO(0x9888), 0x11840000 },
367 { _MMIO(0x9888), 0x09848000 },
368 { _MMIO(0x9888), 0x09850080 },
369 { _MMIO(0x9888), 0x03850003 },
370 { _MMIO(0x9888), 0x01850000 },
371 { _MMIO(0x9888), 0x07860000 },
372 { _MMIO(0x9888), 0x0f860400 },
373 { _MMIO(0x9888), 0x09870032 },
374 { _MMIO(0x9888), 0x01888052 },
375 { _MMIO(0x9888), 0x11880000 },
376 { _MMIO(0x9888), 0x09884000 },
377 { _MMIO(0x9888), 0x1b931001 },
378 { _MMIO(0x9888), 0x1d930001 },
379 { _MMIO(0x9888), 0x19934000 },
380 { _MMIO(0x9888), 0x1b958000 },
381 { _MMIO(0x9888), 0x1d950094 },
382 { _MMIO(0x9888), 0x19958000 },
383 { _MMIO(0x9888), 0x09e58000 },
384 { _MMIO(0x9888), 0x0be58000 },
385 { _MMIO(0x9888), 0x03e5c000 },
386 { _MMIO(0x9888), 0x0592c000 },
387 { _MMIO(0x9888), 0x0b928000 },
388 { _MMIO(0x9888), 0x0d924000 },
389 { _MMIO(0x9888), 0x0f924000 },
390 { _MMIO(0x9888), 0x11928000 },
391 { _MMIO(0x9888), 0x1392c000 },
392 { _MMIO(0x9888), 0x09924000 },
393 { _MMIO(0x9888), 0x01985000 },
394 { _MMIO(0x9888), 0x07988000 },
395 { _MMIO(0x9888), 0x09981000 },
396 { _MMIO(0x9888), 0x0b982000 },
397 { _MMIO(0x9888), 0x0d982000 },
398 { _MMIO(0x9888), 0x0f989000 },
399 { _MMIO(0x9888), 0x05982000 },
400 { _MMIO(0x9888), 0x13904000 },
401 { _MMIO(0x9888), 0x21904000 },
402 { _MMIO(0x9888), 0x23904000 },
403 { _MMIO(0x9888), 0x25908000 },
404 { _MMIO(0x9888), 0x27904000 },
405 { _MMIO(0x9888), 0x29908000 },
406 { _MMIO(0x9888), 0x2b904000 },
407 { _MMIO(0x9888), 0x2f904000 },
408 { _MMIO(0x9888), 0x31904000 },
409 { _MMIO(0x9888), 0x15904000 },
410 { _MMIO(0x9888), 0x17908000 },
411 { _MMIO(0x9888), 0x19908000 },
412 { _MMIO(0x9888), 0x1b904000 },
413 { _MMIO(0x9888), 0x1190c080 },
414 { _MMIO(0x9888), 0x51900000 },
415 { _MMIO(0x9888), 0x41900440 },
416 { _MMIO(0x9888), 0x55900000 },
417 { _MMIO(0x9888), 0x45900400 },
418 { _MMIO(0x9888), 0x47900c21 },
419 { _MMIO(0x9888), 0x57900400 },
420 { _MMIO(0x9888), 0x49900042 },
421 { _MMIO(0x9888), 0x37900000 },
422 { _MMIO(0x9888), 0x33900000 },
423 { _MMIO(0x9888), 0x4b900024 },
424 { _MMIO(0x9888), 0x59900000 },
425 { _MMIO(0x9888), 0x43900841 },
426 { _MMIO(0x9888), 0x53900400 },
427};
428
429static int
430get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
431 const struct i915_oa_reg **regs,
432 int *lens)
433{
434 int n = 0;
435
436 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
437 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
438
439 regs[n] = mux_config_render_pipe_profile;
440 lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
441 n++;
442
443 return n;
444}
445
446static const struct i915_oa_reg b_counter_config_memory_reads[] = {
447 { _MMIO(0x272c), 0xffffffff },
448 { _MMIO(0x2728), 0xffffffff },
449 { _MMIO(0x2724), 0xf0800000 },
450 { _MMIO(0x2720), 0x00000000 },
451 { _MMIO(0x271c), 0xffffffff },
452 { _MMIO(0x2718), 0xffffffff },
453 { _MMIO(0x2714), 0xf0800000 },
454 { _MMIO(0x2710), 0x00000000 },
455 { _MMIO(0x274c), 0x86543210 },
456 { _MMIO(0x2748), 0x86543210 },
457 { _MMIO(0x2744), 0x00006667 },
458 { _MMIO(0x2740), 0x00000000 },
459 { _MMIO(0x275c), 0x86543210 },
460 { _MMIO(0x2758), 0x86543210 },
461 { _MMIO(0x2754), 0x00006465 },
462 { _MMIO(0x2750), 0x00000000 },
463 { _MMIO(0x2770), 0x0007f81a },
464 { _MMIO(0x2774), 0x0000fe00 },
465 { _MMIO(0x2778), 0x0007f82a },
466 { _MMIO(0x277c), 0x0000fe00 },
467 { _MMIO(0x2780), 0x0007f872 },
468 { _MMIO(0x2784), 0x0000fe00 },
469 { _MMIO(0x2788), 0x0007f8ba },
470 { _MMIO(0x278c), 0x0000fe00 },
471 { _MMIO(0x2790), 0x0007f87a },
472 { _MMIO(0x2794), 0x0000fe00 },
473 { _MMIO(0x2798), 0x0007f8ea },
474 { _MMIO(0x279c), 0x0000fe00 },
475 { _MMIO(0x27a0), 0x0007f8e2 },
476 { _MMIO(0x27a4), 0x0000fe00 },
477 { _MMIO(0x27a8), 0x0007f8f2 },
478 { _MMIO(0x27ac), 0x0000fe00 },
479};
480
481static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
482 { _MMIO(0xe458), 0x00005004 },
483 { _MMIO(0xe558), 0x00015014 },
484 { _MMIO(0xe658), 0x00025024 },
485 { _MMIO(0xe758), 0x00035034 },
486 { _MMIO(0xe45c), 0x00045044 },
487 { _MMIO(0xe55c), 0x00055054 },
488 { _MMIO(0xe65c), 0x00065064 },
489};
490
491static const struct i915_oa_reg mux_config_memory_reads[] = {
492 { _MMIO(0x9888), 0x11810c00 },
493 { _MMIO(0x9888), 0x1381001a },
494 { _MMIO(0x9888), 0x37906800 },
495 { _MMIO(0x9888), 0x3f900064 },
496 { _MMIO(0x9888), 0x03811300 },
497 { _MMIO(0x9888), 0x05811b12 },
498 { _MMIO(0x9888), 0x0781001a },
499 { _MMIO(0x9888), 0x1f810000 },
500 { _MMIO(0x9888), 0x17810000 },
501 { _MMIO(0x9888), 0x19810000 },
502 { _MMIO(0x9888), 0x1b810000 },
503 { _MMIO(0x9888), 0x1d810000 },
504 { _MMIO(0x9888), 0x1b930055 },
505 { _MMIO(0x9888), 0x03e58000 },
506 { _MMIO(0x9888), 0x05e5c000 },
507 { _MMIO(0x9888), 0x07e54000 },
508 { _MMIO(0x9888), 0x13900150 },
509 { _MMIO(0x9888), 0x21900151 },
510 { _MMIO(0x9888), 0x23900152 },
511 { _MMIO(0x9888), 0x25900153 },
512 { _MMIO(0x9888), 0x27900154 },
513 { _MMIO(0x9888), 0x29900155 },
514 { _MMIO(0x9888), 0x2b900156 },
515 { _MMIO(0x9888), 0x2d900157 },
516 { _MMIO(0x9888), 0x2f90015f },
517 { _MMIO(0x9888), 0x31900105 },
518 { _MMIO(0x9888), 0x15900103 },
519 { _MMIO(0x9888), 0x17900101 },
520 { _MMIO(0x9888), 0x35900000 },
521 { _MMIO(0x9888), 0x19908000 },
522 { _MMIO(0x9888), 0x1b908000 },
523 { _MMIO(0x9888), 0x1d908000 },
524 { _MMIO(0x9888), 0x1f908000 },
525 { _MMIO(0x9888), 0x11900000 },
526 { _MMIO(0x9888), 0x51900000 },
527 { _MMIO(0x9888), 0x41900c60 },
528 { _MMIO(0x9888), 0x55900000 },
529 { _MMIO(0x9888), 0x45900c00 },
530 { _MMIO(0x9888), 0x47900c63 },
531 { _MMIO(0x9888), 0x57900000 },
532 { _MMIO(0x9888), 0x49900c63 },
533 { _MMIO(0x9888), 0x33900000 },
534 { _MMIO(0x9888), 0x4b900063 },
535 { _MMIO(0x9888), 0x59900000 },
536 { _MMIO(0x9888), 0x43900003 },
537 { _MMIO(0x9888), 0x53900000 },
538};
539
540static int
541get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
542 const struct i915_oa_reg **regs,
543 int *lens)
544{
545 int n = 0;
546
547 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
548 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
549
550 regs[n] = mux_config_memory_reads;
551 lens[n] = ARRAY_SIZE(mux_config_memory_reads);
552 n++;
553
554 return n;
555}
556
557static const struct i915_oa_reg b_counter_config_memory_writes[] = {
558 { _MMIO(0x272c), 0xffffffff },
559 { _MMIO(0x2728), 0xffffffff },
560 { _MMIO(0x2724), 0xf0800000 },
561 { _MMIO(0x2720), 0x00000000 },
562 { _MMIO(0x271c), 0xffffffff },
563 { _MMIO(0x2718), 0xffffffff },
564 { _MMIO(0x2714), 0xf0800000 },
565 { _MMIO(0x2710), 0x00000000 },
566 { _MMIO(0x274c), 0x86543210 },
567 { _MMIO(0x2748), 0x86543210 },
568 { _MMIO(0x2744), 0x00006667 },
569 { _MMIO(0x2740), 0x00000000 },
570 { _MMIO(0x275c), 0x86543210 },
571 { _MMIO(0x2758), 0x86543210 },
572 { _MMIO(0x2754), 0x00006465 },
573 { _MMIO(0x2750), 0x00000000 },
574 { _MMIO(0x2770), 0x0007f81a },
575 { _MMIO(0x2774), 0x0000fe00 },
576 { _MMIO(0x2778), 0x0007f82a },
577 { _MMIO(0x277c), 0x0000fe00 },
578 { _MMIO(0x2780), 0x0007f822 },
579 { _MMIO(0x2784), 0x0000fe00 },
580 { _MMIO(0x2788), 0x0007f8ba },
581 { _MMIO(0x278c), 0x0000fe00 },
582 { _MMIO(0x2790), 0x0007f87a },
583 { _MMIO(0x2794), 0x0000fe00 },
584 { _MMIO(0x2798), 0x0007f8ea },
585 { _MMIO(0x279c), 0x0000fe00 },
586 { _MMIO(0x27a0), 0x0007f8e2 },
587 { _MMIO(0x27a4), 0x0000fe00 },
588 { _MMIO(0x27a8), 0x0007f8f2 },
589 { _MMIO(0x27ac), 0x0000fe00 },
590};
591
592static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
593 { _MMIO(0xe458), 0x00005004 },
594 { _MMIO(0xe558), 0x00015014 },
595 { _MMIO(0xe658), 0x00025024 },
596 { _MMIO(0xe758), 0x00035034 },
597 { _MMIO(0xe45c), 0x00045044 },
598 { _MMIO(0xe55c), 0x00055054 },
599 { _MMIO(0xe65c), 0x00065064 },
600};
601
602static const struct i915_oa_reg mux_config_memory_writes[] = {
603 { _MMIO(0x9888), 0x11810c00 },
604 { _MMIO(0x9888), 0x1381001a },
605 { _MMIO(0x9888), 0x37906800 },
606 { _MMIO(0x9888), 0x3f901000 },
607 { _MMIO(0x9888), 0x03811300 },
608 { _MMIO(0x9888), 0x05811b12 },
609 { _MMIO(0x9888), 0x0781001a },
610 { _MMIO(0x9888), 0x1f810000 },
611 { _MMIO(0x9888), 0x17810000 },
612 { _MMIO(0x9888), 0x19810000 },
613 { _MMIO(0x9888), 0x1b810000 },
614 { _MMIO(0x9888), 0x1d810000 },
615 { _MMIO(0x9888), 0x1b930055 },
616 { _MMIO(0x9888), 0x03e58000 },
617 { _MMIO(0x9888), 0x05e5c000 },
618 { _MMIO(0x9888), 0x07e54000 },
619 { _MMIO(0x9888), 0x13900160 },
620 { _MMIO(0x9888), 0x21900161 },
621 { _MMIO(0x9888), 0x23900162 },
622 { _MMIO(0x9888), 0x25900163 },
623 { _MMIO(0x9888), 0x27900164 },
624 { _MMIO(0x9888), 0x29900165 },
625 { _MMIO(0x9888), 0x2b900166 },
626 { _MMIO(0x9888), 0x2d900167 },
627 { _MMIO(0x9888), 0x2f900150 },
628 { _MMIO(0x9888), 0x31900105 },
629 { _MMIO(0x9888), 0x15900103 },
630 { _MMIO(0x9888), 0x17900101 },
631 { _MMIO(0x9888), 0x35900000 },
632 { _MMIO(0x9888), 0x19908000 },
633 { _MMIO(0x9888), 0x1b908000 },
634 { _MMIO(0x9888), 0x1d908000 },
635 { _MMIO(0x9888), 0x1f908000 },
636 { _MMIO(0x9888), 0x11900000 },
637 { _MMIO(0x9888), 0x51900000 },
638 { _MMIO(0x9888), 0x41900c60 },
639 { _MMIO(0x9888), 0x55900000 },
640 { _MMIO(0x9888), 0x45900c00 },
641 { _MMIO(0x9888), 0x47900c63 },
642 { _MMIO(0x9888), 0x57900000 },
643 { _MMIO(0x9888), 0x49900c63 },
644 { _MMIO(0x9888), 0x33900000 },
645 { _MMIO(0x9888), 0x4b900063 },
646 { _MMIO(0x9888), 0x59900000 },
647 { _MMIO(0x9888), 0x43900003 },
648 { _MMIO(0x9888), 0x53900000 },
649};
650
651static int
652get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
653 const struct i915_oa_reg **regs,
654 int *lens)
655{
656 int n = 0;
657
658 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
659 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
660
661 regs[n] = mux_config_memory_writes;
662 lens[n] = ARRAY_SIZE(mux_config_memory_writes);
663 n++;
664
665 return n;
666}
667
668static const struct i915_oa_reg b_counter_config_compute_extended[] = {
669 { _MMIO(0x2724), 0xf0800000 },
670 { _MMIO(0x2720), 0x00000000 },
671 { _MMIO(0x2714), 0xf0800000 },
672 { _MMIO(0x2710), 0x00000000 },
673 { _MMIO(0x2740), 0x00000000 },
674 { _MMIO(0x2770), 0x0007fc2a },
675 { _MMIO(0x2774), 0x0000bf00 },
676 { _MMIO(0x2778), 0x0007fc6a },
677 { _MMIO(0x277c), 0x0000bf00 },
678 { _MMIO(0x2780), 0x0007fc92 },
679 { _MMIO(0x2784), 0x0000bf00 },
680 { _MMIO(0x2788), 0x0007fca2 },
681 { _MMIO(0x278c), 0x0000bf00 },
682 { _MMIO(0x2790), 0x0007fc32 },
683 { _MMIO(0x2794), 0x0000bf00 },
684 { _MMIO(0x2798), 0x0007fc9a },
685 { _MMIO(0x279c), 0x0000bf00 },
686 { _MMIO(0x27a0), 0x0007fe6a },
687 { _MMIO(0x27a4), 0x0000bf00 },
688 { _MMIO(0x27a8), 0x0007fe7a },
689 { _MMIO(0x27ac), 0x0000bf00 },
690};
691
692static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
693 { _MMIO(0xe458), 0x00005004 },
694 { _MMIO(0xe558), 0x00000003 },
695 { _MMIO(0xe658), 0x00002001 },
696 { _MMIO(0xe758), 0x00778008 },
697 { _MMIO(0xe45c), 0x00088078 },
698 { _MMIO(0xe55c), 0x00808708 },
699 { _MMIO(0xe65c), 0x00a08908 },
700};
701
702static const struct i915_oa_reg mux_config_compute_extended[] = {
703 { _MMIO(0x9888), 0x106c00e0 },
704 { _MMIO(0x9888), 0x141c8160 },
705 { _MMIO(0x9888), 0x161c8015 },
706 { _MMIO(0x9888), 0x181c0120 },
707 { _MMIO(0x9888), 0x004e8000 },
708 { _MMIO(0x9888), 0x0e4e8000 },
709 { _MMIO(0x9888), 0x184e8000 },
710 { _MMIO(0x9888), 0x1a4eaaa0 },
711 { _MMIO(0x9888), 0x1c4e0002 },
712 { _MMIO(0x9888), 0x024e8000 },
713 { _MMIO(0x9888), 0x044e8000 },
714 { _MMIO(0x9888), 0x064e8000 },
715 { _MMIO(0x9888), 0x084e8000 },
716 { _MMIO(0x9888), 0x0a4e8000 },
717 { _MMIO(0x9888), 0x0e6c0b01 },
718 { _MMIO(0x9888), 0x006c0200 },
719 { _MMIO(0x9888), 0x026c000c },
720 { _MMIO(0x9888), 0x1c6c0000 },
721 { _MMIO(0x9888), 0x1e6c0000 },
722 { _MMIO(0x9888), 0x1a6c0000 },
723 { _MMIO(0x9888), 0x0e1bc000 },
724 { _MMIO(0x9888), 0x001b8000 },
725 { _MMIO(0x9888), 0x021bc000 },
726 { _MMIO(0x9888), 0x001c0041 },
727 { _MMIO(0x9888), 0x061c4200 },
728 { _MMIO(0x9888), 0x081c4443 },
729 { _MMIO(0x9888), 0x0a1c4645 },
730 { _MMIO(0x9888), 0x0c1c7647 },
731 { _MMIO(0x9888), 0x041c7357 },
732 { _MMIO(0x9888), 0x1c1c0030 },
733 { _MMIO(0x9888), 0x101c0000 },
734 { _MMIO(0x9888), 0x1a1c0000 },
735 { _MMIO(0x9888), 0x121c8000 },
736 { _MMIO(0x9888), 0x004c8000 },
737 { _MMIO(0x9888), 0x0a4caa2a },
738 { _MMIO(0x9888), 0x0c4c02aa },
739 { _MMIO(0x9888), 0x084ca000 },
740 { _MMIO(0x9888), 0x000da000 },
741 { _MMIO(0x9888), 0x060d8000 },
742 { _MMIO(0x9888), 0x080da000 },
743 { _MMIO(0x9888), 0x0a0da000 },
744 { _MMIO(0x9888), 0x0c0da000 },
745 { _MMIO(0x9888), 0x0e0da000 },
746 { _MMIO(0x9888), 0x020da000 },
747 { _MMIO(0x9888), 0x040da000 },
748 { _MMIO(0x9888), 0x0c0f5400 },
749 { _MMIO(0x9888), 0x0e0f5515 },
750 { _MMIO(0x9888), 0x100f0155 },
751 { _MMIO(0x9888), 0x002c8000 },
752 { _MMIO(0x9888), 0x0e2c8000 },
753 { _MMIO(0x9888), 0x162caa00 },
754 { _MMIO(0x9888), 0x182c00aa },
755 { _MMIO(0x9888), 0x022c8000 },
756 { _MMIO(0x9888), 0x042c8000 },
757 { _MMIO(0x9888), 0x062c8000 },
758 { _MMIO(0x9888), 0x082c8000 },
759 { _MMIO(0x9888), 0x0a2c8000 },
760 { _MMIO(0x9888), 0x11907fff },
761 { _MMIO(0x9888), 0x51900000 },
762 { _MMIO(0x9888), 0x41900040 },
763 { _MMIO(0x9888), 0x55900000 },
764 { _MMIO(0x9888), 0x45900802 },
765 { _MMIO(0x9888), 0x47900842 },
766 { _MMIO(0x9888), 0x57900000 },
767 { _MMIO(0x9888), 0x49900842 },
768 { _MMIO(0x9888), 0x37900000 },
769 { _MMIO(0x9888), 0x33900000 },
770 { _MMIO(0x9888), 0x4b900000 },
771 { _MMIO(0x9888), 0x59900000 },
772 { _MMIO(0x9888), 0x43900800 },
773 { _MMIO(0x9888), 0x53900000 },
774};
775
776static int
777get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
778 const struct i915_oa_reg **regs,
779 int *lens)
780{
781 int n = 0;
782
783 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
784 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
785
786 regs[n] = mux_config_compute_extended;
787 lens[n] = ARRAY_SIZE(mux_config_compute_extended);
788 n++;
789
790 return n;
791}
792
793static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
794 { _MMIO(0x2710), 0x00000000 },
795 { _MMIO(0x2714), 0x30800000 },
796 { _MMIO(0x2720), 0x00000000 },
797 { _MMIO(0x2724), 0x30800000 },
798 { _MMIO(0x2740), 0x00000000 },
799 { _MMIO(0x2770), 0x0007fffa },
800 { _MMIO(0x2774), 0x0000fefe },
801 { _MMIO(0x2778), 0x0007fffa },
802 { _MMIO(0x277c), 0x0000fefd },
803 { _MMIO(0x2790), 0x0007fffa },
804 { _MMIO(0x2794), 0x0000fbef },
805 { _MMIO(0x2798), 0x0007fffa },
806 { _MMIO(0x279c), 0x0000fbdf },
807};
808
809static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
810 { _MMIO(0xe458), 0x00005004 },
811 { _MMIO(0xe558), 0x00000003 },
812 { _MMIO(0xe658), 0x00002001 },
813 { _MMIO(0xe758), 0x00101100 },
814 { _MMIO(0xe45c), 0x00201200 },
815 { _MMIO(0xe55c), 0x00301300 },
816 { _MMIO(0xe65c), 0x00401400 },
817};
818
819static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
820 { _MMIO(0x9888), 0x166c0760 },
821 { _MMIO(0x9888), 0x1593001e },
822 { _MMIO(0x9888), 0x3f900003 },
823 { _MMIO(0x9888), 0x004e8000 },
824 { _MMIO(0x9888), 0x0e4e8000 },
825 { _MMIO(0x9888), 0x184e8000 },
826 { _MMIO(0x9888), 0x1a4e8020 },
827 { _MMIO(0x9888), 0x1c4e0002 },
828 { _MMIO(0x9888), 0x006c0051 },
829 { _MMIO(0x9888), 0x066c5000 },
830 { _MMIO(0x9888), 0x086c5c5d },
831 { _MMIO(0x9888), 0x0e6c5e5f },
832 { _MMIO(0x9888), 0x106c0000 },
833 { _MMIO(0x9888), 0x186c0000 },
834 { _MMIO(0x9888), 0x1c6c0000 },
835 { _MMIO(0x9888), 0x1e6c0000 },
836 { _MMIO(0x9888), 0x001b4000 },
837 { _MMIO(0x9888), 0x061b8000 },
838 { _MMIO(0x9888), 0x081bc000 },
839 { _MMIO(0x9888), 0x0e1bc000 },
840 { _MMIO(0x9888), 0x101c8000 },
841 { _MMIO(0x9888), 0x1a1ce000 },
842 { _MMIO(0x9888), 0x1c1c0030 },
843 { _MMIO(0x9888), 0x004c8000 },
844 { _MMIO(0x9888), 0x0a4c2a00 },
845 { _MMIO(0x9888), 0x0c4c0280 },
846 { _MMIO(0x9888), 0x000d2000 },
847 { _MMIO(0x9888), 0x060d8000 },
848 { _MMIO(0x9888), 0x080da000 },
849 { _MMIO(0x9888), 0x0e0da000 },
850 { _MMIO(0x9888), 0x0c0f0400 },
851 { _MMIO(0x9888), 0x0e0f1500 },
852 { _MMIO(0x9888), 0x100f0140 },
853 { _MMIO(0x9888), 0x002c8000 },
854 { _MMIO(0x9888), 0x0e2c8000 },
855 { _MMIO(0x9888), 0x162c0a00 },
856 { _MMIO(0x9888), 0x182c00a0 },
857 { _MMIO(0x9888), 0x03933300 },
858 { _MMIO(0x9888), 0x05930032 },
859 { _MMIO(0x9888), 0x11930000 },
860 { _MMIO(0x9888), 0x1b930000 },
861 { _MMIO(0x9888), 0x1d900157 },
862 { _MMIO(0x9888), 0x1f900158 },
863 { _MMIO(0x9888), 0x35900000 },
864 { _MMIO(0x9888), 0x19908000 },
865 { _MMIO(0x9888), 0x1b908000 },
866 { _MMIO(0x9888), 0x1190030f },
867 { _MMIO(0x9888), 0x51900000 },
868 { _MMIO(0x9888), 0x41900000 },
869 { _MMIO(0x9888), 0x55900000 },
870 { _MMIO(0x9888), 0x45900021 },
871 { _MMIO(0x9888), 0x47900000 },
872 { _MMIO(0x9888), 0x37900000 },
873 { _MMIO(0x9888), 0x33900000 },
874 { _MMIO(0x9888), 0x57900000 },
875 { _MMIO(0x9888), 0x4b900000 },
876 { _MMIO(0x9888), 0x59900000 },
877 { _MMIO(0x9888), 0x53904444 },
878 { _MMIO(0x9888), 0x43900000 },
879};
880
881static int
882get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
883 const struct i915_oa_reg **regs,
884 int *lens)
885{
886 int n = 0;
887
888 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
889 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
890
891 regs[n] = mux_config_compute_l3_cache;
892 lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
893 n++;
894
895 return n;
896}
897
898static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
899 { _MMIO(0x2740), 0x00000000 },
900 { _MMIO(0x2744), 0x00800000 },
901 { _MMIO(0x2710), 0x00000000 },
902 { _MMIO(0x2714), 0x10800000 },
903 { _MMIO(0x2720), 0x00000000 },
904 { _MMIO(0x2724), 0x00800000 },
905 { _MMIO(0x2770), 0x00000002 },
906 { _MMIO(0x2774), 0x0000fdff },
907};
908
/* Flex EU event configuration for the HDC_AND_SF metric set (0xe4xx/0xe5xx/0xe6xx). */
static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/*
 * NOA MUX programming for the HDC_AND_SF metric set; every entry is a
 * write to the 0x9888 MUX register.  Values are opaque, tool-generated
 * hardware configuration and must not be edited by hand.
 */
static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
	{ _MMIO(0x9888), 0x104f0232 },
	{ _MMIO(0x9888), 0x124f4640 },
	{ _MMIO(0x9888), 0x106c0232 },
	{ _MMIO(0x9888), 0x11834400 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x0c4e8000 },
	{ _MMIO(0x9888), 0x004f1880 },
	{ _MMIO(0x9888), 0x024f08bb },
	{ _MMIO(0x9888), 0x044f001b },
	{ _MMIO(0x9888), 0x046c0100 },
	{ _MMIO(0x9888), 0x066c000b },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x041b8000 },
	{ _MMIO(0x9888), 0x061b4000 },
	{ _MMIO(0x9888), 0x1a1c1800 },
	{ _MMIO(0x9888), 0x005b8000 },
	{ _MMIO(0x9888), 0x025bc000 },
	{ _MMIO(0x9888), 0x045b4000 },
	{ _MMIO(0x9888), 0x125c8000 },
	{ _MMIO(0x9888), 0x145c8000 },
	{ _MMIO(0x9888), 0x165c8000 },
	{ _MMIO(0x9888), 0x185c8000 },
	{ _MMIO(0x9888), 0x0a4c00a0 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f5000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x022cc000 },
	{ _MMIO(0x9888), 0x042cc000 },
	{ _MMIO(0x9888), 0x062cc000 },
	{ _MMIO(0x9888), 0x082cc000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x0f828000 },
	{ _MMIO(0x9888), 0x0f8305c0 },
	{ _MMIO(0x9888), 0x09830000 },
	{ _MMIO(0x9888), 0x07830000 },
	{ _MMIO(0x9888), 0x1d950080 },
	{ _MMIO(0x9888), 0x13928000 },
	{ _MMIO(0x9888), 0x0f988000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900040 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900800 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
974
975static int
976get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
977 const struct i915_oa_reg **regs,
978 int *lens)
979{
980 int n = 0;
981
982 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
983 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
984
985 regs[n] = mux_config_hdc_and_sf;
986 lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
987 n++;
988
989 return n;
990}
991
/* B-counter (boolean counter/report trigger, 0x27xx range) programming for the L3_1 metric set. */
static const struct i915_oa_reg b_counter_config_l3_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};

/* Flex EU event configuration for the L3_1 metric set. */
static const struct i915_oa_reg flex_eu_config_l3_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/* NOA MUX programming (all writes to 0x9888) for the L3_1 metric set. */
static const struct i915_oa_reg mux_config_l3_1[] = {
	{ _MMIO(0x9888), 0x126c7b40 },
	{ _MMIO(0x9888), 0x166c0020 },
	{ _MMIO(0x9888), 0x0a603444 },
	{ _MMIO(0x9888), 0x0a613400 },
	{ _MMIO(0x9888), 0x1a4ea800 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x024e8000 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x064f4000 },
	{ _MMIO(0x9888), 0x0c6c5327 },
	{ _MMIO(0x9888), 0x0e6c5425 },
	{ _MMIO(0x9888), 0x006c2a00 },
	{ _MMIO(0x9888), 0x026c285b },
	{ _MMIO(0x9888), 0x046c005c },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x1a6c0800 },
	{ _MMIO(0x9888), 0x0c1bc000 },
	{ _MMIO(0x9888), 0x0e1bc000 },
	{ _MMIO(0x9888), 0x001b8000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x1c1c003c },
	{ _MMIO(0x9888), 0x121c8000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c0800 },
	{ _MMIO(0x9888), 0x065b4000 },
	{ _MMIO(0x9888), 0x1a5c1000 },
	{ _MMIO(0x9888), 0x10600000 },
	{ _MMIO(0x9888), 0x04600000 },
	{ _MMIO(0x9888), 0x0c610044 },
	{ _MMIO(0x9888), 0x10610000 },
	{ _MMIO(0x9888), 0x06610000 },
	{ _MMIO(0x9888), 0x0c4c02a8 },
	{ _MMIO(0x9888), 0x084ca000 },
	{ _MMIO(0x9888), 0x0a4c002a },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f0154 },
	{ _MMIO(0x9888), 0x0c0f5000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x182c00aa },
	{ _MMIO(0x9888), 0x022c8000 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2cc000 },
	{ _MMIO(0x9888), 0x1190ffc0 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900420 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900021 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900400 },
	{ _MMIO(0x9888), 0x43900421 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
};
1099
1100static int
1101get_l3_1_mux_config(struct drm_i915_private *dev_priv,
1102 const struct i915_oa_reg **regs,
1103 int *lens)
1104{
1105 int n = 0;
1106
1107 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1108 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1109
1110 regs[n] = mux_config_l3_1;
1111 lens[n] = ARRAY_SIZE(mux_config_l3_1);
1112 n++;
1113
1114 return n;
1115}
1116
/* B-counter (boolean counter/report trigger, 0x27xx range) programming for the L3_2 metric set. */
static const struct i915_oa_reg b_counter_config_l3_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00028002 },
	{ _MMIO(0x277c), 0x000087ff },
	{ _MMIO(0x2780), 0x00020002 },
	{ _MMIO(0x2784), 0x00008fff },
	{ _MMIO(0x2788), 0x00008002 },
	{ _MMIO(0x278c), 0x0000a7ff },
};

/* Flex EU event configuration for the L3_2 metric set. */
static const struct i915_oa_reg flex_eu_config_l3_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/* NOA MUX programming (all writes to 0x9888) for the L3_2 metric set. */
static const struct i915_oa_reg mux_config_l3_2[] = {
	{ _MMIO(0x9888), 0x126c02e0 },
	{ _MMIO(0x9888), 0x146c0001 },
	{ _MMIO(0x9888), 0x0a623400 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x064f4000 },
	{ _MMIO(0x9888), 0x026c3324 },
	{ _MMIO(0x9888), 0x046c3422 },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c0800 },
	{ _MMIO(0x9888), 0x065b4000 },
	{ _MMIO(0x9888), 0x1a5c1000 },
	{ _MMIO(0x9888), 0x06614000 },
	{ _MMIO(0x9888), 0x0c620044 },
	{ _MMIO(0x9888), 0x10620000 },
	{ _MMIO(0x9888), 0x06620000 },
	{ _MMIO(0x9888), 0x084c8000 },
	{ _MMIO(0x9888), 0x0a4c002a },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f4000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2cc000 },
	{ _MMIO(0x9888), 0x1190f800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x43900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1189
1190static int
1191get_l3_2_mux_config(struct drm_i915_private *dev_priv,
1192 const struct i915_oa_reg **regs,
1193 int *lens)
1194{
1195 int n = 0;
1196
1197 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1198 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1199
1200 regs[n] = mux_config_l3_2;
1201 lens[n] = ARRAY_SIZE(mux_config_l3_2);
1202 n++;
1203
1204 return n;
1205}
1206
/* B-counter (boolean counter/report trigger, 0x27xx range) programming for the L3_3 metric set. */
static const struct i915_oa_reg b_counter_config_l3_3[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00028002 },
	{ _MMIO(0x277c), 0x000087ff },
	{ _MMIO(0x2780), 0x00020002 },
	{ _MMIO(0x2784), 0x00008fff },
	{ _MMIO(0x2788), 0x00008002 },
	{ _MMIO(0x278c), 0x0000a7ff },
};

/* Flex EU event configuration for the L3_3 metric set. */
static const struct i915_oa_reg flex_eu_config_l3_3[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/* NOA MUX programming (all writes to 0x9888) for the L3_3 metric set. */
static const struct i915_oa_reg mux_config_l3_3[] = {
	{ _MMIO(0x9888), 0x126c4e80 },
	{ _MMIO(0x9888), 0x146c0000 },
	{ _MMIO(0x9888), 0x0a633400 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x0c4e8000 },
	{ _MMIO(0x9888), 0x026c3321 },
	{ _MMIO(0x9888), 0x046c342f },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1a6c2000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x061b4000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c1800 },
	{ _MMIO(0x9888), 0x06604000 },
	{ _MMIO(0x9888), 0x0c630044 },
	{ _MMIO(0x9888), 0x10630000 },
	{ _MMIO(0x9888), 0x06630000 },
	{ _MMIO(0x9888), 0x084c8000 },
	{ _MMIO(0x9888), 0x0a4c00aa },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f4000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x1190f800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900002 },
	{ _MMIO(0x9888), 0x33900000 },
};
1278
1279static int
1280get_l3_3_mux_config(struct drm_i915_private *dev_priv,
1281 const struct i915_oa_reg **regs,
1282 int *lens)
1283{
1284 int n = 0;
1285
1286 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1287 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1288
1289 regs[n] = mux_config_l3_3;
1290 lens[n] = ARRAY_SIZE(mux_config_l3_3);
1291 n++;
1292
1293 return n;
1294}
1295
/* B-counter (boolean counter/report trigger, 0x27xx range) programming for RASTERIZER_AND_PIXEL_BACKEND. */
static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x30800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000efff },
	{ _MMIO(0x2778), 0x00006000 },
	{ _MMIO(0x277c), 0x0000f3ff },
};

/* Flex EU event configuration for the RASTERIZER_AND_PIXEL_BACKEND metric set. */
static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/* NOA MUX programming (all writes to 0x9888) for the RASTERIZER_AND_PIXEL_BACKEND metric set. */
static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x9888), 0x102f3800 },
	{ _MMIO(0x9888), 0x144d0500 },
	{ _MMIO(0x9888), 0x120d03c0 },
	{ _MMIO(0x9888), 0x140d03cf },
	{ _MMIO(0x9888), 0x0c0f0004 },
	{ _MMIO(0x9888), 0x0c4e4000 },
	{ _MMIO(0x9888), 0x042f0480 },
	{ _MMIO(0x9888), 0x082f0000 },
	{ _MMIO(0x9888), 0x022f0000 },
	{ _MMIO(0x9888), 0x0a4c0090 },
	{ _MMIO(0x9888), 0x064d0027 },
	{ _MMIO(0x9888), 0x004d0000 },
	{ _MMIO(0x9888), 0x000d0d40 },
	{ _MMIO(0x9888), 0x020d803f },
	{ _MMIO(0x9888), 0x040d8023 },
	{ _MMIO(0x9888), 0x100d0000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x020f0010 },
	{ _MMIO(0x9888), 0x000f0000 },
	{ _MMIO(0x9888), 0x0e0f0050 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41901400 },
	{ _MMIO(0x9888), 0x43901485 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900001 },
	{ _MMIO(0x9888), 0x33900000 },
};
1351
1352static int
1353get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
1354 const struct i915_oa_reg **regs,
1355 int *lens)
1356{
1357 int n = 0;
1358
1359 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1360 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1361
1362 regs[n] = mux_config_rasterizer_and_pixel_backend;
1363 lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
1364 n++;
1365
1366 return n;
1367}
1368
/* B-counter (boolean counter/report trigger, 0x27xx range) programming for the SAMPLER metric set. */
static const struct i915_oa_reg b_counter_config_sampler[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x70800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x0000c000 },
	{ _MMIO(0x2774), 0x0000e7ff },
	{ _MMIO(0x2778), 0x00003000 },
	{ _MMIO(0x277c), 0x0000f9ff },
	{ _MMIO(0x2780), 0x00000c00 },
	{ _MMIO(0x2784), 0x0000fe7f },
};

/* Flex EU event configuration for the SAMPLER metric set. */
static const struct i915_oa_reg flex_eu_config_sampler[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/* NOA MUX programming (all writes to 0x9888) for the SAMPLER metric set. */
static const struct i915_oa_reg mux_config_sampler[] = {
	{ _MMIO(0x9888), 0x14152c00 },
	{ _MMIO(0x9888), 0x16150005 },
	{ _MMIO(0x9888), 0x121600a0 },
	{ _MMIO(0x9888), 0x14352c00 },
	{ _MMIO(0x9888), 0x16350005 },
	{ _MMIO(0x9888), 0x123600a0 },
	{ _MMIO(0x9888), 0x14552c00 },
	{ _MMIO(0x9888), 0x16550005 },
	{ _MMIO(0x9888), 0x125600a0 },
	{ _MMIO(0x9888), 0x062f6000 },
	{ _MMIO(0x9888), 0x022f2000 },
	{ _MMIO(0x9888), 0x0c4c0050 },
	{ _MMIO(0x9888), 0x0a4c0010 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f0350 },
	{ _MMIO(0x9888), 0x0c0fb000 },
	{ _MMIO(0x9888), 0x0e0f00da },
	{ _MMIO(0x9888), 0x182c0028 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x022dc000 },
	{ _MMIO(0x9888), 0x042d4000 },
	{ _MMIO(0x9888), 0x0c138000 },
	{ _MMIO(0x9888), 0x0e132000 },
	{ _MMIO(0x9888), 0x0413c000 },
	{ _MMIO(0x9888), 0x1c140018 },
	{ _MMIO(0x9888), 0x0c157000 },
	{ _MMIO(0x9888), 0x0e150078 },
	{ _MMIO(0x9888), 0x10150000 },
	{ _MMIO(0x9888), 0x04162180 },
	{ _MMIO(0x9888), 0x02160000 },
	{ _MMIO(0x9888), 0x04174000 },
	{ _MMIO(0x9888), 0x0233a000 },
	{ _MMIO(0x9888), 0x04333000 },
	{ _MMIO(0x9888), 0x14348000 },
	{ _MMIO(0x9888), 0x16348000 },
	{ _MMIO(0x9888), 0x02357870 },
	{ _MMIO(0x9888), 0x10350000 },
	{ _MMIO(0x9888), 0x04360043 },
	{ _MMIO(0x9888), 0x02360000 },
	{ _MMIO(0x9888), 0x04371000 },
	{ _MMIO(0x9888), 0x0e538000 },
	{ _MMIO(0x9888), 0x00538000 },
	{ _MMIO(0x9888), 0x06533000 },
	{ _MMIO(0x9888), 0x1c540020 },
	{ _MMIO(0x9888), 0x12548000 },
	{ _MMIO(0x9888), 0x0e557000 },
	{ _MMIO(0x9888), 0x00557800 },
	{ _MMIO(0x9888), 0x10550000 },
	{ _MMIO(0x9888), 0x06560043 },
	{ _MMIO(0x9888), 0x02560000 },
	{ _MMIO(0x9888), 0x06571000 },
	{ _MMIO(0x9888), 0x1190ff80 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900060 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900060 },
};
1464
1465static int
1466get_sampler_mux_config(struct drm_i915_private *dev_priv,
1467 const struct i915_oa_reg **regs,
1468 int *lens)
1469{
1470 int n = 0;
1471
1472 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1473 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1474
1475 regs[n] = mux_config_sampler;
1476 lens[n] = ARRAY_SIZE(mux_config_sampler);
1477 n++;
1478
1479 return n;
1480}
1481
/* B-counter (boolean counter/report trigger, 0x27xx range) programming for the TDL_1 metric set. */
static const struct i915_oa_reg b_counter_config_tdl_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x00007fff },
	{ _MMIO(0x2778), 0x00000000 },
	{ _MMIO(0x277c), 0x00009fff },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000efff },
	{ _MMIO(0x2788), 0x00000000 },
	{ _MMIO(0x278c), 0x0000f3ff },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000fdff },
	{ _MMIO(0x2798), 0x00000000 },
	{ _MMIO(0x279c), 0x0000fe7f },
};

/* Flex EU event configuration for the TDL_1 metric set. */
static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/* NOA MUX programming (all writes to 0x9888) for the TDL_1 metric set. */
static const struct i915_oa_reg mux_config_tdl_1[] = {
	{ _MMIO(0x9888), 0x12120000 },
	{ _MMIO(0x9888), 0x12320000 },
	{ _MMIO(0x9888), 0x12520000 },
	{ _MMIO(0x9888), 0x002f8000 },
	{ _MMIO(0x9888), 0x022f3000 },
	{ _MMIO(0x9888), 0x0a4c0015 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f03a0 },
	{ _MMIO(0x9888), 0x0c0ff000 },
	{ _MMIO(0x9888), 0x0e0f0095 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2d8000 },
	{ _MMIO(0x9888), 0x0e2d4000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x02108000 },
	{ _MMIO(0x9888), 0x0410c000 },
	{ _MMIO(0x9888), 0x02118000 },
	{ _MMIO(0x9888), 0x0411c000 },
	{ _MMIO(0x9888), 0x02121880 },
	{ _MMIO(0x9888), 0x041219b5 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x02134000 },
	{ _MMIO(0x9888), 0x04135000 },
	{ _MMIO(0x9888), 0x0c308000 },
	{ _MMIO(0x9888), 0x0e304000 },
	{ _MMIO(0x9888), 0x06304000 },
	{ _MMIO(0x9888), 0x0c318000 },
	{ _MMIO(0x9888), 0x0e314000 },
	{ _MMIO(0x9888), 0x06314000 },
	{ _MMIO(0x9888), 0x0c321a80 },
	{ _MMIO(0x9888), 0x0e320033 },
	{ _MMIO(0x9888), 0x06320031 },
	{ _MMIO(0x9888), 0x00320000 },
	{ _MMIO(0x9888), 0x0c334000 },
	{ _MMIO(0x9888), 0x0e331000 },
	{ _MMIO(0x9888), 0x06331000 },
	{ _MMIO(0x9888), 0x0e508000 },
	{ _MMIO(0x9888), 0x00508000 },
	{ _MMIO(0x9888), 0x02504000 },
	{ _MMIO(0x9888), 0x0e518000 },
	{ _MMIO(0x9888), 0x00518000 },
	{ _MMIO(0x9888), 0x02514000 },
	{ _MMIO(0x9888), 0x0e521880 },
	{ _MMIO(0x9888), 0x00521a80 },
	{ _MMIO(0x9888), 0x02520033 },
	{ _MMIO(0x9888), 0x0e534000 },
	{ _MMIO(0x9888), 0x00534000 },
	{ _MMIO(0x9888), 0x02531000 },
	{ _MMIO(0x9888), 0x1190ff80 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900062 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
};
1582
1583static int
1584get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
1585 const struct i915_oa_reg **regs,
1586 int *lens)
1587{
1588 int n = 0;
1589
1590 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1591 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1592
1593 regs[n] = mux_config_tdl_1;
1594 lens[n] = ARRAY_SIZE(mux_config_tdl_1);
1595 n++;
1596
1597 return n;
1598}
1599
/* B-counter (boolean counter/report trigger, 0x27xx range) programming for the TDL_2 metric set. */
static const struct i915_oa_reg b_counter_config_tdl_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};

/* Flex EU event configuration for the TDL_2 metric set. */
static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/* NOA MUX programming (all writes to 0x9888) for the TDL_2 metric set. */
static const struct i915_oa_reg mux_config_tdl_2[] = {
	{ _MMIO(0x9888), 0x12124d60 },
	{ _MMIO(0x9888), 0x12322e60 },
	{ _MMIO(0x9888), 0x12524d60 },
	{ _MMIO(0x9888), 0x022f3000 },
	{ _MMIO(0x9888), 0x0a4c0014 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0fe000 },
	{ _MMIO(0x9888), 0x0e0f0097 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x002d8000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x0410c000 },
	{ _MMIO(0x9888), 0x0411c000 },
	{ _MMIO(0x9888), 0x04121fb7 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x04135000 },
	{ _MMIO(0x9888), 0x00308000 },
	{ _MMIO(0x9888), 0x06304000 },
	{ _MMIO(0x9888), 0x00318000 },
	{ _MMIO(0x9888), 0x06314000 },
	{ _MMIO(0x9888), 0x00321b80 },
	{ _MMIO(0x9888), 0x0632003f },
	{ _MMIO(0x9888), 0x00334000 },
	{ _MMIO(0x9888), 0x06331000 },
	{ _MMIO(0x9888), 0x0250c000 },
	{ _MMIO(0x9888), 0x0251c000 },
	{ _MMIO(0x9888), 0x02521fb7 },
	{ _MMIO(0x9888), 0x00520000 },
	{ _MMIO(0x9888), 0x02535000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900800 },
	{ _MMIO(0x9888), 0x43900063 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
	{ _MMIO(0x9888), 0x33900000 },
};
1662
1663static int
1664get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
1665 const struct i915_oa_reg **regs,
1666 int *lens)
1667{
1668 int n = 0;
1669
1670 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1671 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1672
1673 regs[n] = mux_config_tdl_2;
1674 lens[n] = ARRAY_SIZE(mux_config_tdl_2);
1675 n++;
1676
1677 return n;
1678}
1679
/* B-counter (boolean counter/report trigger, 0x27xx range) programming for the COMPUTE_EXTRA metric set. */
static const struct i915_oa_reg b_counter_config_compute_extra[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};

/*
 * Flex EU event configuration for the COMPUTE_EXTRA metric set.
 * Note: this set uses different flex values than the common 7-entry
 * table shared by most other sets in this file.
 */
static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
	{ _MMIO(0xe458), 0x00001000 },
	{ _MMIO(0xe558), 0x00003002 },
	{ _MMIO(0xe658), 0x00005004 },
	{ _MMIO(0xe758), 0x00011010 },
	{ _MMIO(0xe45c), 0x00050012 },
	{ _MMIO(0xe55c), 0x00052051 },
	{ _MMIO(0xe65c), 0x00000008 },
};

/* NOA MUX programming (all writes to 0x9888) for the COMPUTE_EXTRA metric set. */
static const struct i915_oa_reg mux_config_compute_extra[] = {
	{ _MMIO(0x9888), 0x121203e0 },
	{ _MMIO(0x9888), 0x123203e0 },
	{ _MMIO(0x9888), 0x125203e0 },
	{ _MMIO(0x9888), 0x022f4000 },
	{ _MMIO(0x9888), 0x0a4c0040 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0e0f006c },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x042d8000 },
	{ _MMIO(0x9888), 0x06104000 },
	{ _MMIO(0x9888), 0x06114000 },
	{ _MMIO(0x9888), 0x06120033 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x06131000 },
	{ _MMIO(0x9888), 0x04308000 },
	{ _MMIO(0x9888), 0x04318000 },
	{ _MMIO(0x9888), 0x04321980 },
	{ _MMIO(0x9888), 0x00320000 },
	{ _MMIO(0x9888), 0x04334000 },
	{ _MMIO(0x9888), 0x04504000 },
	{ _MMIO(0x9888), 0x04514000 },
	{ _MMIO(0x9888), 0x04520033 },
	{ _MMIO(0x9888), 0x00520000 },
	{ _MMIO(0x9888), 0x04531000 },
	{ _MMIO(0x9888), 0x1190e000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900c00 },
	{ _MMIO(0x9888), 0x45900002 },
	{ _MMIO(0x9888), 0x33900000 },
};
1732
1733static int
1734get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
1735 const struct i915_oa_reg **regs,
1736 int *lens)
1737{
1738 int n = 0;
1739
1740 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1741 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1742
1743 regs[n] = mux_config_compute_extra;
1744 lens[n] = ARRAY_SIZE(mux_config_compute_extra);
1745 n++;
1746
1747 return n;
1748}
1749
/* B-counter (boolean counter/report trigger, 0x27xx range) programming for the VME_PIPE metric set. */
static const struct i915_oa_reg b_counter_config_vme_pipe[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00100030 },
	{ _MMIO(0x2774), 0x0000fff9 },
	{ _MMIO(0x2778), 0x00000002 },
	{ _MMIO(0x277c), 0x0000fffc },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000fff3 },
	{ _MMIO(0x2788), 0x00100180 },
	{ _MMIO(0x278c), 0x0000ffcf },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00000002 },
	{ _MMIO(0x279c), 0x0000ff3f },
};

/* Flex EU event configuration for the VME_PIPE metric set (only two entries for this set). */
static const struct i915_oa_reg flex_eu_config_vme_pipe[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00008003 },
};

/* NOA MUX programming (all writes to 0x9888) for the VME_PIPE metric set. */
static const struct i915_oa_reg mux_config_vme_pipe[] = {
	{ _MMIO(0x9888), 0x141a5800 },
	{ _MMIO(0x9888), 0x161a00c0 },
	{ _MMIO(0x9888), 0x12180240 },
	{ _MMIO(0x9888), 0x14180002 },
	{ _MMIO(0x9888), 0x143a5800 },
	{ _MMIO(0x9888), 0x163a00c0 },
	{ _MMIO(0x9888), 0x12380240 },
	{ _MMIO(0x9888), 0x14380002 },
	{ _MMIO(0x9888), 0x002f1000 },
	{ _MMIO(0x9888), 0x022f8000 },
	{ _MMIO(0x9888), 0x042f3000 },
	{ _MMIO(0x9888), 0x004c4000 },
	{ _MMIO(0x9888), 0x0a4c1500 },
	{ _MMIO(0x9888), 0x000d2000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0c0f0400 },
	{ _MMIO(0x9888), 0x0e0f9500 },
	{ _MMIO(0x9888), 0x100f002a },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2c8000 },
	{ _MMIO(0x9888), 0x162c0a00 },
	{ _MMIO(0x9888), 0x0a2dc000 },
	{ _MMIO(0x9888), 0x0c2dc000 },
	{ _MMIO(0x9888), 0x04193000 },
	{ _MMIO(0x9888), 0x081a28c1 },
	{ _MMIO(0x9888), 0x001a0000 },
	{ _MMIO(0x9888), 0x00133000 },
	{ _MMIO(0x9888), 0x0613c000 },
	{ _MMIO(0x9888), 0x0813f000 },
	{ _MMIO(0x9888), 0x00172000 },
	{ _MMIO(0x9888), 0x06178000 },
	{ _MMIO(0x9888), 0x0817a000 },
	{ _MMIO(0x9888), 0x00180037 },
	{ _MMIO(0x9888), 0x06180940 },
	{ _MMIO(0x9888), 0x08180000 },
	{ _MMIO(0x9888), 0x02180000 },
	{ _MMIO(0x9888), 0x04183000 },
	{ _MMIO(0x9888), 0x06393000 },
	{ _MMIO(0x9888), 0x0c3a28c1 },
	{ _MMIO(0x9888), 0x003a0000 },
	{ _MMIO(0x9888), 0x0a33f000 },
	{ _MMIO(0x9888), 0x0c33f000 },
	{ _MMIO(0x9888), 0x0a37a000 },
	{ _MMIO(0x9888), 0x0c37a000 },
	{ _MMIO(0x9888), 0x0a380977 },
	{ _MMIO(0x9888), 0x08380000 },
	{ _MMIO(0x9888), 0x04380000 },
	{ _MMIO(0x9888), 0x06383000 },
	{ _MMIO(0x9888), 0x119000ff },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900040 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900800 },
	{ _MMIO(0x9888), 0x47901000 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900844 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1838
1839static int
1840get_vme_pipe_mux_config(struct drm_i915_private *dev_priv,
1841 const struct i915_oa_reg **regs,
1842 int *lens)
1843{
1844 int n = 0;
1845
1846 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1847 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1848
1849 regs[n] = mux_config_vme_pipe;
1850 lens[n] = ARRAY_SIZE(mux_config_vme_pipe);
1851 n++;
1852
1853 return n;
1854}
1855
/* B-counter (boolean counter/report trigger, 0x27xx range) programming for the TEST_OA metric set. */
static const struct i915_oa_reg b_counter_config_test_oa[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2770), 0x00000004 },
	{ _MMIO(0x2774), 0x00000000 },
	{ _MMIO(0x2778), 0x00000003 },
	{ _MMIO(0x277c), 0x00000000 },
	{ _MMIO(0x2780), 0x00000007 },
	{ _MMIO(0x2784), 0x00000000 },
	{ _MMIO(0x2788), 0x00100002 },
	{ _MMIO(0x278c), 0x0000fff7 },
	{ _MMIO(0x2790), 0x00100002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00100082 },
	{ _MMIO(0x279c), 0x0000ffef },
	{ _MMIO(0x27a0), 0x001000c2 },
	{ _MMIO(0x27a4), 0x0000ffe7 },
	{ _MMIO(0x27a8), 0x00100001 },
	{ _MMIO(0x27ac), 0x0000ffe7 },
};

/* TEST_OA uses no flex EU programming; intentionally empty. */
static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};

/* NOA MUX programming (all writes to 0x9888) for the TEST_OA metric set. */
static const struct i915_oa_reg mux_config_test_oa[] = {
	{ _MMIO(0x9888), 0x11810000 },
	{ _MMIO(0x9888), 0x07810013 },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x1b930040 },
	{ _MMIO(0x9888), 0x07e54000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1898
1899static int
1900get_test_oa_mux_config(struct drm_i915_private *dev_priv,
1901 const struct i915_oa_reg **regs,
1902 int *lens)
1903{
1904 int n = 0;
1905
1906 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1907 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1908
1909 regs[n] = mux_config_test_oa;
1910 lens[n] = ARRAY_SIZE(mux_config_test_oa);
1911 n++;
1912
1913 return n;
1914}
1915
1916int i915_oa_select_metric_set_kblgt2(struct drm_i915_private *dev_priv)
1917{
1918 dev_priv->perf.oa.n_mux_configs = 0;
1919 dev_priv->perf.oa.b_counter_regs = NULL;
1920 dev_priv->perf.oa.b_counter_regs_len = 0;
1921 dev_priv->perf.oa.flex_regs = NULL;
1922 dev_priv->perf.oa.flex_regs_len = 0;
1923
1924 switch (dev_priv->perf.oa.metrics_set) {
1925 case METRIC_SET_ID_RENDER_BASIC:
1926 dev_priv->perf.oa.n_mux_configs =
1927 get_render_basic_mux_config(dev_priv,
1928 dev_priv->perf.oa.mux_regs,
1929 dev_priv->perf.oa.mux_regs_lens);
1930 if (dev_priv->perf.oa.n_mux_configs == 0) {
1931 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
1932
1933 /* EINVAL because *_register_sysfs already checked this
1934 * and so it wouldn't have been advertised to userspace and
1935 * so shouldn't have been requested
1936 */
1937 return -EINVAL;
1938 }
1939
1940 dev_priv->perf.oa.b_counter_regs =
1941 b_counter_config_render_basic;
1942 dev_priv->perf.oa.b_counter_regs_len =
1943 ARRAY_SIZE(b_counter_config_render_basic);
1944
1945 dev_priv->perf.oa.flex_regs =
1946 flex_eu_config_render_basic;
1947 dev_priv->perf.oa.flex_regs_len =
1948 ARRAY_SIZE(flex_eu_config_render_basic);
1949
1950 return 0;
1951 case METRIC_SET_ID_COMPUTE_BASIC:
1952 dev_priv->perf.oa.n_mux_configs =
1953 get_compute_basic_mux_config(dev_priv,
1954 dev_priv->perf.oa.mux_regs,
1955 dev_priv->perf.oa.mux_regs_lens);
1956 if (dev_priv->perf.oa.n_mux_configs == 0) {
1957 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
1958
1959 /* EINVAL because *_register_sysfs already checked this
1960 * and so it wouldn't have been advertised to userspace and
1961 * so shouldn't have been requested
1962 */
1963 return -EINVAL;
1964 }
1965
1966 dev_priv->perf.oa.b_counter_regs =
1967 b_counter_config_compute_basic;
1968 dev_priv->perf.oa.b_counter_regs_len =
1969 ARRAY_SIZE(b_counter_config_compute_basic);
1970
1971 dev_priv->perf.oa.flex_regs =
1972 flex_eu_config_compute_basic;
1973 dev_priv->perf.oa.flex_regs_len =
1974 ARRAY_SIZE(flex_eu_config_compute_basic);
1975
1976 return 0;
1977 case METRIC_SET_ID_RENDER_PIPE_PROFILE:
1978 dev_priv->perf.oa.n_mux_configs =
1979 get_render_pipe_profile_mux_config(dev_priv,
1980 dev_priv->perf.oa.mux_regs,
1981 dev_priv->perf.oa.mux_regs_lens);
1982 if (dev_priv->perf.oa.n_mux_configs == 0) {
1983 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
1984
1985 /* EINVAL because *_register_sysfs already checked this
1986 * and so it wouldn't have been advertised to userspace and
1987 * so shouldn't have been requested
1988 */
1989 return -EINVAL;
1990 }
1991
1992 dev_priv->perf.oa.b_counter_regs =
1993 b_counter_config_render_pipe_profile;
1994 dev_priv->perf.oa.b_counter_regs_len =
1995 ARRAY_SIZE(b_counter_config_render_pipe_profile);
1996
1997 dev_priv->perf.oa.flex_regs =
1998 flex_eu_config_render_pipe_profile;
1999 dev_priv->perf.oa.flex_regs_len =
2000 ARRAY_SIZE(flex_eu_config_render_pipe_profile);
2001
2002 return 0;
2003 case METRIC_SET_ID_MEMORY_READS:
2004 dev_priv->perf.oa.n_mux_configs =
2005 get_memory_reads_mux_config(dev_priv,
2006 dev_priv->perf.oa.mux_regs,
2007 dev_priv->perf.oa.mux_regs_lens);
2008 if (dev_priv->perf.oa.n_mux_configs == 0) {
2009 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
2010
2011 /* EINVAL because *_register_sysfs already checked this
2012 * and so it wouldn't have been advertised to userspace and
2013 * so shouldn't have been requested
2014 */
2015 return -EINVAL;
2016 }
2017
2018 dev_priv->perf.oa.b_counter_regs =
2019 b_counter_config_memory_reads;
2020 dev_priv->perf.oa.b_counter_regs_len =
2021 ARRAY_SIZE(b_counter_config_memory_reads);
2022
2023 dev_priv->perf.oa.flex_regs =
2024 flex_eu_config_memory_reads;
2025 dev_priv->perf.oa.flex_regs_len =
2026 ARRAY_SIZE(flex_eu_config_memory_reads);
2027
2028 return 0;
2029 case METRIC_SET_ID_MEMORY_WRITES:
2030 dev_priv->perf.oa.n_mux_configs =
2031 get_memory_writes_mux_config(dev_priv,
2032 dev_priv->perf.oa.mux_regs,
2033 dev_priv->perf.oa.mux_regs_lens);
2034 if (dev_priv->perf.oa.n_mux_configs == 0) {
2035 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
2036
2037 /* EINVAL because *_register_sysfs already checked this
2038 * and so it wouldn't have been advertised to userspace and
2039 * so shouldn't have been requested
2040 */
2041 return -EINVAL;
2042 }
2043
2044 dev_priv->perf.oa.b_counter_regs =
2045 b_counter_config_memory_writes;
2046 dev_priv->perf.oa.b_counter_regs_len =
2047 ARRAY_SIZE(b_counter_config_memory_writes);
2048
2049 dev_priv->perf.oa.flex_regs =
2050 flex_eu_config_memory_writes;
2051 dev_priv->perf.oa.flex_regs_len =
2052 ARRAY_SIZE(flex_eu_config_memory_writes);
2053
2054 return 0;
2055 case METRIC_SET_ID_COMPUTE_EXTENDED:
2056 dev_priv->perf.oa.n_mux_configs =
2057 get_compute_extended_mux_config(dev_priv,
2058 dev_priv->perf.oa.mux_regs,
2059 dev_priv->perf.oa.mux_regs_lens);
2060 if (dev_priv->perf.oa.n_mux_configs == 0) {
2061 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
2062
2063 /* EINVAL because *_register_sysfs already checked this
2064 * and so it wouldn't have been advertised to userspace and
2065 * so shouldn't have been requested
2066 */
2067 return -EINVAL;
2068 }
2069
2070 dev_priv->perf.oa.b_counter_regs =
2071 b_counter_config_compute_extended;
2072 dev_priv->perf.oa.b_counter_regs_len =
2073 ARRAY_SIZE(b_counter_config_compute_extended);
2074
2075 dev_priv->perf.oa.flex_regs =
2076 flex_eu_config_compute_extended;
2077 dev_priv->perf.oa.flex_regs_len =
2078 ARRAY_SIZE(flex_eu_config_compute_extended);
2079
2080 return 0;
2081 case METRIC_SET_ID_COMPUTE_L3_CACHE:
2082 dev_priv->perf.oa.n_mux_configs =
2083 get_compute_l3_cache_mux_config(dev_priv,
2084 dev_priv->perf.oa.mux_regs,
2085 dev_priv->perf.oa.mux_regs_lens);
2086 if (dev_priv->perf.oa.n_mux_configs == 0) {
2087 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
2088
2089 /* EINVAL because *_register_sysfs already checked this
2090 * and so it wouldn't have been advertised to userspace and
2091 * so shouldn't have been requested
2092 */
2093 return -EINVAL;
2094 }
2095
2096 dev_priv->perf.oa.b_counter_regs =
2097 b_counter_config_compute_l3_cache;
2098 dev_priv->perf.oa.b_counter_regs_len =
2099 ARRAY_SIZE(b_counter_config_compute_l3_cache);
2100
2101 dev_priv->perf.oa.flex_regs =
2102 flex_eu_config_compute_l3_cache;
2103 dev_priv->perf.oa.flex_regs_len =
2104 ARRAY_SIZE(flex_eu_config_compute_l3_cache);
2105
2106 return 0;
2107 case METRIC_SET_ID_HDC_AND_SF:
2108 dev_priv->perf.oa.n_mux_configs =
2109 get_hdc_and_sf_mux_config(dev_priv,
2110 dev_priv->perf.oa.mux_regs,
2111 dev_priv->perf.oa.mux_regs_lens);
2112 if (dev_priv->perf.oa.n_mux_configs == 0) {
2113 DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
2114
2115 /* EINVAL because *_register_sysfs already checked this
2116 * and so it wouldn't have been advertised to userspace and
2117 * so shouldn't have been requested
2118 */
2119 return -EINVAL;
2120 }
2121
2122 dev_priv->perf.oa.b_counter_regs =
2123 b_counter_config_hdc_and_sf;
2124 dev_priv->perf.oa.b_counter_regs_len =
2125 ARRAY_SIZE(b_counter_config_hdc_and_sf);
2126
2127 dev_priv->perf.oa.flex_regs =
2128 flex_eu_config_hdc_and_sf;
2129 dev_priv->perf.oa.flex_regs_len =
2130 ARRAY_SIZE(flex_eu_config_hdc_and_sf);
2131
2132 return 0;
2133 case METRIC_SET_ID_L3_1:
2134 dev_priv->perf.oa.n_mux_configs =
2135 get_l3_1_mux_config(dev_priv,
2136 dev_priv->perf.oa.mux_regs,
2137 dev_priv->perf.oa.mux_regs_lens);
2138 if (dev_priv->perf.oa.n_mux_configs == 0) {
2139 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
2140
2141 /* EINVAL because *_register_sysfs already checked this
2142 * and so it wouldn't have been advertised to userspace and
2143 * so shouldn't have been requested
2144 */
2145 return -EINVAL;
2146 }
2147
2148 dev_priv->perf.oa.b_counter_regs =
2149 b_counter_config_l3_1;
2150 dev_priv->perf.oa.b_counter_regs_len =
2151 ARRAY_SIZE(b_counter_config_l3_1);
2152
2153 dev_priv->perf.oa.flex_regs =
2154 flex_eu_config_l3_1;
2155 dev_priv->perf.oa.flex_regs_len =
2156 ARRAY_SIZE(flex_eu_config_l3_1);
2157
2158 return 0;
2159 case METRIC_SET_ID_L3_2:
2160 dev_priv->perf.oa.n_mux_configs =
2161 get_l3_2_mux_config(dev_priv,
2162 dev_priv->perf.oa.mux_regs,
2163 dev_priv->perf.oa.mux_regs_lens);
2164 if (dev_priv->perf.oa.n_mux_configs == 0) {
2165 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
2166
2167 /* EINVAL because *_register_sysfs already checked this
2168 * and so it wouldn't have been advertised to userspace and
2169 * so shouldn't have been requested
2170 */
2171 return -EINVAL;
2172 }
2173
2174 dev_priv->perf.oa.b_counter_regs =
2175 b_counter_config_l3_2;
2176 dev_priv->perf.oa.b_counter_regs_len =
2177 ARRAY_SIZE(b_counter_config_l3_2);
2178
2179 dev_priv->perf.oa.flex_regs =
2180 flex_eu_config_l3_2;
2181 dev_priv->perf.oa.flex_regs_len =
2182 ARRAY_SIZE(flex_eu_config_l3_2);
2183
2184 return 0;
2185 case METRIC_SET_ID_L3_3:
2186 dev_priv->perf.oa.n_mux_configs =
2187 get_l3_3_mux_config(dev_priv,
2188 dev_priv->perf.oa.mux_regs,
2189 dev_priv->perf.oa.mux_regs_lens);
2190 if (dev_priv->perf.oa.n_mux_configs == 0) {
2191 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
2192
2193 /* EINVAL because *_register_sysfs already checked this
2194 * and so it wouldn't have been advertised to userspace and
2195 * so shouldn't have been requested
2196 */
2197 return -EINVAL;
2198 }
2199
2200 dev_priv->perf.oa.b_counter_regs =
2201 b_counter_config_l3_3;
2202 dev_priv->perf.oa.b_counter_regs_len =
2203 ARRAY_SIZE(b_counter_config_l3_3);
2204
2205 dev_priv->perf.oa.flex_regs =
2206 flex_eu_config_l3_3;
2207 dev_priv->perf.oa.flex_regs_len =
2208 ARRAY_SIZE(flex_eu_config_l3_3);
2209
2210 return 0;
2211 case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
2212 dev_priv->perf.oa.n_mux_configs =
2213 get_rasterizer_and_pixel_backend_mux_config(dev_priv,
2214 dev_priv->perf.oa.mux_regs,
2215 dev_priv->perf.oa.mux_regs_lens);
2216 if (dev_priv->perf.oa.n_mux_configs == 0) {
2217 DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
2218
2219 /* EINVAL because *_register_sysfs already checked this
2220 * and so it wouldn't have been advertised to userspace and
2221 * so shouldn't have been requested
2222 */
2223 return -EINVAL;
2224 }
2225
2226 dev_priv->perf.oa.b_counter_regs =
2227 b_counter_config_rasterizer_and_pixel_backend;
2228 dev_priv->perf.oa.b_counter_regs_len =
2229 ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
2230
2231 dev_priv->perf.oa.flex_regs =
2232 flex_eu_config_rasterizer_and_pixel_backend;
2233 dev_priv->perf.oa.flex_regs_len =
2234 ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
2235
2236 return 0;
2237 case METRIC_SET_ID_SAMPLER:
2238 dev_priv->perf.oa.n_mux_configs =
2239 get_sampler_mux_config(dev_priv,
2240 dev_priv->perf.oa.mux_regs,
2241 dev_priv->perf.oa.mux_regs_lens);
2242 if (dev_priv->perf.oa.n_mux_configs == 0) {
2243 DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
2244
2245 /* EINVAL because *_register_sysfs already checked this
2246 * and so it wouldn't have been advertised to userspace and
2247 * so shouldn't have been requested
2248 */
2249 return -EINVAL;
2250 }
2251
2252 dev_priv->perf.oa.b_counter_regs =
2253 b_counter_config_sampler;
2254 dev_priv->perf.oa.b_counter_regs_len =
2255 ARRAY_SIZE(b_counter_config_sampler);
2256
2257 dev_priv->perf.oa.flex_regs =
2258 flex_eu_config_sampler;
2259 dev_priv->perf.oa.flex_regs_len =
2260 ARRAY_SIZE(flex_eu_config_sampler);
2261
2262 return 0;
2263 case METRIC_SET_ID_TDL_1:
2264 dev_priv->perf.oa.n_mux_configs =
2265 get_tdl_1_mux_config(dev_priv,
2266 dev_priv->perf.oa.mux_regs,
2267 dev_priv->perf.oa.mux_regs_lens);
2268 if (dev_priv->perf.oa.n_mux_configs == 0) {
2269 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
2270
2271 /* EINVAL because *_register_sysfs already checked this
2272 * and so it wouldn't have been advertised to userspace and
2273 * so shouldn't have been requested
2274 */
2275 return -EINVAL;
2276 }
2277
2278 dev_priv->perf.oa.b_counter_regs =
2279 b_counter_config_tdl_1;
2280 dev_priv->perf.oa.b_counter_regs_len =
2281 ARRAY_SIZE(b_counter_config_tdl_1);
2282
2283 dev_priv->perf.oa.flex_regs =
2284 flex_eu_config_tdl_1;
2285 dev_priv->perf.oa.flex_regs_len =
2286 ARRAY_SIZE(flex_eu_config_tdl_1);
2287
2288 return 0;
2289 case METRIC_SET_ID_TDL_2:
2290 dev_priv->perf.oa.n_mux_configs =
2291 get_tdl_2_mux_config(dev_priv,
2292 dev_priv->perf.oa.mux_regs,
2293 dev_priv->perf.oa.mux_regs_lens);
2294 if (dev_priv->perf.oa.n_mux_configs == 0) {
2295 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
2296
2297 /* EINVAL because *_register_sysfs already checked this
2298 * and so it wouldn't have been advertised to userspace and
2299 * so shouldn't have been requested
2300 */
2301 return -EINVAL;
2302 }
2303
2304 dev_priv->perf.oa.b_counter_regs =
2305 b_counter_config_tdl_2;
2306 dev_priv->perf.oa.b_counter_regs_len =
2307 ARRAY_SIZE(b_counter_config_tdl_2);
2308
2309 dev_priv->perf.oa.flex_regs =
2310 flex_eu_config_tdl_2;
2311 dev_priv->perf.oa.flex_regs_len =
2312 ARRAY_SIZE(flex_eu_config_tdl_2);
2313
2314 return 0;
2315 case METRIC_SET_ID_COMPUTE_EXTRA:
2316 dev_priv->perf.oa.n_mux_configs =
2317 get_compute_extra_mux_config(dev_priv,
2318 dev_priv->perf.oa.mux_regs,
2319 dev_priv->perf.oa.mux_regs_lens);
2320 if (dev_priv->perf.oa.n_mux_configs == 0) {
2321 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
2322
2323 /* EINVAL because *_register_sysfs already checked this
2324 * and so it wouldn't have been advertised to userspace and
2325 * so shouldn't have been requested
2326 */
2327 return -EINVAL;
2328 }
2329
2330 dev_priv->perf.oa.b_counter_regs =
2331 b_counter_config_compute_extra;
2332 dev_priv->perf.oa.b_counter_regs_len =
2333 ARRAY_SIZE(b_counter_config_compute_extra);
2334
2335 dev_priv->perf.oa.flex_regs =
2336 flex_eu_config_compute_extra;
2337 dev_priv->perf.oa.flex_regs_len =
2338 ARRAY_SIZE(flex_eu_config_compute_extra);
2339
2340 return 0;
2341 case METRIC_SET_ID_VME_PIPE:
2342 dev_priv->perf.oa.n_mux_configs =
2343 get_vme_pipe_mux_config(dev_priv,
2344 dev_priv->perf.oa.mux_regs,
2345 dev_priv->perf.oa.mux_regs_lens);
2346 if (dev_priv->perf.oa.n_mux_configs == 0) {
2347 DRM_DEBUG_DRIVER("No suitable MUX config for \"VME_PIPE\" metric set\n");
2348
2349 /* EINVAL because *_register_sysfs already checked this
2350 * and so it wouldn't have been advertised to userspace and
2351 * so shouldn't have been requested
2352 */
2353 return -EINVAL;
2354 }
2355
2356 dev_priv->perf.oa.b_counter_regs =
2357 b_counter_config_vme_pipe;
2358 dev_priv->perf.oa.b_counter_regs_len =
2359 ARRAY_SIZE(b_counter_config_vme_pipe);
2360
2361 dev_priv->perf.oa.flex_regs =
2362 flex_eu_config_vme_pipe;
2363 dev_priv->perf.oa.flex_regs_len =
2364 ARRAY_SIZE(flex_eu_config_vme_pipe);
2365
2366 return 0;
2367 case METRIC_SET_ID_TEST_OA:
2368 dev_priv->perf.oa.n_mux_configs =
2369 get_test_oa_mux_config(dev_priv,
2370 dev_priv->perf.oa.mux_regs,
2371 dev_priv->perf.oa.mux_regs_lens);
2372 if (dev_priv->perf.oa.n_mux_configs == 0) {
2373 DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
2374
2375 /* EINVAL because *_register_sysfs already checked this
2376 * and so it wouldn't have been advertised to userspace and
2377 * so shouldn't have been requested
2378 */
2379 return -EINVAL;
2380 }
2381
2382 dev_priv->perf.oa.b_counter_regs =
2383 b_counter_config_test_oa;
2384 dev_priv->perf.oa.b_counter_regs_len =
2385 ARRAY_SIZE(b_counter_config_test_oa);
2386
2387 dev_priv->perf.oa.flex_regs =
2388 flex_eu_config_test_oa;
2389 dev_priv->perf.oa.flex_regs_len =
2390 ARRAY_SIZE(flex_eu_config_test_oa);
2391
2392 return 0;
2393 default:
2394 return -ENODEV;
2395 }
2396}
2397
/*
 * sysfs plumbing for the RENDER_BASIC metric set: a read-only "id" file
 * (decimal METRIC_SET_ID_RENDER_BASIC) inside an attribute group named by
 * the set's GUID.
 */
static ssize_t
show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
}

static struct device_attribute dev_attr_render_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_render_basic_id,
	.store = NULL,
};

static struct attribute *attrs_render_basic[] = {
	&dev_attr_render_basic_id.attr,
	NULL,
};

static struct attribute_group group_render_basic = {
	.name = "f8d677e9-ff6f-4df1-9310-0334c6efacce",
	.attrs = attrs_render_basic,
};
2419
/*
 * sysfs plumbing for the COMPUTE_BASIC metric set: a read-only "id" file
 * inside an attribute group named by the set's GUID.
 */
static ssize_t
show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
}

static struct device_attribute dev_attr_compute_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_basic_id,
	.store = NULL,
};

static struct attribute *attrs_compute_basic[] = {
	&dev_attr_compute_basic_id.attr,
	NULL,
};

static struct attribute_group group_compute_basic = {
	.name = "e17fc42a-e614-41b6-90c4-1074841a6c77",
	.attrs = attrs_compute_basic,
};
2441
/*
 * sysfs plumbing for the RENDER_PIPE_PROFILE metric set: a read-only "id"
 * file inside an attribute group named by the set's GUID.
 */
static ssize_t
show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
}

static struct device_attribute dev_attr_render_pipe_profile_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_render_pipe_profile_id,
	.store = NULL,
};

static struct attribute *attrs_render_pipe_profile[] = {
	&dev_attr_render_pipe_profile_id.attr,
	NULL,
};

static struct attribute_group group_render_pipe_profile = {
	.name = "d7a17a3a-ca71-40d2-a919-ace80d50633f",
	.attrs = attrs_render_pipe_profile,
};
2463
/*
 * sysfs plumbing for the MEMORY_READS metric set: a read-only "id" file
 * inside an attribute group named by the set's GUID.
 */
static ssize_t
show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
}

static struct device_attribute dev_attr_memory_reads_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_memory_reads_id,
	.store = NULL,
};

static struct attribute *attrs_memory_reads[] = {
	&dev_attr_memory_reads_id.attr,
	NULL,
};

static struct attribute_group group_memory_reads = {
	.name = "57b59202-172b-477a-87de-33f85572c589",
	.attrs = attrs_memory_reads,
};
2485
/*
 * sysfs plumbing for the MEMORY_WRITES metric set: a read-only "id" file
 * inside an attribute group named by the set's GUID.
 */
static ssize_t
show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
}

static struct device_attribute dev_attr_memory_writes_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_memory_writes_id,
	.store = NULL,
};

static struct attribute *attrs_memory_writes[] = {
	&dev_attr_memory_writes_id.attr,
	NULL,
};

static struct attribute_group group_memory_writes = {
	.name = "3addf8ef-8e9b-40f5-a448-3dbb5d5128b0",
	.attrs = attrs_memory_writes,
};
2507
/*
 * sysfs plumbing for the COMPUTE_EXTENDED metric set: a read-only "id"
 * file inside an attribute group named by the set's GUID.
 */
static ssize_t
show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
}

static struct device_attribute dev_attr_compute_extended_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_extended_id,
	.store = NULL,
};

static struct attribute *attrs_compute_extended[] = {
	&dev_attr_compute_extended_id.attr,
	NULL,
};

static struct attribute_group group_compute_extended = {
	.name = "4af0400a-81c3-47db-a6b6-deddbd75680e",
	.attrs = attrs_compute_extended,
};
2529
/*
 * sysfs plumbing for the COMPUTE_L3_CACHE metric set: a read-only "id"
 * file inside an attribute group named by the set's GUID.
 */
static ssize_t
show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
}

static struct device_attribute dev_attr_compute_l3_cache_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_l3_cache_id,
	.store = NULL,
};

static struct attribute *attrs_compute_l3_cache[] = {
	&dev_attr_compute_l3_cache_id.attr,
	NULL,
};

static struct attribute_group group_compute_l3_cache = {
	.name = "0e22f995-79ca-4f67-83ab-e9d9772488d8",
	.attrs = attrs_compute_l3_cache,
};
2551
/*
 * sysfs plumbing for the HDC_AND_SF metric set: a read-only "id" file
 * inside an attribute group named by the set's GUID.
 */
static ssize_t
show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
}

static struct device_attribute dev_attr_hdc_and_sf_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_hdc_and_sf_id,
	.store = NULL,
};

static struct attribute *attrs_hdc_and_sf[] = {
	&dev_attr_hdc_and_sf_id.attr,
	NULL,
};

static struct attribute_group group_hdc_and_sf = {
	.name = "bc2a00f7-cb8a-4ff2-8ad0-e241dad16937",
	.attrs = attrs_hdc_and_sf,
};
2573
/*
 * sysfs plumbing for the L3_1 metric set: a read-only "id" file inside an
 * attribute group named by the set's GUID.
 */
static ssize_t
show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
}

static struct device_attribute dev_attr_l3_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_1_id,
	.store = NULL,
};

static struct attribute *attrs_l3_1[] = {
	&dev_attr_l3_1_id.attr,
	NULL,
};

static struct attribute_group group_l3_1 = {
	.name = "d2bbe790-f058-42d9-81c6-cdedcf655bc2",
	.attrs = attrs_l3_1,
};
2595
/*
 * sysfs plumbing for the L3_2 metric set: a read-only "id" file inside an
 * attribute group named by the set's GUID.
 */
static ssize_t
show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
}

static struct device_attribute dev_attr_l3_2_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_2_id,
	.store = NULL,
};

static struct attribute *attrs_l3_2[] = {
	&dev_attr_l3_2_id.attr,
	NULL,
};

static struct attribute_group group_l3_2 = {
	.name = "2f8e32e4-5956-46e2-af31-c8ea95887332",
	.attrs = attrs_l3_2,
};
2617
/*
 * sysfs plumbing for the L3_3 metric set: a read-only "id" file inside an
 * attribute group named by the set's GUID.
 */
static ssize_t
show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
}

static struct device_attribute dev_attr_l3_3_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_3_id,
	.store = NULL,
};

static struct attribute *attrs_l3_3[] = {
	&dev_attr_l3_3_id.attr,
	NULL,
};

static struct attribute_group group_l3_3 = {
	.name = "ca046aad-b5fb-4101-adce-6473ee6e5b14",
	.attrs = attrs_l3_3,
};
2639
/*
 * sysfs plumbing for the RASTERIZER_AND_PIXEL_BACKEND metric set: a
 * read-only "id" file inside an attribute group named by the set's GUID.
 */
static ssize_t
show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
}

static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_rasterizer_and_pixel_backend_id,
	.store = NULL,
};

static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
	&dev_attr_rasterizer_and_pixel_backend_id.attr,
	NULL,
};

static struct attribute_group group_rasterizer_and_pixel_backend = {
	.name = "605f388f-24bb-455c-88e3-8d57ae0d7e9f",
	.attrs = attrs_rasterizer_and_pixel_backend,
};
2661
/*
 * sysfs plumbing for the SAMPLER metric set: a read-only "id" file inside
 * an attribute group named by the set's GUID.
 */
static ssize_t
show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
}

static struct device_attribute dev_attr_sampler_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_sampler_id,
	.store = NULL,
};

static struct attribute *attrs_sampler[] = {
	&dev_attr_sampler_id.attr,
	NULL,
};

static struct attribute_group group_sampler = {
	.name = "31dd157c-bf4e-4bab-bf2b-f5c8174af1af",
	.attrs = attrs_sampler,
};
2683
/*
 * sysfs plumbing for the TDL_1 metric set: a read-only "id" file inside an
 * attribute group named by the set's GUID.
 */
static ssize_t
show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
}

static struct device_attribute dev_attr_tdl_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_tdl_1_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_1[] = {
	&dev_attr_tdl_1_id.attr,
	NULL,
};

static struct attribute_group group_tdl_1 = {
	.name = "105db928-5542-466b-9128-e1f3c91426cb",
	.attrs = attrs_tdl_1,
};
2705
/*
 * sysfs plumbing for the TDL_2 metric set: a read-only "id" file inside an
 * attribute group named by the set's GUID.
 */
static ssize_t
show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
}

static struct device_attribute dev_attr_tdl_2_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_tdl_2_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_2[] = {
	&dev_attr_tdl_2_id.attr,
	NULL,
};

static struct attribute_group group_tdl_2 = {
	.name = "03db94d2-b37f-4c58-a791-0d2067b013bb",
	.attrs = attrs_tdl_2,
};
2727
/*
 * sysfs plumbing for the COMPUTE_EXTRA metric set: a read-only "id" file
 * inside an attribute group named by the set's GUID.
 */
static ssize_t
show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
}

static struct device_attribute dev_attr_compute_extra_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_extra_id,
	.store = NULL,
};

static struct attribute *attrs_compute_extra[] = {
	&dev_attr_compute_extra_id.attr,
	NULL,
};

static struct attribute_group group_compute_extra = {
	.name = "aa7a3fb9-22fb-43ff-a32d-0ab6c13bbd16",
	.attrs = attrs_compute_extra,
};
2749
/*
 * sysfs plumbing for the VME_PIPE metric set: a read-only "id" file inside
 * an attribute group named by the set's GUID.
 */
static ssize_t
show_vme_pipe_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_VME_PIPE);
}

static struct device_attribute dev_attr_vme_pipe_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_vme_pipe_id,
	.store = NULL,
};

static struct attribute *attrs_vme_pipe[] = {
	&dev_attr_vme_pipe_id.attr,
	NULL,
};

static struct attribute_group group_vme_pipe = {
	.name = "398a4268-ef6f-4ffc-b55f-3c7b5363ce61",
	.attrs = attrs_vme_pipe,
};
2771
/*
 * sysfs plumbing for the TEST_OA metric set: a read-only "id" file inside
 * an attribute group named by the set's GUID.
 */
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
}

static struct device_attribute dev_attr_test_oa_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_test_oa_id,
	.store = NULL,
};

static struct attribute *attrs_test_oa[] = {
	&dev_attr_test_oa_id.attr,
	NULL,
};

static struct attribute_group group_test_oa = {
	.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd",
	.attrs = attrs_test_oa,
};
2793
/*
 * i915_perf_register_sysfs_kblgt2 - advertise available metric sets in sysfs
 * @dev_priv: i915 device instance
 *
 * For each metric set whose get_*_mux_config() reports at least one MUX
 * config usable on this device, create that set's attribute group under
 * dev_priv->perf.metrics_kobj.
 *
 * Return: 0 on success, or the failing sysfs_create_group() error code.
 * On failure the groups created so far are removed in reverse creation
 * order; each error label repeats the availability check so only groups
 * that were actually created are removed.
 */
int
i915_perf_register_sysfs_kblgt2(struct drm_i915_private *dev_priv)
{
	/* Scratch outputs for the availability checks; contents unused here. */
	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
	int ret = 0;

	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
		if (ret)
			goto error_render_basic;
	}
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
		if (ret)
			goto error_compute_basic;
	}
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
		if (ret)
			goto error_render_pipe_profile;
	}
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
		if (ret)
			goto error_memory_reads;
	}
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
		if (ret)
			goto error_memory_writes;
	}
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
		if (ret)
			goto error_compute_extended;
	}
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
		if (ret)
			goto error_compute_l3_cache;
	}
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
		if (ret)
			goto error_hdc_and_sf;
	}
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
		if (ret)
			goto error_l3_1;
	}
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
		if (ret)
			goto error_l3_2;
	}
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
		if (ret)
			goto error_l3_3;
	}
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
		if (ret)
			goto error_rasterizer_and_pixel_backend;
	}
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
		if (ret)
			goto error_sampler;
	}
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
		if (ret)
			goto error_tdl_1;
	}
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
		if (ret)
			goto error_tdl_2;
	}
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
		if (ret)
			goto error_compute_extra;
	}
	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
		if (ret)
			goto error_vme_pipe;
	}
	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
		if (ret)
			goto error_test_oa;
	}

	return 0;

	/* Unwind: the mirror image of the creation sequence above. */
error_test_oa:
	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
error_vme_pipe:
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
error_compute_extra:
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
error_tdl_2:
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
error_tdl_1:
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
error_sampler:
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
error_rasterizer_and_pixel_backend:
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
error_l3_3:
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
error_l3_2:
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
error_l3_1:
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
error_hdc_and_sf:
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
error_compute_l3_cache:
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
error_compute_extended:
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
error_memory_writes:
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
error_memory_reads:
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
error_render_pipe_profile:
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
error_compute_basic:
	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
error_render_basic:
	return ret;
}
2948
2949void
2950i915_perf_unregister_sysfs_kblgt2(struct drm_i915_private *dev_priv)
2951{
2952 const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
2953 int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
2954
2955 if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
2956 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
2957 if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
2958 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
2959 if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
2960 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
2961 if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
2962 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
2963 if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
2964 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
2965 if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
2966 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
2967 if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
2968 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
2969 if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
2970 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
2971 if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
2972 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
2973 if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
2974 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
2975 if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
2976 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
2977 if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
2978 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
2979 if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
2980 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
2981 if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
2982 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
2983 if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
2984 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
2985 if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
2986 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
2987 if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
2988 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
2989 if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
2990 sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
2991}
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
new file mode 100644
index 000000000000..7e61bfc4f9f5
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
@@ -0,0 +1,40 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
#ifndef __I915_OA_KBLGT2_H__
#define __I915_OA_KBLGT2_H__

/* Number of OA metric sets described by the KBL GT2 configuration. */
extern int i915_oa_n_builtin_metric_sets_kblgt2;

/*
 * Select/apply the currently requested metric set.
 * NOTE(review): return convention (0 / negative errno) assumed from
 * kernel style — confirm against the definition in i915_oa_kblgt2.c.
 */
extern int i915_oa_select_metric_set_kblgt2(struct drm_i915_private *dev_priv);

/* Create the per-metric-set sysfs groups under perf.metrics_kobj. */
extern int i915_perf_register_sysfs_kblgt2(struct drm_i915_private *dev_priv);

/* Remove every sysfs group created by the register call above. */
extern void i915_perf_unregister_sysfs_kblgt2(struct drm_i915_private *dev_priv);

#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
new file mode 100644
index 000000000000..6ed092566a32
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
@@ -0,0 +1,3040 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#include <linux/sysfs.h>
30
31#include "i915_drv.h"
32#include "i915_oa_kblgt3.h"
33
/*
 * Identifiers for the OA metric sets provided by this (autogenerated)
 * KBL GT3 configuration.  Numbering starts at 1, leaving 0 unused —
 * NOTE(review): presumably so 0 can act as a "no set selected"
 * sentinel; confirm against the metric-set selection code.
 */
enum metric_set_id {
	METRIC_SET_ID_RENDER_BASIC = 1,
	METRIC_SET_ID_COMPUTE_BASIC,
	METRIC_SET_ID_RENDER_PIPE_PROFILE,
	METRIC_SET_ID_MEMORY_READS,
	METRIC_SET_ID_MEMORY_WRITES,
	METRIC_SET_ID_COMPUTE_EXTENDED,
	METRIC_SET_ID_COMPUTE_L3_CACHE,
	METRIC_SET_ID_HDC_AND_SF,
	METRIC_SET_ID_L3_1,
	METRIC_SET_ID_L3_2,
	METRIC_SET_ID_L3_3,
	METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
	METRIC_SET_ID_SAMPLER,
	METRIC_SET_ID_TDL_1,
	METRIC_SET_ID_TDL_2,
	METRIC_SET_ID_COMPUTE_EXTRA,
	METRIC_SET_ID_VME_PIPE,
	METRIC_SET_ID_TEST_OA,
};

/* Count of entries in metric_set_id above (RENDER_BASIC..TEST_OA). */
int i915_oa_n_builtin_metric_sets_kblgt3 = 18;
56
/*
 * Boolean-counter (b_counter) OA register writes for the RenderBasic
 * metric set.  Autogenerated table — do not hand-edit values.
 */
static const struct i915_oa_reg b_counter_config_render_basic[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2740), 0x00000000 },
};

/*
 * Flexible-EU counter configuration (registers 0xe458-0xe65c) for the
 * RenderBasic metric set.
 */
static const struct i915_oa_reg flex_eu_config_render_basic[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
74
/*
 * Mux programming for the RenderBasic metric set.  Every write targets
 * register 0x9888; the data words select which internal signals feed
 * the OA counters.  Autogenerated — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_render_basic[] = {
	{ _MMIO(0x9888), 0x166c01e0 },
	{ _MMIO(0x9888), 0x12170280 },
	{ _MMIO(0x9888), 0x12370280 },
	{ _MMIO(0x9888), 0x16ec01e0 },
	{ _MMIO(0x9888), 0x11930317 },
	{ _MMIO(0x9888), 0x159303df },
	{ _MMIO(0x9888), 0x3f900003 },
	{ _MMIO(0x9888), 0x1a4e0380 },
	{ _MMIO(0x9888), 0x0a6c0053 },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x0a1b4000 },
	{ _MMIO(0x9888), 0x1c1c0001 },
	{ _MMIO(0x9888), 0x002f1000 },
	{ _MMIO(0x9888), 0x042f1000 },
	{ _MMIO(0x9888), 0x004c4000 },
	{ _MMIO(0x9888), 0x0a4c8400 },
	{ _MMIO(0x9888), 0x0c4c0002 },
	{ _MMIO(0x9888), 0x000d2000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0f0400 },
	{ _MMIO(0x9888), 0x0e0f6600 },
	{ _MMIO(0x9888), 0x100f0001 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x162ca200 },
	{ _MMIO(0x9888), 0x062d8000 },
	{ _MMIO(0x9888), 0x082d8000 },
	{ _MMIO(0x9888), 0x00133000 },
	{ _MMIO(0x9888), 0x08133000 },
	{ _MMIO(0x9888), 0x00170020 },
	{ _MMIO(0x9888), 0x08170021 },
	{ _MMIO(0x9888), 0x10170000 },
	{ _MMIO(0x9888), 0x0633c000 },
	{ _MMIO(0x9888), 0x0833c000 },
	{ _MMIO(0x9888), 0x06370800 },
	{ _MMIO(0x9888), 0x08370840 },
	{ _MMIO(0x9888), 0x10370000 },
	{ _MMIO(0x9888), 0x1ace0200 },
	{ _MMIO(0x9888), 0x0aec5300 },
	{ _MMIO(0x9888), 0x10ec0000 },
	{ _MMIO(0x9888), 0x1cec0000 },
	{ _MMIO(0x9888), 0x0a9b8000 },
	{ _MMIO(0x9888), 0x1c9c0002 },
	{ _MMIO(0x9888), 0x0ccc0002 },
	{ _MMIO(0x9888), 0x0a8d8000 },
	{ _MMIO(0x9888), 0x108f0001 },
	{ _MMIO(0x9888), 0x16ac8000 },
	{ _MMIO(0x9888), 0x0d933031 },
	{ _MMIO(0x9888), 0x0f933e3f },
	{ _MMIO(0x9888), 0x01933d00 },
	{ _MMIO(0x9888), 0x0393073c },
	{ _MMIO(0x9888), 0x0593000e },
	{ _MMIO(0x9888), 0x1d930000 },
	{ _MMIO(0x9888), 0x19930000 },
	{ _MMIO(0x9888), 0x1b930000 },
	{ _MMIO(0x9888), 0x1d900157 },
	{ _MMIO(0x9888), 0x1f900158 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x2b908000 },
	{ _MMIO(0x9888), 0x2d908000 },
	{ _MMIO(0x9888), 0x2f908000 },
	{ _MMIO(0x9888), 0x31908000 },
	{ _MMIO(0x9888), 0x15908000 },
	{ _MMIO(0x9888), 0x17908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1190003f },
	{ _MMIO(0x9888), 0x51902240 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x55900242 },
	{ _MMIO(0x9888), 0x45900084 },
	{ _MMIO(0x9888), 0x47901400 },
	{ _MMIO(0x9888), 0x57902220 },
	{ _MMIO(0x9888), 0x49900c60 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900063 },
	{ _MMIO(0x9888), 0x59900002 },
	{ _MMIO(0x9888), 0x43900c63 },
	{ _MMIO(0x9888), 0x53902222 },
};
159
160static int
161get_render_basic_mux_config(struct drm_i915_private *dev_priv,
162 const struct i915_oa_reg **regs,
163 int *lens)
164{
165 int n = 0;
166
167 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
168 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
169
170 regs[n] = mux_config_render_basic;
171 lens[n] = ARRAY_SIZE(mux_config_render_basic);
172 n++;
173
174 return n;
175}
176
/*
 * Boolean-counter (b_counter) OA register writes for the ComputeBasic
 * metric set.  Autogenerated table — do not hand-edit values.
 */
static const struct i915_oa_reg b_counter_config_compute_basic[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2740), 0x00000000 },
};

/*
 * Flexible-EU counter configuration (registers 0xe458-0xe65c) for the
 * ComputeBasic metric set.
 */
static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};
194
/*
 * Mux programming for the ComputeBasic metric set; all writes target
 * register 0x9888.  Autogenerated — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_basic[] = {
	{ _MMIO(0x9888), 0x104f00e0 },
	{ _MMIO(0x9888), 0x124f1c00 },
	{ _MMIO(0x9888), 0x106c00e0 },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f900003 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x1a4e0820 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x064f0900 },
	{ _MMIO(0x9888), 0x084f0032 },
	{ _MMIO(0x9888), 0x0a4f1891 },
	{ _MMIO(0x9888), 0x0c4f0e00 },
	{ _MMIO(0x9888), 0x0e4f003c },
	{ _MMIO(0x9888), 0x004f0d80 },
	{ _MMIO(0x9888), 0x024f003b },
	{ _MMIO(0x9888), 0x006c0002 },
	{ _MMIO(0x9888), 0x086c0100 },
	{ _MMIO(0x9888), 0x0c6c000c },
	{ _MMIO(0x9888), 0x0e6c0b00 },
	{ _MMIO(0x9888), 0x186c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x001b4000 },
	{ _MMIO(0x9888), 0x081b8000 },
	{ _MMIO(0x9888), 0x0c1b4000 },
	{ _MMIO(0x9888), 0x0e1b8000 },
	{ _MMIO(0x9888), 0x101c8000 },
	{ _MMIO(0x9888), 0x1a1c8000 },
	{ _MMIO(0x9888), 0x1c1c0024 },
	{ _MMIO(0x9888), 0x065b8000 },
	{ _MMIO(0x9888), 0x085b4000 },
	{ _MMIO(0x9888), 0x0a5bc000 },
	{ _MMIO(0x9888), 0x0c5b8000 },
	{ _MMIO(0x9888), 0x0e5b4000 },
	{ _MMIO(0x9888), 0x005b8000 },
	{ _MMIO(0x9888), 0x025b4000 },
	{ _MMIO(0x9888), 0x1a5c6000 },
	{ _MMIO(0x9888), 0x1c5c001b },
	{ _MMIO(0x9888), 0x125c8000 },
	{ _MMIO(0x9888), 0x145c8000 },
	{ _MMIO(0x9888), 0x004c8000 },
	{ _MMIO(0x9888), 0x0a4c2000 },
	{ _MMIO(0x9888), 0x0c4c0208 },
	{ _MMIO(0x9888), 0x000da000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x020d2000 },
	{ _MMIO(0x9888), 0x0c0f5400 },
	{ _MMIO(0x9888), 0x0e0f5500 },
	{ _MMIO(0x9888), 0x100f0155 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2cc000 },
	{ _MMIO(0x9888), 0x162cfb00 },
	{ _MMIO(0x9888), 0x182c00be },
	{ _MMIO(0x9888), 0x022cc000 },
	{ _MMIO(0x9888), 0x042cc000 },
	{ _MMIO(0x9888), 0x19900157 },
	{ _MMIO(0x9888), 0x1b900158 },
	{ _MMIO(0x9888), 0x1d900105 },
	{ _MMIO(0x9888), 0x1f900103 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x11900fff },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900800 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900821 },
	{ _MMIO(0x9888), 0x47900802 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900802 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900002 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900422 },
	{ _MMIO(0x9888), 0x53904444 },
};
274
275static int
276get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
277 const struct i915_oa_reg **regs,
278 int *lens)
279{
280 int n = 0;
281
282 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
283 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
284
285 regs[n] = mux_config_compute_basic;
286 lens[n] = ARRAY_SIZE(mux_config_compute_basic);
287 n++;
288
289 return n;
290}
291
/*
 * Boolean-counter (b_counter) OA register writes for the
 * RenderPipeProfile metric set.  Registers 0x2770-0x27ac program the
 * OA report-trigger/counter-select pairs.  Autogenerated table.
 */
static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007ffea },
	{ _MMIO(0x2774), 0x00007ffc },
	{ _MMIO(0x2778), 0x0007affa },
	{ _MMIO(0x277c), 0x0000f5fd },
	{ _MMIO(0x2780), 0x00079ffa },
	{ _MMIO(0x2784), 0x0000f3fb },
	{ _MMIO(0x2788), 0x0007bf7a },
	{ _MMIO(0x278c), 0x0000f7e7 },
	{ _MMIO(0x2790), 0x0007fefa },
	{ _MMIO(0x2794), 0x0000f7cf },
	{ _MMIO(0x2798), 0x00077ffa },
	{ _MMIO(0x279c), 0x0000efdf },
	{ _MMIO(0x27a0), 0x0006fffa },
	{ _MMIO(0x27a4), 0x0000cfbf },
	{ _MMIO(0x27a8), 0x0003fffa },
	{ _MMIO(0x27ac), 0x00005f7f },
};

/*
 * Flexible-EU counter configuration (registers 0xe458-0xe65c) for the
 * RenderPipeProfile metric set.
 */
static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
325
/*
 * Mux programming for the RenderPipeProfile metric set; all writes
 * target register 0x9888.  Autogenerated — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
	{ _MMIO(0x9888), 0x0c0e001f },
	{ _MMIO(0x9888), 0x0a0f0000 },
	{ _MMIO(0x9888), 0x10116800 },
	{ _MMIO(0x9888), 0x178a03e0 },
	{ _MMIO(0x9888), 0x11824c00 },
	{ _MMIO(0x9888), 0x11830020 },
	{ _MMIO(0x9888), 0x13840020 },
	{ _MMIO(0x9888), 0x11850019 },
	{ _MMIO(0x9888), 0x11860007 },
	{ _MMIO(0x9888), 0x01870c40 },
	{ _MMIO(0x9888), 0x17880000 },
	{ _MMIO(0x9888), 0x022f4000 },
	{ _MMIO(0x9888), 0x0a4c0040 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x040d4000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x020e5400 },
	{ _MMIO(0x9888), 0x000e0000 },
	{ _MMIO(0x9888), 0x080f0040 },
	{ _MMIO(0x9888), 0x000f0000 },
	{ _MMIO(0x9888), 0x100f0000 },
	{ _MMIO(0x9888), 0x0e0f0040 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x06104000 },
	{ _MMIO(0x9888), 0x06110012 },
	{ _MMIO(0x9888), 0x06131000 },
	{ _MMIO(0x9888), 0x01898000 },
	{ _MMIO(0x9888), 0x0d890100 },
	{ _MMIO(0x9888), 0x03898000 },
	{ _MMIO(0x9888), 0x09808000 },
	{ _MMIO(0x9888), 0x0b808000 },
	{ _MMIO(0x9888), 0x0380c000 },
	{ _MMIO(0x9888), 0x0f8a0075 },
	{ _MMIO(0x9888), 0x1d8a0000 },
	{ _MMIO(0x9888), 0x118a8000 },
	{ _MMIO(0x9888), 0x1b8a4000 },
	{ _MMIO(0x9888), 0x138a8000 },
	{ _MMIO(0x9888), 0x1d81a000 },
	{ _MMIO(0x9888), 0x15818000 },
	{ _MMIO(0x9888), 0x17818000 },
	{ _MMIO(0x9888), 0x0b820030 },
	{ _MMIO(0x9888), 0x07828000 },
	{ _MMIO(0x9888), 0x0d824000 },
	{ _MMIO(0x9888), 0x0f828000 },
	{ _MMIO(0x9888), 0x05824000 },
	{ _MMIO(0x9888), 0x0d830003 },
	{ _MMIO(0x9888), 0x0583000c },
	{ _MMIO(0x9888), 0x09830000 },
	{ _MMIO(0x9888), 0x03838000 },
	{ _MMIO(0x9888), 0x07838000 },
	{ _MMIO(0x9888), 0x0b840980 },
	{ _MMIO(0x9888), 0x03844d80 },
	{ _MMIO(0x9888), 0x11840000 },
	{ _MMIO(0x9888), 0x09848000 },
	{ _MMIO(0x9888), 0x09850080 },
	{ _MMIO(0x9888), 0x03850003 },
	{ _MMIO(0x9888), 0x01850000 },
	{ _MMIO(0x9888), 0x07860000 },
	{ _MMIO(0x9888), 0x0f860400 },
	{ _MMIO(0x9888), 0x09870032 },
	{ _MMIO(0x9888), 0x01888052 },
	{ _MMIO(0x9888), 0x11880000 },
	{ _MMIO(0x9888), 0x09884000 },
	{ _MMIO(0x9888), 0x1b931001 },
	{ _MMIO(0x9888), 0x1d930001 },
	{ _MMIO(0x9888), 0x19934000 },
	{ _MMIO(0x9888), 0x1b958000 },
	{ _MMIO(0x9888), 0x1d950094 },
	{ _MMIO(0x9888), 0x19958000 },
	{ _MMIO(0x9888), 0x09e58000 },
	{ _MMIO(0x9888), 0x0be58000 },
	{ _MMIO(0x9888), 0x03e5c000 },
	{ _MMIO(0x9888), 0x0592c000 },
	{ _MMIO(0x9888), 0x0b928000 },
	{ _MMIO(0x9888), 0x0d924000 },
	{ _MMIO(0x9888), 0x0f924000 },
	{ _MMIO(0x9888), 0x11928000 },
	{ _MMIO(0x9888), 0x1392c000 },
	{ _MMIO(0x9888), 0x09924000 },
	{ _MMIO(0x9888), 0x01985000 },
	{ _MMIO(0x9888), 0x07988000 },
	{ _MMIO(0x9888), 0x09981000 },
	{ _MMIO(0x9888), 0x0b982000 },
	{ _MMIO(0x9888), 0x0d982000 },
	{ _MMIO(0x9888), 0x0f989000 },
	{ _MMIO(0x9888), 0x05982000 },
	{ _MMIO(0x9888), 0x13904000 },
	{ _MMIO(0x9888), 0x21904000 },
	{ _MMIO(0x9888), 0x23904000 },
	{ _MMIO(0x9888), 0x25908000 },
	{ _MMIO(0x9888), 0x27904000 },
	{ _MMIO(0x9888), 0x29908000 },
	{ _MMIO(0x9888), 0x2b904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1190c080 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900440 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900400 },
	{ _MMIO(0x9888), 0x47900c21 },
	{ _MMIO(0x9888), 0x57900400 },
	{ _MMIO(0x9888), 0x49900042 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900024 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900841 },
	{ _MMIO(0x9888), 0x53900400 },
};
441
442static int
443get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
444 const struct i915_oa_reg **regs,
445 int *lens)
446{
447 int n = 0;
448
449 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
450 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
451
452 regs[n] = mux_config_render_pipe_profile;
453 lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
454 n++;
455
456 return n;
457}
458
/*
 * Boolean-counter (b_counter) OA register writes for the MemoryReads
 * metric set.  Autogenerated table — do not hand-edit values.
 */
static const struct i915_oa_reg b_counter_config_memory_reads[] = {
	{ _MMIO(0x272c), 0xffffffff },
	{ _MMIO(0x2728), 0xffffffff },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x271c), 0xffffffff },
	{ _MMIO(0x2718), 0xffffffff },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x86543210 },
	{ _MMIO(0x2748), 0x86543210 },
	{ _MMIO(0x2744), 0x00006667 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x86543210 },
	{ _MMIO(0x2758), 0x86543210 },
	{ _MMIO(0x2754), 0x00006465 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fe00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fe00 },
	{ _MMIO(0x2780), 0x0007f872 },
	{ _MMIO(0x2784), 0x0000fe00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fe00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fe00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fe00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fe00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fe00 },
};

/*
 * Flexible-EU counter configuration (registers 0xe458-0xe65c) for the
 * MemoryReads metric set.
 */
static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
503
/*
 * Mux programming for the MemoryReads metric set; all writes target
 * register 0x9888.  Autogenerated — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_memory_reads[] = {
	{ _MMIO(0x9888), 0x11810c00 },
	{ _MMIO(0x9888), 0x1381001a },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f900064 },
	{ _MMIO(0x9888), 0x03811300 },
	{ _MMIO(0x9888), 0x05811b12 },
	{ _MMIO(0x9888), 0x0781001a },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x17810000 },
	{ _MMIO(0x9888), 0x19810000 },
	{ _MMIO(0x9888), 0x1b810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x1b930055 },
	{ _MMIO(0x9888), 0x03e58000 },
	{ _MMIO(0x9888), 0x05e5c000 },
	{ _MMIO(0x9888), 0x07e54000 },
	{ _MMIO(0x9888), 0x13900150 },
	{ _MMIO(0x9888), 0x21900151 },
	{ _MMIO(0x9888), 0x23900152 },
	{ _MMIO(0x9888), 0x25900153 },
	{ _MMIO(0x9888), 0x27900154 },
	{ _MMIO(0x9888), 0x29900155 },
	{ _MMIO(0x9888), 0x2b900156 },
	{ _MMIO(0x9888), 0x2d900157 },
	{ _MMIO(0x9888), 0x2f90015f },
	{ _MMIO(0x9888), 0x31900105 },
	{ _MMIO(0x9888), 0x15900103 },
	{ _MMIO(0x9888), 0x17900101 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c60 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900c00 },
	{ _MMIO(0x9888), 0x47900c63 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900c63 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900063 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
};
552
553static int
554get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
555 const struct i915_oa_reg **regs,
556 int *lens)
557{
558 int n = 0;
559
560 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
561 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
562
563 regs[n] = mux_config_memory_reads;
564 lens[n] = ARRAY_SIZE(mux_config_memory_reads);
565 n++;
566
567 return n;
568}
569
/*
 * Boolean-counter (b_counter) OA register writes for the MemoryWrites
 * metric set.  Identical to the MemoryReads table except for the
 * 0x2780 select value.  Autogenerated — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_memory_writes[] = {
	{ _MMIO(0x272c), 0xffffffff },
	{ _MMIO(0x2728), 0xffffffff },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x271c), 0xffffffff },
	{ _MMIO(0x2718), 0xffffffff },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x86543210 },
	{ _MMIO(0x2748), 0x86543210 },
	{ _MMIO(0x2744), 0x00006667 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x86543210 },
	{ _MMIO(0x2758), 0x86543210 },
	{ _MMIO(0x2754), 0x00006465 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fe00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fe00 },
	{ _MMIO(0x2780), 0x0007f822 },
	{ _MMIO(0x2784), 0x0000fe00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fe00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fe00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fe00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fe00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fe00 },
};

/*
 * Flexible-EU counter configuration (registers 0xe458-0xe65c) for the
 * MemoryWrites metric set.
 */
static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
614
/*
 * Mux programming for the MemoryWrites metric set; all writes target
 * register 0x9888.  Autogenerated — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_memory_writes[] = {
	{ _MMIO(0x9888), 0x11810c00 },
	{ _MMIO(0x9888), 0x1381001a },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f901000 },
	{ _MMIO(0x9888), 0x03811300 },
	{ _MMIO(0x9888), 0x05811b12 },
	{ _MMIO(0x9888), 0x0781001a },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x17810000 },
	{ _MMIO(0x9888), 0x19810000 },
	{ _MMIO(0x9888), 0x1b810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x1b930055 },
	{ _MMIO(0x9888), 0x03e58000 },
	{ _MMIO(0x9888), 0x05e5c000 },
	{ _MMIO(0x9888), 0x07e54000 },
	{ _MMIO(0x9888), 0x13900160 },
	{ _MMIO(0x9888), 0x21900161 },
	{ _MMIO(0x9888), 0x23900162 },
	{ _MMIO(0x9888), 0x25900163 },
	{ _MMIO(0x9888), 0x27900164 },
	{ _MMIO(0x9888), 0x29900165 },
	{ _MMIO(0x9888), 0x2b900166 },
	{ _MMIO(0x9888), 0x2d900167 },
	{ _MMIO(0x9888), 0x2f900150 },
	{ _MMIO(0x9888), 0x31900105 },
	{ _MMIO(0x9888), 0x15900103 },
	{ _MMIO(0x9888), 0x17900101 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c60 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900c00 },
	{ _MMIO(0x9888), 0x47900c63 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900c63 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900063 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
};
663
664static int
665get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
666 const struct i915_oa_reg **regs,
667 int *lens)
668{
669 int n = 0;
670
671 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
672 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
673
674 regs[n] = mux_config_memory_writes;
675 lens[n] = ARRAY_SIZE(mux_config_memory_writes);
676 n++;
677
678 return n;
679}
680
/*
 * Boolean-counter (b_counter) OA register writes for the
 * ComputeExtended metric set.  Autogenerated table — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_compute_extended[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fc2a },
	{ _MMIO(0x2774), 0x0000bf00 },
	{ _MMIO(0x2778), 0x0007fc6a },
	{ _MMIO(0x277c), 0x0000bf00 },
	{ _MMIO(0x2780), 0x0007fc92 },
	{ _MMIO(0x2784), 0x0000bf00 },
	{ _MMIO(0x2788), 0x0007fca2 },
	{ _MMIO(0x278c), 0x0000bf00 },
	{ _MMIO(0x2790), 0x0007fc32 },
	{ _MMIO(0x2794), 0x0000bf00 },
	{ _MMIO(0x2798), 0x0007fc9a },
	{ _MMIO(0x279c), 0x0000bf00 },
	{ _MMIO(0x27a0), 0x0007fe6a },
	{ _MMIO(0x27a4), 0x0000bf00 },
	{ _MMIO(0x27a8), 0x0007fe7a },
	{ _MMIO(0x27ac), 0x0000bf00 },
};

/*
 * Flexible-EU counter configuration (registers 0xe458-0xe65c) for the
 * ComputeExtended metric set.
 */
static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};
714
/*
 * Mux programming for the ComputeExtended metric set; all writes
 * target register 0x9888.  Autogenerated — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_extended[] = {
	{ _MMIO(0x9888), 0x106c00e0 },
	{ _MMIO(0x9888), 0x141c8160 },
	{ _MMIO(0x9888), 0x161c8015 },
	{ _MMIO(0x9888), 0x181c0120 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x0e4e8000 },
	{ _MMIO(0x9888), 0x184e8000 },
	{ _MMIO(0x9888), 0x1a4eaaa0 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x024e8000 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x0e6c0b01 },
	{ _MMIO(0x9888), 0x006c0200 },
	{ _MMIO(0x9888), 0x026c000c },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x0e1bc000 },
	{ _MMIO(0x9888), 0x001b8000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x001c0041 },
	{ _MMIO(0x9888), 0x061c4200 },
	{ _MMIO(0x9888), 0x081c4443 },
	{ _MMIO(0x9888), 0x0a1c4645 },
	{ _MMIO(0x9888), 0x0c1c7647 },
	{ _MMIO(0x9888), 0x041c7357 },
	{ _MMIO(0x9888), 0x1c1c0030 },
	{ _MMIO(0x9888), 0x101c0000 },
	{ _MMIO(0x9888), 0x1a1c0000 },
	{ _MMIO(0x9888), 0x121c8000 },
	{ _MMIO(0x9888), 0x004c8000 },
	{ _MMIO(0x9888), 0x0a4caa2a },
	{ _MMIO(0x9888), 0x0c4c02aa },
	{ _MMIO(0x9888), 0x084ca000 },
	{ _MMIO(0x9888), 0x000da000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x0c0f5400 },
	{ _MMIO(0x9888), 0x0e0f5515 },
	{ _MMIO(0x9888), 0x100f0155 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2c8000 },
	{ _MMIO(0x9888), 0x162caa00 },
	{ _MMIO(0x9888), 0x182c00aa },
	{ _MMIO(0x9888), 0x022c8000 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x11907fff },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900040 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900802 },
	{ _MMIO(0x9888), 0x47900842 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900842 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900800 },
	{ _MMIO(0x9888), 0x53900000 },
};
788
789static int
790get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
791 const struct i915_oa_reg **regs,
792 int *lens)
793{
794 int n = 0;
795
796 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
797 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
798
799 regs[n] = mux_config_compute_extended;
800 lens[n] = ARRAY_SIZE(mux_config_compute_extended);
801 n++;
802
803 return n;
804}
805
/*
 * Boolean (B/C) counter programming for the "compute_l3_cache" metric set;
 * all writes target OA registers in the 0x27xx range.  Values appear
 * machine-generated — do not hand-tune.
 */
static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x30800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fffa },
	{ _MMIO(0x2774), 0x0000fefe },
	{ _MMIO(0x2778), 0x0007fffa },
	{ _MMIO(0x277c), 0x0000fefd },
	{ _MMIO(0x2790), 0x0007fffa },
	{ _MMIO(0x2794), 0x0000fbef },
	{ _MMIO(0x2798), 0x0007fffa },
	{ _MMIO(0x279c), 0x0000fbdf },
};
821
/*
 * Flexible EU counter programming (0xe4xx-0xe6xx registers) for the
 * "compute_l3_cache" metric set.
 */
static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00101100 },
	{ _MMIO(0xe45c), 0x00201200 },
	{ _MMIO(0xe55c), 0x00301300 },
	{ _MMIO(0xe65c), 0x00401400 },
};
831
/*
 * MUX programming for the "compute_l3_cache" metric set: an ordered
 * sequence of writes, almost all to register 0x9888, whose values encode
 * signal-select state.  Generated table — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
	{ _MMIO(0x9888), 0x166c0760 },
	{ _MMIO(0x9888), 0x1593001e },
	{ _MMIO(0x9888), 0x3f900003 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x0e4e8000 },
	{ _MMIO(0x9888), 0x184e8000 },
	{ _MMIO(0x9888), 0x1a4e8020 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x006c0051 },
	{ _MMIO(0x9888), 0x066c5000 },
	{ _MMIO(0x9888), 0x086c5c5d },
	{ _MMIO(0x9888), 0x0e6c5e5f },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x186c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x001b4000 },
	{ _MMIO(0x9888), 0x061b8000 },
	{ _MMIO(0x9888), 0x081bc000 },
	{ _MMIO(0x9888), 0x0e1bc000 },
	{ _MMIO(0x9888), 0x101c8000 },
	{ _MMIO(0x9888), 0x1a1ce000 },
	{ _MMIO(0x9888), 0x1c1c0030 },
	{ _MMIO(0x9888), 0x004c8000 },
	{ _MMIO(0x9888), 0x0a4c2a00 },
	{ _MMIO(0x9888), 0x0c4c0280 },
	{ _MMIO(0x9888), 0x000d2000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x0c0f0400 },
	{ _MMIO(0x9888), 0x0e0f1500 },
	{ _MMIO(0x9888), 0x100f0140 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2c8000 },
	{ _MMIO(0x9888), 0x162c0a00 },
	{ _MMIO(0x9888), 0x182c00a0 },
	{ _MMIO(0x9888), 0x03933300 },
	{ _MMIO(0x9888), 0x05930032 },
	{ _MMIO(0x9888), 0x11930000 },
	{ _MMIO(0x9888), 0x1b930000 },
	{ _MMIO(0x9888), 0x1d900157 },
	{ _MMIO(0x9888), 0x1f900158 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1190030f },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900000 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900021 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x53904444 },
	{ _MMIO(0x9888), 0x43900000 },
};
893
894static int
895get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
896 const struct i915_oa_reg **regs,
897 int *lens)
898{
899 int n = 0;
900
901 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
902 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
903
904 regs[n] = mux_config_compute_l3_cache;
905 lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
906 n++;
907
908 return n;
909}
910
/*
 * Boolean (B/C) counter programming for the "hdc_and_sf" metric set.
 * NOTE(review): 0x2740 is written twice (both times with 0) — harmless but
 * presumably a generator artifact; verify against the OA config generator.
 */
static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x10800000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000fdff },
};
922
/*
 * Flexible EU counter programming (0xe4xx-0xe6xx registers) for the
 * "hdc_and_sf" metric set.
 */
static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
932
/*
 * MUX programming for the "hdc_and_sf" metric set (ordered writes to
 * register 0x9888).  Generated table — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
	{ _MMIO(0x9888), 0x104f0232 },
	{ _MMIO(0x9888), 0x124f4640 },
	{ _MMIO(0x9888), 0x106c0232 },
	{ _MMIO(0x9888), 0x11834400 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x0c4e8000 },
	{ _MMIO(0x9888), 0x004f1880 },
	{ _MMIO(0x9888), 0x024f08bb },
	{ _MMIO(0x9888), 0x044f001b },
	{ _MMIO(0x9888), 0x046c0100 },
	{ _MMIO(0x9888), 0x066c000b },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x041b8000 },
	{ _MMIO(0x9888), 0x061b4000 },
	{ _MMIO(0x9888), 0x1a1c1800 },
	{ _MMIO(0x9888), 0x005b8000 },
	{ _MMIO(0x9888), 0x025bc000 },
	{ _MMIO(0x9888), 0x045b4000 },
	{ _MMIO(0x9888), 0x125c8000 },
	{ _MMIO(0x9888), 0x145c8000 },
	{ _MMIO(0x9888), 0x165c8000 },
	{ _MMIO(0x9888), 0x185c8000 },
	{ _MMIO(0x9888), 0x0a4c00a0 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f5000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x022cc000 },
	{ _MMIO(0x9888), 0x042cc000 },
	{ _MMIO(0x9888), 0x062cc000 },
	{ _MMIO(0x9888), 0x082cc000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x0f828000 },
	{ _MMIO(0x9888), 0x0f8305c0 },
	{ _MMIO(0x9888), 0x09830000 },
	{ _MMIO(0x9888), 0x07830000 },
	{ _MMIO(0x9888), 0x1d950080 },
	{ _MMIO(0x9888), 0x13928000 },
	{ _MMIO(0x9888), 0x0f988000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b900040 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900800 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
988
989static int
990get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
991 const struct i915_oa_reg **regs,
992 int *lens)
993{
994 int n = 0;
995
996 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
997 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
998
999 regs[n] = mux_config_hdc_and_sf;
1000 lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
1001 n++;
1002
1003 return n;
1004}
1005
/*
 * Boolean (B/C) counter programming for the "l3_1" metric set
 * (0x27xx OA registers).  Generated values — do not hand-tune.
 */
static const struct i915_oa_reg b_counter_config_l3_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};
1030
/*
 * Flexible EU counter programming (0xe4xx-0xe6xx registers) for the
 * "l3_1" metric set.
 */
static const struct i915_oa_reg flex_eu_config_l3_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1040
/*
 * MUX programming for the "l3_1" metric set (ordered writes to register
 * 0x9888).  Generated table — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_l3_1[] = {
	{ _MMIO(0x9888), 0x126c7b40 },
	{ _MMIO(0x9888), 0x166c0020 },
	{ _MMIO(0x9888), 0x0a603444 },
	{ _MMIO(0x9888), 0x0a613400 },
	{ _MMIO(0x9888), 0x1a4ea800 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x024e8000 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x064f4000 },
	{ _MMIO(0x9888), 0x0c6c5327 },
	{ _MMIO(0x9888), 0x0e6c5425 },
	{ _MMIO(0x9888), 0x006c2a00 },
	{ _MMIO(0x9888), 0x026c285b },
	{ _MMIO(0x9888), 0x046c005c },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x1a6c0800 },
	{ _MMIO(0x9888), 0x0c1bc000 },
	{ _MMIO(0x9888), 0x0e1bc000 },
	{ _MMIO(0x9888), 0x001b8000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x1c1c003c },
	{ _MMIO(0x9888), 0x121c8000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c0800 },
	{ _MMIO(0x9888), 0x065b4000 },
	{ _MMIO(0x9888), 0x1a5c1000 },
	{ _MMIO(0x9888), 0x10600000 },
	{ _MMIO(0x9888), 0x04600000 },
	{ _MMIO(0x9888), 0x0c610044 },
	{ _MMIO(0x9888), 0x10610000 },
	{ _MMIO(0x9888), 0x06610000 },
	{ _MMIO(0x9888), 0x0c4c02a8 },
	{ _MMIO(0x9888), 0x084ca000 },
	{ _MMIO(0x9888), 0x0a4c002a },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f0154 },
	{ _MMIO(0x9888), 0x0c0f5000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x182c00aa },
	{ _MMIO(0x9888), 0x022c8000 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2cc000 },
	{ _MMIO(0x9888), 0x1190ffc0 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900420 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900021 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900400 },
	{ _MMIO(0x9888), 0x43900421 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
};
1113
1114static int
1115get_l3_1_mux_config(struct drm_i915_private *dev_priv,
1116 const struct i915_oa_reg **regs,
1117 int *lens)
1118{
1119 int n = 0;
1120
1121 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1122 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1123
1124 regs[n] = mux_config_l3_1;
1125 lens[n] = ARRAY_SIZE(mux_config_l3_1);
1126 n++;
1127
1128 return n;
1129}
1130
/*
 * Boolean (B/C) counter programming for the "l3_2" metric set
 * (0x27xx OA registers).  Generated values — do not hand-tune.
 */
static const struct i915_oa_reg b_counter_config_l3_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00028002 },
	{ _MMIO(0x277c), 0x000087ff },
	{ _MMIO(0x2780), 0x00020002 },
	{ _MMIO(0x2784), 0x00008fff },
	{ _MMIO(0x2788), 0x00008002 },
	{ _MMIO(0x278c), 0x0000a7ff },
};
1147
/*
 * Flexible EU counter programming (0xe4xx-0xe6xx registers) for the
 * "l3_2" metric set.
 */
static const struct i915_oa_reg flex_eu_config_l3_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1157
/*
 * MUX programming for the "l3_2" metric set (ordered writes to register
 * 0x9888).  Generated table — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_l3_2[] = {
	{ _MMIO(0x9888), 0x126c02e0 },
	{ _MMIO(0x9888), 0x146c0001 },
	{ _MMIO(0x9888), 0x0a623400 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x064f4000 },
	{ _MMIO(0x9888), 0x026c3324 },
	{ _MMIO(0x9888), 0x046c3422 },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c0800 },
	{ _MMIO(0x9888), 0x065b4000 },
	{ _MMIO(0x9888), 0x1a5c1000 },
	{ _MMIO(0x9888), 0x06614000 },
	{ _MMIO(0x9888), 0x0c620044 },
	{ _MMIO(0x9888), 0x10620000 },
	{ _MMIO(0x9888), 0x06620000 },
	{ _MMIO(0x9888), 0x084c8000 },
	{ _MMIO(0x9888), 0x0a4c002a },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f4000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2cc000 },
	{ _MMIO(0x9888), 0x1190f800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x43900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1203
1204static int
1205get_l3_2_mux_config(struct drm_i915_private *dev_priv,
1206 const struct i915_oa_reg **regs,
1207 int *lens)
1208{
1209 int n = 0;
1210
1211 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1212 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1213
1214 regs[n] = mux_config_l3_2;
1215 lens[n] = ARRAY_SIZE(mux_config_l3_2);
1216 n++;
1217
1218 return n;
1219}
1220
/*
 * Boolean (B/C) counter programming for the "l3_3" metric set
 * (identical values to the l3_2 table; kept separate by the generator).
 */
static const struct i915_oa_reg b_counter_config_l3_3[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00028002 },
	{ _MMIO(0x277c), 0x000087ff },
	{ _MMIO(0x2780), 0x00020002 },
	{ _MMIO(0x2784), 0x00008fff },
	{ _MMIO(0x2788), 0x00008002 },
	{ _MMIO(0x278c), 0x0000a7ff },
};
1237
/*
 * Flexible EU counter programming (0xe4xx-0xe6xx registers) for the
 * "l3_3" metric set.
 */
static const struct i915_oa_reg flex_eu_config_l3_3[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1247
/*
 * MUX programming for the "l3_3" metric set (ordered writes to register
 * 0x9888).  Generated table — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_l3_3[] = {
	{ _MMIO(0x9888), 0x126c4e80 },
	{ _MMIO(0x9888), 0x146c0000 },
	{ _MMIO(0x9888), 0x0a633400 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x0c4e8000 },
	{ _MMIO(0x9888), 0x026c3321 },
	{ _MMIO(0x9888), 0x046c342f },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1a6c2000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x061b4000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c1800 },
	{ _MMIO(0x9888), 0x06604000 },
	{ _MMIO(0x9888), 0x0c630044 },
	{ _MMIO(0x9888), 0x10630000 },
	{ _MMIO(0x9888), 0x06630000 },
	{ _MMIO(0x9888), 0x084c8000 },
	{ _MMIO(0x9888), 0x0a4c00aa },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f4000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x1190f800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900002 },
	{ _MMIO(0x9888), 0x33900000 },
};
1292
1293static int
1294get_l3_3_mux_config(struct drm_i915_private *dev_priv,
1295 const struct i915_oa_reg **regs,
1296 int *lens)
1297{
1298 int n = 0;
1299
1300 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1301 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1302
1303 regs[n] = mux_config_l3_3;
1304 lens[n] = ARRAY_SIZE(mux_config_l3_3);
1305 n++;
1306
1307 return n;
1308}
1309
/*
 * Boolean (B/C) counter programming for the "rasterizer_and_pixel_backend"
 * metric set (0x27xx OA registers).
 */
static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x30800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000efff },
	{ _MMIO(0x2778), 0x00006000 },
	{ _MMIO(0x277c), 0x0000f3ff },
};
1322
/*
 * Flexible EU counter programming (0xe4xx-0xe6xx registers) for the
 * "rasterizer_and_pixel_backend" metric set.
 */
static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1332
/*
 * MUX programming for the "rasterizer_and_pixel_backend" metric set
 * (ordered writes to register 0x9888).  Generated table — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x9888), 0x102f3800 },
	{ _MMIO(0x9888), 0x144d0500 },
	{ _MMIO(0x9888), 0x120d03c0 },
	{ _MMIO(0x9888), 0x140d03cf },
	{ _MMIO(0x9888), 0x0c0f0004 },
	{ _MMIO(0x9888), 0x0c4e4000 },
	{ _MMIO(0x9888), 0x042f0480 },
	{ _MMIO(0x9888), 0x082f0000 },
	{ _MMIO(0x9888), 0x022f0000 },
	{ _MMIO(0x9888), 0x0a4c0090 },
	{ _MMIO(0x9888), 0x064d0027 },
	{ _MMIO(0x9888), 0x004d0000 },
	{ _MMIO(0x9888), 0x000d0d40 },
	{ _MMIO(0x9888), 0x020d803f },
	{ _MMIO(0x9888), 0x040d8023 },
	{ _MMIO(0x9888), 0x100d0000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x020f0010 },
	{ _MMIO(0x9888), 0x000f0000 },
	{ _MMIO(0x9888), 0x0e0f0050 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41901400 },
	{ _MMIO(0x9888), 0x43901485 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900001 },
	{ _MMIO(0x9888), 0x33900000 },
};
1365
1366static int
1367get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
1368 const struct i915_oa_reg **regs,
1369 int *lens)
1370{
1371 int n = 0;
1372
1373 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1374 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1375
1376 regs[n] = mux_config_rasterizer_and_pixel_backend;
1377 lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
1378 n++;
1379
1380 return n;
1381}
1382
/*
 * Boolean (B/C) counter programming for the "sampler" metric set
 * (0x27xx OA registers).
 */
static const struct i915_oa_reg b_counter_config_sampler[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x70800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x0000c000 },
	{ _MMIO(0x2774), 0x0000e7ff },
	{ _MMIO(0x2778), 0x00003000 },
	{ _MMIO(0x277c), 0x0000f9ff },
	{ _MMIO(0x2780), 0x00000c00 },
	{ _MMIO(0x2784), 0x0000fe7f },
};
1397
/*
 * Flexible EU counter programming (0xe4xx-0xe6xx registers) for the
 * "sampler" metric set.
 */
static const struct i915_oa_reg flex_eu_config_sampler[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1407
/*
 * MUX programming for the "sampler" metric set (ordered writes to register
 * 0x9888).  Generated table — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_sampler[] = {
	{ _MMIO(0x9888), 0x14152c00 },
	{ _MMIO(0x9888), 0x16150005 },
	{ _MMIO(0x9888), 0x121600a0 },
	{ _MMIO(0x9888), 0x14352c00 },
	{ _MMIO(0x9888), 0x16350005 },
	{ _MMIO(0x9888), 0x123600a0 },
	{ _MMIO(0x9888), 0x14552c00 },
	{ _MMIO(0x9888), 0x16550005 },
	{ _MMIO(0x9888), 0x125600a0 },
	{ _MMIO(0x9888), 0x062f6000 },
	{ _MMIO(0x9888), 0x022f2000 },
	{ _MMIO(0x9888), 0x0c4c0050 },
	{ _MMIO(0x9888), 0x0a4c0010 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f0350 },
	{ _MMIO(0x9888), 0x0c0fb000 },
	{ _MMIO(0x9888), 0x0e0f00da },
	{ _MMIO(0x9888), 0x182c0028 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x022dc000 },
	{ _MMIO(0x9888), 0x042d4000 },
	{ _MMIO(0x9888), 0x0c138000 },
	{ _MMIO(0x9888), 0x0e132000 },
	{ _MMIO(0x9888), 0x0413c000 },
	{ _MMIO(0x9888), 0x1c140018 },
	{ _MMIO(0x9888), 0x0c157000 },
	{ _MMIO(0x9888), 0x0e150078 },
	{ _MMIO(0x9888), 0x10150000 },
	{ _MMIO(0x9888), 0x04162180 },
	{ _MMIO(0x9888), 0x02160000 },
	{ _MMIO(0x9888), 0x04174000 },
	{ _MMIO(0x9888), 0x0233a000 },
	{ _MMIO(0x9888), 0x04333000 },
	{ _MMIO(0x9888), 0x14348000 },
	{ _MMIO(0x9888), 0x16348000 },
	{ _MMIO(0x9888), 0x02357870 },
	{ _MMIO(0x9888), 0x10350000 },
	{ _MMIO(0x9888), 0x04360043 },
	{ _MMIO(0x9888), 0x02360000 },
	{ _MMIO(0x9888), 0x04371000 },
	{ _MMIO(0x9888), 0x0e538000 },
	{ _MMIO(0x9888), 0x00538000 },
	{ _MMIO(0x9888), 0x06533000 },
	{ _MMIO(0x9888), 0x1c540020 },
	{ _MMIO(0x9888), 0x12548000 },
	{ _MMIO(0x9888), 0x0e557000 },
	{ _MMIO(0x9888), 0x00557800 },
	{ _MMIO(0x9888), 0x10550000 },
	{ _MMIO(0x9888), 0x06560043 },
	{ _MMIO(0x9888), 0x02560000 },
	{ _MMIO(0x9888), 0x06571000 },
	{ _MMIO(0x9888), 0x1190ff80 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900060 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900060 },
};
1478
1479static int
1480get_sampler_mux_config(struct drm_i915_private *dev_priv,
1481 const struct i915_oa_reg **regs,
1482 int *lens)
1483{
1484 int n = 0;
1485
1486 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1487 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1488
1489 regs[n] = mux_config_sampler;
1490 lens[n] = ARRAY_SIZE(mux_config_sampler);
1491 n++;
1492
1493 return n;
1494}
1495
/*
 * Boolean (B/C) counter programming for the "tdl_1" metric set
 * (0x27xx OA registers).
 */
static const struct i915_oa_reg b_counter_config_tdl_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x00007fff },
	{ _MMIO(0x2778), 0x00000000 },
	{ _MMIO(0x277c), 0x00009fff },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000efff },
	{ _MMIO(0x2788), 0x00000000 },
	{ _MMIO(0x278c), 0x0000f3ff },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000fdff },
	{ _MMIO(0x2798), 0x00000000 },
	{ _MMIO(0x279c), 0x0000fe7f },
};
1516
/*
 * Flexible EU counter programming (0xe4xx-0xe6xx registers) for the
 * "tdl_1" metric set.
 */
static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1526
/*
 * MUX programming for the "tdl_1" metric set (ordered writes to register
 * 0x9888).  Generated table — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_tdl_1[] = {
	{ _MMIO(0x9888), 0x12120000 },
	{ _MMIO(0x9888), 0x12320000 },
	{ _MMIO(0x9888), 0x12520000 },
	{ _MMIO(0x9888), 0x002f8000 },
	{ _MMIO(0x9888), 0x022f3000 },
	{ _MMIO(0x9888), 0x0a4c0015 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f03a0 },
	{ _MMIO(0x9888), 0x0c0ff000 },
	{ _MMIO(0x9888), 0x0e0f0095 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2d8000 },
	{ _MMIO(0x9888), 0x0e2d4000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x02108000 },
	{ _MMIO(0x9888), 0x0410c000 },
	{ _MMIO(0x9888), 0x02118000 },
	{ _MMIO(0x9888), 0x0411c000 },
	{ _MMIO(0x9888), 0x02121880 },
	{ _MMIO(0x9888), 0x041219b5 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x02134000 },
	{ _MMIO(0x9888), 0x04135000 },
	{ _MMIO(0x9888), 0x0c308000 },
	{ _MMIO(0x9888), 0x0e304000 },
	{ _MMIO(0x9888), 0x06304000 },
	{ _MMIO(0x9888), 0x0c318000 },
	{ _MMIO(0x9888), 0x0e314000 },
	{ _MMIO(0x9888), 0x06314000 },
	{ _MMIO(0x9888), 0x0c321a80 },
	{ _MMIO(0x9888), 0x0e320033 },
	{ _MMIO(0x9888), 0x06320031 },
	{ _MMIO(0x9888), 0x00320000 },
	{ _MMIO(0x9888), 0x0c334000 },
	{ _MMIO(0x9888), 0x0e331000 },
	{ _MMIO(0x9888), 0x06331000 },
	{ _MMIO(0x9888), 0x0e508000 },
	{ _MMIO(0x9888), 0x00508000 },
	{ _MMIO(0x9888), 0x02504000 },
	{ _MMIO(0x9888), 0x0e518000 },
	{ _MMIO(0x9888), 0x00518000 },
	{ _MMIO(0x9888), 0x02514000 },
	{ _MMIO(0x9888), 0x0e521880 },
	{ _MMIO(0x9888), 0x00521a80 },
	{ _MMIO(0x9888), 0x02520033 },
	{ _MMIO(0x9888), 0x0e534000 },
	{ _MMIO(0x9888), 0x00534000 },
	{ _MMIO(0x9888), 0x02531000 },
	{ _MMIO(0x9888), 0x1190ff80 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900062 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
};
1596
1597static int
1598get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
1599 const struct i915_oa_reg **regs,
1600 int *lens)
1601{
1602 int n = 0;
1603
1604 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1605 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1606
1607 regs[n] = mux_config_tdl_1;
1608 lens[n] = ARRAY_SIZE(mux_config_tdl_1);
1609 n++;
1610
1611 return n;
1612}
1613
/*
 * Boolean (B/C) counter programming for the "tdl_2" metric set
 * (0x27xx OA registers).
 */
static const struct i915_oa_reg b_counter_config_tdl_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};
1622
/*
 * Flexible EU counter programming (0xe4xx-0xe6xx registers) for the
 * "tdl_2" metric set.
 */
static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1632
/*
 * MUX programming for the "tdl_2" metric set (ordered writes to register
 * 0x9888).  Generated table — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_tdl_2[] = {
	{ _MMIO(0x9888), 0x12124d60 },
	{ _MMIO(0x9888), 0x12322e60 },
	{ _MMIO(0x9888), 0x12524d60 },
	{ _MMIO(0x9888), 0x022f3000 },
	{ _MMIO(0x9888), 0x0a4c0014 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0fe000 },
	{ _MMIO(0x9888), 0x0e0f0097 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x002d8000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x0410c000 },
	{ _MMIO(0x9888), 0x0411c000 },
	{ _MMIO(0x9888), 0x04121fb7 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x04135000 },
	{ _MMIO(0x9888), 0x00308000 },
	{ _MMIO(0x9888), 0x06304000 },
	{ _MMIO(0x9888), 0x00318000 },
	{ _MMIO(0x9888), 0x06314000 },
	{ _MMIO(0x9888), 0x00321b80 },
	{ _MMIO(0x9888), 0x0632003f },
	{ _MMIO(0x9888), 0x00334000 },
	{ _MMIO(0x9888), 0x06331000 },
	{ _MMIO(0x9888), 0x0250c000 },
	{ _MMIO(0x9888), 0x0251c000 },
	{ _MMIO(0x9888), 0x02521fb7 },
	{ _MMIO(0x9888), 0x00520000 },
	{ _MMIO(0x9888), 0x02535000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900800 },
	{ _MMIO(0x9888), 0x43900063 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
	{ _MMIO(0x9888), 0x33900000 },
};
1676
1677static int
1678get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
1679 const struct i915_oa_reg **regs,
1680 int *lens)
1681{
1682 int n = 0;
1683
1684 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1685 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1686
1687 regs[n] = mux_config_tdl_2;
1688 lens[n] = ARRAY_SIZE(mux_config_tdl_2);
1689 n++;
1690
1691 return n;
1692}
1693
/* The "compute_extra" metric set needs no boolean-counter programming. */
static const struct i915_oa_reg b_counter_config_compute_extra[] = {
};
1696
/* The "compute_extra" metric set needs no flex-EU programming. */
static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
};
1699
/*
 * MUX programming for the "compute_extra" metric set (ordered writes to
 * register 0x9888).  Generated table — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_extra[] = {
	{ _MMIO(0x9888), 0x121203e0 },
	{ _MMIO(0x9888), 0x123203e0 },
	{ _MMIO(0x9888), 0x125203e0 },
	{ _MMIO(0x9888), 0x129203e0 },
	{ _MMIO(0x9888), 0x12b203e0 },
	{ _MMIO(0x9888), 0x12d203e0 },
	{ _MMIO(0x9888), 0x024ec000 },
	{ _MMIO(0x9888), 0x044ec000 },
	{ _MMIO(0x9888), 0x064ec000 },
	{ _MMIO(0x9888), 0x022f4000 },
	{ _MMIO(0x9888), 0x084ca000 },
	{ _MMIO(0x9888), 0x0a4c0042 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f5000 },
	{ _MMIO(0x9888), 0x0e0f006d },
	{ _MMIO(0x9888), 0x022c8000 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x042d8000 },
	{ _MMIO(0x9888), 0x06104000 },
	{ _MMIO(0x9888), 0x06114000 },
	{ _MMIO(0x9888), 0x06120033 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x06131000 },
	{ _MMIO(0x9888), 0x04308000 },
	{ _MMIO(0x9888), 0x04318000 },
	{ _MMIO(0x9888), 0x04321980 },
	{ _MMIO(0x9888), 0x00320000 },
	{ _MMIO(0x9888), 0x04334000 },
	{ _MMIO(0x9888), 0x04504000 },
	{ _MMIO(0x9888), 0x04514000 },
	{ _MMIO(0x9888), 0x04520033 },
	{ _MMIO(0x9888), 0x00520000 },
	{ _MMIO(0x9888), 0x04531000 },
	{ _MMIO(0x9888), 0x00af8000 },
	{ _MMIO(0x9888), 0x0acc0001 },
	{ _MMIO(0x9888), 0x008d8000 },
	{ _MMIO(0x9888), 0x028da000 },
	{ _MMIO(0x9888), 0x0c8fb000 },
	{ _MMIO(0x9888), 0x0e8f0001 },
	{ _MMIO(0x9888), 0x06ac8000 },
	{ _MMIO(0x9888), 0x02ad4000 },
	{ _MMIO(0x9888), 0x02908000 },
	{ _MMIO(0x9888), 0x02918000 },
	{ _MMIO(0x9888), 0x02921980 },
	{ _MMIO(0x9888), 0x00920000 },
	{ _MMIO(0x9888), 0x02934000 },
	{ _MMIO(0x9888), 0x02b04000 },
	{ _MMIO(0x9888), 0x02b14000 },
	{ _MMIO(0x9888), 0x02b20033 },
	{ _MMIO(0x9888), 0x00b20000 },
	{ _MMIO(0x9888), 0x02b31000 },
	{ _MMIO(0x9888), 0x00d08000 },
	{ _MMIO(0x9888), 0x00d18000 },
	{ _MMIO(0x9888), 0x00d21980 },
	{ _MMIO(0x9888), 0x00d34000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x43900002 },
	{ _MMIO(0x9888), 0x53900420 },
	{ _MMIO(0x9888), 0x459000a1 },
	{ _MMIO(0x9888), 0x33900000 },
};
1770
1771static int
1772get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
1773 const struct i915_oa_reg **regs,
1774 int *lens)
1775{
1776 int n = 0;
1777
1778 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1779 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1780
1781 regs[n] = mux_config_compute_extra;
1782 lens[n] = ARRAY_SIZE(mux_config_compute_extra);
1783 n++;
1784
1785 return n;
1786}
1787
/*
 * VME_PIPE metric set: boolean-counter (0x27xx OA report trigger/filter)
 * register programming. Values are generator output — treat as opaque.
 */
static const struct i915_oa_reg b_counter_config_vme_pipe[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00100030 },
	{ _MMIO(0x2774), 0x0000fff9 },
	{ _MMIO(0x2778), 0x00000002 },
	{ _MMIO(0x277c), 0x0000fffc },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000fff3 },
	{ _MMIO(0x2788), 0x00100180 },
	{ _MMIO(0x278c), 0x0000ffcf },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00000002 },
	{ _MMIO(0x279c), 0x0000ff3f },
};

/* VME_PIPE flex-EU register programming (0xe458/0xe558). */
static const struct i915_oa_reg flex_eu_config_vme_pipe[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00008003 },
};

/*
 * MUX config for VME_PIPE; all writes target register 0x9888. Opaque
 * generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_vme_pipe[] = {
	{ _MMIO(0x9888), 0x141a5800 },
	{ _MMIO(0x9888), 0x161a00c0 },
	{ _MMIO(0x9888), 0x12180240 },
	{ _MMIO(0x9888), 0x14180002 },
	{ _MMIO(0x9888), 0x149a5800 },
	{ _MMIO(0x9888), 0x169a00c0 },
	{ _MMIO(0x9888), 0x12980240 },
	{ _MMIO(0x9888), 0x14980002 },
	{ _MMIO(0x9888), 0x1a4e3fc0 },
	{ _MMIO(0x9888), 0x002f1000 },
	{ _MMIO(0x9888), 0x022f8000 },
	{ _MMIO(0x9888), 0x042f3000 },
	{ _MMIO(0x9888), 0x004c4000 },
	{ _MMIO(0x9888), 0x0a4c9500 },
	{ _MMIO(0x9888), 0x0c4c002a },
	{ _MMIO(0x9888), 0x000d2000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0c0f0400 },
	{ _MMIO(0x9888), 0x0e0f5500 },
	{ _MMIO(0x9888), 0x100f0015 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2c8000 },
	{ _MMIO(0x9888), 0x162caa00 },
	{ _MMIO(0x9888), 0x182c000a },
	{ _MMIO(0x9888), 0x04193000 },
	{ _MMIO(0x9888), 0x081a28c1 },
	{ _MMIO(0x9888), 0x001a0000 },
	{ _MMIO(0x9888), 0x00133000 },
	{ _MMIO(0x9888), 0x0613c000 },
	{ _MMIO(0x9888), 0x0813f000 },
	{ _MMIO(0x9888), 0x00172000 },
	{ _MMIO(0x9888), 0x06178000 },
	{ _MMIO(0x9888), 0x0817a000 },
	{ _MMIO(0x9888), 0x00180037 },
	{ _MMIO(0x9888), 0x06180940 },
	{ _MMIO(0x9888), 0x08180000 },
	{ _MMIO(0x9888), 0x02180000 },
	{ _MMIO(0x9888), 0x04183000 },
	{ _MMIO(0x9888), 0x04afc000 },
	{ _MMIO(0x9888), 0x06af3000 },
	{ _MMIO(0x9888), 0x0acc4000 },
	{ _MMIO(0x9888), 0x0ccc0015 },
	{ _MMIO(0x9888), 0x0a8da000 },
	{ _MMIO(0x9888), 0x0c8da000 },
	{ _MMIO(0x9888), 0x0e8f4000 },
	{ _MMIO(0x9888), 0x108f0015 },
	{ _MMIO(0x9888), 0x16aca000 },
	{ _MMIO(0x9888), 0x18ac000a },
	{ _MMIO(0x9888), 0x06993000 },
	{ _MMIO(0x9888), 0x0c9a28c1 },
	{ _MMIO(0x9888), 0x009a0000 },
	{ _MMIO(0x9888), 0x0a93f000 },
	{ _MMIO(0x9888), 0x0c93f000 },
	{ _MMIO(0x9888), 0x0a97a000 },
	{ _MMIO(0x9888), 0x0c97a000 },
	{ _MMIO(0x9888), 0x0a980977 },
	{ _MMIO(0x9888), 0x08980000 },
	{ _MMIO(0x9888), 0x04980000 },
	{ _MMIO(0x9888), 0x06983000 },
	{ _MMIO(0x9888), 0x119000ff },
	{ _MMIO(0x9888), 0x51900040 },
	{ _MMIO(0x9888), 0x41900020 },
	{ _MMIO(0x9888), 0x55900004 },
	{ _MMIO(0x9888), 0x45900400 },
	{ _MMIO(0x9888), 0x479008a5 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900002 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1887
1888static int
1889get_vme_pipe_mux_config(struct drm_i915_private *dev_priv,
1890 const struct i915_oa_reg **regs,
1891 int *lens)
1892{
1893 int n = 0;
1894
1895 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1896 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1897
1898 regs[n] = mux_config_vme_pipe;
1899 lens[n] = ARRAY_SIZE(mux_config_vme_pipe);
1900 n++;
1901
1902 return n;
1903}
1904
/*
 * TEST_OA metric set: boolean-counter (0x27xx) register programming used
 * to sanity-check the OA unit. Generated values — treat as opaque.
 */
static const struct i915_oa_reg b_counter_config_test_oa[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2770), 0x00000004 },
	{ _MMIO(0x2774), 0x00000000 },
	{ _MMIO(0x2778), 0x00000003 },
	{ _MMIO(0x277c), 0x00000000 },
	{ _MMIO(0x2780), 0x00000007 },
	{ _MMIO(0x2784), 0x00000000 },
	{ _MMIO(0x2788), 0x00100002 },
	{ _MMIO(0x278c), 0x0000fff7 },
	{ _MMIO(0x2790), 0x00100002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00100082 },
	{ _MMIO(0x279c), 0x0000ffef },
	{ _MMIO(0x27a0), 0x001000c2 },
	{ _MMIO(0x27a4), 0x0000ffe7 },
	{ _MMIO(0x27a8), 0x00100001 },
	{ _MMIO(0x27ac), 0x0000ffe7 },
};

/* TEST_OA programs no flex-EU registers. */
static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};

/*
 * MUX config for TEST_OA; all writes target register 0x9888. Opaque
 * generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_test_oa[] = {
	{ _MMIO(0x9888), 0x11810000 },
	{ _MMIO(0x9888), 0x07810013 },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x1b930040 },
	{ _MMIO(0x9888), 0x07e54000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1947
1948static int
1949get_test_oa_mux_config(struct drm_i915_private *dev_priv,
1950 const struct i915_oa_reg **regs,
1951 int *lens)
1952{
1953 int n = 0;
1954
1955 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1956 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1957
1958 regs[n] = mux_config_test_oa;
1959 lens[n] = ARRAY_SIZE(mux_config_test_oa);
1960 n++;
1961
1962 return n;
1963}
1964
1965int i915_oa_select_metric_set_kblgt3(struct drm_i915_private *dev_priv)
1966{
1967 dev_priv->perf.oa.n_mux_configs = 0;
1968 dev_priv->perf.oa.b_counter_regs = NULL;
1969 dev_priv->perf.oa.b_counter_regs_len = 0;
1970 dev_priv->perf.oa.flex_regs = NULL;
1971 dev_priv->perf.oa.flex_regs_len = 0;
1972
1973 switch (dev_priv->perf.oa.metrics_set) {
1974 case METRIC_SET_ID_RENDER_BASIC:
1975 dev_priv->perf.oa.n_mux_configs =
1976 get_render_basic_mux_config(dev_priv,
1977 dev_priv->perf.oa.mux_regs,
1978 dev_priv->perf.oa.mux_regs_lens);
1979 if (dev_priv->perf.oa.n_mux_configs == 0) {
1980 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
1981
1982 /* EINVAL because *_register_sysfs already checked this
1983 * and so it wouldn't have been advertised to userspace and
1984 * so shouldn't have been requested
1985 */
1986 return -EINVAL;
1987 }
1988
1989 dev_priv->perf.oa.b_counter_regs =
1990 b_counter_config_render_basic;
1991 dev_priv->perf.oa.b_counter_regs_len =
1992 ARRAY_SIZE(b_counter_config_render_basic);
1993
1994 dev_priv->perf.oa.flex_regs =
1995 flex_eu_config_render_basic;
1996 dev_priv->perf.oa.flex_regs_len =
1997 ARRAY_SIZE(flex_eu_config_render_basic);
1998
1999 return 0;
2000 case METRIC_SET_ID_COMPUTE_BASIC:
2001 dev_priv->perf.oa.n_mux_configs =
2002 get_compute_basic_mux_config(dev_priv,
2003 dev_priv->perf.oa.mux_regs,
2004 dev_priv->perf.oa.mux_regs_lens);
2005 if (dev_priv->perf.oa.n_mux_configs == 0) {
2006 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
2007
2008 /* EINVAL because *_register_sysfs already checked this
2009 * and so it wouldn't have been advertised to userspace and
2010 * so shouldn't have been requested
2011 */
2012 return -EINVAL;
2013 }
2014
2015 dev_priv->perf.oa.b_counter_regs =
2016 b_counter_config_compute_basic;
2017 dev_priv->perf.oa.b_counter_regs_len =
2018 ARRAY_SIZE(b_counter_config_compute_basic);
2019
2020 dev_priv->perf.oa.flex_regs =
2021 flex_eu_config_compute_basic;
2022 dev_priv->perf.oa.flex_regs_len =
2023 ARRAY_SIZE(flex_eu_config_compute_basic);
2024
2025 return 0;
2026 case METRIC_SET_ID_RENDER_PIPE_PROFILE:
2027 dev_priv->perf.oa.n_mux_configs =
2028 get_render_pipe_profile_mux_config(dev_priv,
2029 dev_priv->perf.oa.mux_regs,
2030 dev_priv->perf.oa.mux_regs_lens);
2031 if (dev_priv->perf.oa.n_mux_configs == 0) {
2032 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
2033
2034 /* EINVAL because *_register_sysfs already checked this
2035 * and so it wouldn't have been advertised to userspace and
2036 * so shouldn't have been requested
2037 */
2038 return -EINVAL;
2039 }
2040
2041 dev_priv->perf.oa.b_counter_regs =
2042 b_counter_config_render_pipe_profile;
2043 dev_priv->perf.oa.b_counter_regs_len =
2044 ARRAY_SIZE(b_counter_config_render_pipe_profile);
2045
2046 dev_priv->perf.oa.flex_regs =
2047 flex_eu_config_render_pipe_profile;
2048 dev_priv->perf.oa.flex_regs_len =
2049 ARRAY_SIZE(flex_eu_config_render_pipe_profile);
2050
2051 return 0;
2052 case METRIC_SET_ID_MEMORY_READS:
2053 dev_priv->perf.oa.n_mux_configs =
2054 get_memory_reads_mux_config(dev_priv,
2055 dev_priv->perf.oa.mux_regs,
2056 dev_priv->perf.oa.mux_regs_lens);
2057 if (dev_priv->perf.oa.n_mux_configs == 0) {
2058 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
2059
2060 /* EINVAL because *_register_sysfs already checked this
2061 * and so it wouldn't have been advertised to userspace and
2062 * so shouldn't have been requested
2063 */
2064 return -EINVAL;
2065 }
2066
2067 dev_priv->perf.oa.b_counter_regs =
2068 b_counter_config_memory_reads;
2069 dev_priv->perf.oa.b_counter_regs_len =
2070 ARRAY_SIZE(b_counter_config_memory_reads);
2071
2072 dev_priv->perf.oa.flex_regs =
2073 flex_eu_config_memory_reads;
2074 dev_priv->perf.oa.flex_regs_len =
2075 ARRAY_SIZE(flex_eu_config_memory_reads);
2076
2077 return 0;
2078 case METRIC_SET_ID_MEMORY_WRITES:
2079 dev_priv->perf.oa.n_mux_configs =
2080 get_memory_writes_mux_config(dev_priv,
2081 dev_priv->perf.oa.mux_regs,
2082 dev_priv->perf.oa.mux_regs_lens);
2083 if (dev_priv->perf.oa.n_mux_configs == 0) {
2084 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
2085
2086 /* EINVAL because *_register_sysfs already checked this
2087 * and so it wouldn't have been advertised to userspace and
2088 * so shouldn't have been requested
2089 */
2090 return -EINVAL;
2091 }
2092
2093 dev_priv->perf.oa.b_counter_regs =
2094 b_counter_config_memory_writes;
2095 dev_priv->perf.oa.b_counter_regs_len =
2096 ARRAY_SIZE(b_counter_config_memory_writes);
2097
2098 dev_priv->perf.oa.flex_regs =
2099 flex_eu_config_memory_writes;
2100 dev_priv->perf.oa.flex_regs_len =
2101 ARRAY_SIZE(flex_eu_config_memory_writes);
2102
2103 return 0;
2104 case METRIC_SET_ID_COMPUTE_EXTENDED:
2105 dev_priv->perf.oa.n_mux_configs =
2106 get_compute_extended_mux_config(dev_priv,
2107 dev_priv->perf.oa.mux_regs,
2108 dev_priv->perf.oa.mux_regs_lens);
2109 if (dev_priv->perf.oa.n_mux_configs == 0) {
2110 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
2111
2112 /* EINVAL because *_register_sysfs already checked this
2113 * and so it wouldn't have been advertised to userspace and
2114 * so shouldn't have been requested
2115 */
2116 return -EINVAL;
2117 }
2118
2119 dev_priv->perf.oa.b_counter_regs =
2120 b_counter_config_compute_extended;
2121 dev_priv->perf.oa.b_counter_regs_len =
2122 ARRAY_SIZE(b_counter_config_compute_extended);
2123
2124 dev_priv->perf.oa.flex_regs =
2125 flex_eu_config_compute_extended;
2126 dev_priv->perf.oa.flex_regs_len =
2127 ARRAY_SIZE(flex_eu_config_compute_extended);
2128
2129 return 0;
2130 case METRIC_SET_ID_COMPUTE_L3_CACHE:
2131 dev_priv->perf.oa.n_mux_configs =
2132 get_compute_l3_cache_mux_config(dev_priv,
2133 dev_priv->perf.oa.mux_regs,
2134 dev_priv->perf.oa.mux_regs_lens);
2135 if (dev_priv->perf.oa.n_mux_configs == 0) {
2136 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
2137
2138 /* EINVAL because *_register_sysfs already checked this
2139 * and so it wouldn't have been advertised to userspace and
2140 * so shouldn't have been requested
2141 */
2142 return -EINVAL;
2143 }
2144
2145 dev_priv->perf.oa.b_counter_regs =
2146 b_counter_config_compute_l3_cache;
2147 dev_priv->perf.oa.b_counter_regs_len =
2148 ARRAY_SIZE(b_counter_config_compute_l3_cache);
2149
2150 dev_priv->perf.oa.flex_regs =
2151 flex_eu_config_compute_l3_cache;
2152 dev_priv->perf.oa.flex_regs_len =
2153 ARRAY_SIZE(flex_eu_config_compute_l3_cache);
2154
2155 return 0;
2156 case METRIC_SET_ID_HDC_AND_SF:
2157 dev_priv->perf.oa.n_mux_configs =
2158 get_hdc_and_sf_mux_config(dev_priv,
2159 dev_priv->perf.oa.mux_regs,
2160 dev_priv->perf.oa.mux_regs_lens);
2161 if (dev_priv->perf.oa.n_mux_configs == 0) {
2162 DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
2163
2164 /* EINVAL because *_register_sysfs already checked this
2165 * and so it wouldn't have been advertised to userspace and
2166 * so shouldn't have been requested
2167 */
2168 return -EINVAL;
2169 }
2170
2171 dev_priv->perf.oa.b_counter_regs =
2172 b_counter_config_hdc_and_sf;
2173 dev_priv->perf.oa.b_counter_regs_len =
2174 ARRAY_SIZE(b_counter_config_hdc_and_sf);
2175
2176 dev_priv->perf.oa.flex_regs =
2177 flex_eu_config_hdc_and_sf;
2178 dev_priv->perf.oa.flex_regs_len =
2179 ARRAY_SIZE(flex_eu_config_hdc_and_sf);
2180
2181 return 0;
2182 case METRIC_SET_ID_L3_1:
2183 dev_priv->perf.oa.n_mux_configs =
2184 get_l3_1_mux_config(dev_priv,
2185 dev_priv->perf.oa.mux_regs,
2186 dev_priv->perf.oa.mux_regs_lens);
2187 if (dev_priv->perf.oa.n_mux_configs == 0) {
2188 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
2189
2190 /* EINVAL because *_register_sysfs already checked this
2191 * and so it wouldn't have been advertised to userspace and
2192 * so shouldn't have been requested
2193 */
2194 return -EINVAL;
2195 }
2196
2197 dev_priv->perf.oa.b_counter_regs =
2198 b_counter_config_l3_1;
2199 dev_priv->perf.oa.b_counter_regs_len =
2200 ARRAY_SIZE(b_counter_config_l3_1);
2201
2202 dev_priv->perf.oa.flex_regs =
2203 flex_eu_config_l3_1;
2204 dev_priv->perf.oa.flex_regs_len =
2205 ARRAY_SIZE(flex_eu_config_l3_1);
2206
2207 return 0;
2208 case METRIC_SET_ID_L3_2:
2209 dev_priv->perf.oa.n_mux_configs =
2210 get_l3_2_mux_config(dev_priv,
2211 dev_priv->perf.oa.mux_regs,
2212 dev_priv->perf.oa.mux_regs_lens);
2213 if (dev_priv->perf.oa.n_mux_configs == 0) {
2214 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
2215
2216 /* EINVAL because *_register_sysfs already checked this
2217 * and so it wouldn't have been advertised to userspace and
2218 * so shouldn't have been requested
2219 */
2220 return -EINVAL;
2221 }
2222
2223 dev_priv->perf.oa.b_counter_regs =
2224 b_counter_config_l3_2;
2225 dev_priv->perf.oa.b_counter_regs_len =
2226 ARRAY_SIZE(b_counter_config_l3_2);
2227
2228 dev_priv->perf.oa.flex_regs =
2229 flex_eu_config_l3_2;
2230 dev_priv->perf.oa.flex_regs_len =
2231 ARRAY_SIZE(flex_eu_config_l3_2);
2232
2233 return 0;
2234 case METRIC_SET_ID_L3_3:
2235 dev_priv->perf.oa.n_mux_configs =
2236 get_l3_3_mux_config(dev_priv,
2237 dev_priv->perf.oa.mux_regs,
2238 dev_priv->perf.oa.mux_regs_lens);
2239 if (dev_priv->perf.oa.n_mux_configs == 0) {
2240 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
2241
2242 /* EINVAL because *_register_sysfs already checked this
2243 * and so it wouldn't have been advertised to userspace and
2244 * so shouldn't have been requested
2245 */
2246 return -EINVAL;
2247 }
2248
2249 dev_priv->perf.oa.b_counter_regs =
2250 b_counter_config_l3_3;
2251 dev_priv->perf.oa.b_counter_regs_len =
2252 ARRAY_SIZE(b_counter_config_l3_3);
2253
2254 dev_priv->perf.oa.flex_regs =
2255 flex_eu_config_l3_3;
2256 dev_priv->perf.oa.flex_regs_len =
2257 ARRAY_SIZE(flex_eu_config_l3_3);
2258
2259 return 0;
2260 case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
2261 dev_priv->perf.oa.n_mux_configs =
2262 get_rasterizer_and_pixel_backend_mux_config(dev_priv,
2263 dev_priv->perf.oa.mux_regs,
2264 dev_priv->perf.oa.mux_regs_lens);
2265 if (dev_priv->perf.oa.n_mux_configs == 0) {
2266 DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
2267
2268 /* EINVAL because *_register_sysfs already checked this
2269 * and so it wouldn't have been advertised to userspace and
2270 * so shouldn't have been requested
2271 */
2272 return -EINVAL;
2273 }
2274
2275 dev_priv->perf.oa.b_counter_regs =
2276 b_counter_config_rasterizer_and_pixel_backend;
2277 dev_priv->perf.oa.b_counter_regs_len =
2278 ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
2279
2280 dev_priv->perf.oa.flex_regs =
2281 flex_eu_config_rasterizer_and_pixel_backend;
2282 dev_priv->perf.oa.flex_regs_len =
2283 ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
2284
2285 return 0;
2286 case METRIC_SET_ID_SAMPLER:
2287 dev_priv->perf.oa.n_mux_configs =
2288 get_sampler_mux_config(dev_priv,
2289 dev_priv->perf.oa.mux_regs,
2290 dev_priv->perf.oa.mux_regs_lens);
2291 if (dev_priv->perf.oa.n_mux_configs == 0) {
2292 DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
2293
2294 /* EINVAL because *_register_sysfs already checked this
2295 * and so it wouldn't have been advertised to userspace and
2296 * so shouldn't have been requested
2297 */
2298 return -EINVAL;
2299 }
2300
2301 dev_priv->perf.oa.b_counter_regs =
2302 b_counter_config_sampler;
2303 dev_priv->perf.oa.b_counter_regs_len =
2304 ARRAY_SIZE(b_counter_config_sampler);
2305
2306 dev_priv->perf.oa.flex_regs =
2307 flex_eu_config_sampler;
2308 dev_priv->perf.oa.flex_regs_len =
2309 ARRAY_SIZE(flex_eu_config_sampler);
2310
2311 return 0;
2312 case METRIC_SET_ID_TDL_1:
2313 dev_priv->perf.oa.n_mux_configs =
2314 get_tdl_1_mux_config(dev_priv,
2315 dev_priv->perf.oa.mux_regs,
2316 dev_priv->perf.oa.mux_regs_lens);
2317 if (dev_priv->perf.oa.n_mux_configs == 0) {
2318 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
2319
2320 /* EINVAL because *_register_sysfs already checked this
2321 * and so it wouldn't have been advertised to userspace and
2322 * so shouldn't have been requested
2323 */
2324 return -EINVAL;
2325 }
2326
2327 dev_priv->perf.oa.b_counter_regs =
2328 b_counter_config_tdl_1;
2329 dev_priv->perf.oa.b_counter_regs_len =
2330 ARRAY_SIZE(b_counter_config_tdl_1);
2331
2332 dev_priv->perf.oa.flex_regs =
2333 flex_eu_config_tdl_1;
2334 dev_priv->perf.oa.flex_regs_len =
2335 ARRAY_SIZE(flex_eu_config_tdl_1);
2336
2337 return 0;
2338 case METRIC_SET_ID_TDL_2:
2339 dev_priv->perf.oa.n_mux_configs =
2340 get_tdl_2_mux_config(dev_priv,
2341 dev_priv->perf.oa.mux_regs,
2342 dev_priv->perf.oa.mux_regs_lens);
2343 if (dev_priv->perf.oa.n_mux_configs == 0) {
2344 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
2345
2346 /* EINVAL because *_register_sysfs already checked this
2347 * and so it wouldn't have been advertised to userspace and
2348 * so shouldn't have been requested
2349 */
2350 return -EINVAL;
2351 }
2352
2353 dev_priv->perf.oa.b_counter_regs =
2354 b_counter_config_tdl_2;
2355 dev_priv->perf.oa.b_counter_regs_len =
2356 ARRAY_SIZE(b_counter_config_tdl_2);
2357
2358 dev_priv->perf.oa.flex_regs =
2359 flex_eu_config_tdl_2;
2360 dev_priv->perf.oa.flex_regs_len =
2361 ARRAY_SIZE(flex_eu_config_tdl_2);
2362
2363 return 0;
2364 case METRIC_SET_ID_COMPUTE_EXTRA:
2365 dev_priv->perf.oa.n_mux_configs =
2366 get_compute_extra_mux_config(dev_priv,
2367 dev_priv->perf.oa.mux_regs,
2368 dev_priv->perf.oa.mux_regs_lens);
2369 if (dev_priv->perf.oa.n_mux_configs == 0) {
2370 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
2371
2372 /* EINVAL because *_register_sysfs already checked this
2373 * and so it wouldn't have been advertised to userspace and
2374 * so shouldn't have been requested
2375 */
2376 return -EINVAL;
2377 }
2378
2379 dev_priv->perf.oa.b_counter_regs =
2380 b_counter_config_compute_extra;
2381 dev_priv->perf.oa.b_counter_regs_len =
2382 ARRAY_SIZE(b_counter_config_compute_extra);
2383
2384 dev_priv->perf.oa.flex_regs =
2385 flex_eu_config_compute_extra;
2386 dev_priv->perf.oa.flex_regs_len =
2387 ARRAY_SIZE(flex_eu_config_compute_extra);
2388
2389 return 0;
2390 case METRIC_SET_ID_VME_PIPE:
2391 dev_priv->perf.oa.n_mux_configs =
2392 get_vme_pipe_mux_config(dev_priv,
2393 dev_priv->perf.oa.mux_regs,
2394 dev_priv->perf.oa.mux_regs_lens);
2395 if (dev_priv->perf.oa.n_mux_configs == 0) {
2396 DRM_DEBUG_DRIVER("No suitable MUX config for \"VME_PIPE\" metric set\n");
2397
2398 /* EINVAL because *_register_sysfs already checked this
2399 * and so it wouldn't have been advertised to userspace and
2400 * so shouldn't have been requested
2401 */
2402 return -EINVAL;
2403 }
2404
2405 dev_priv->perf.oa.b_counter_regs =
2406 b_counter_config_vme_pipe;
2407 dev_priv->perf.oa.b_counter_regs_len =
2408 ARRAY_SIZE(b_counter_config_vme_pipe);
2409
2410 dev_priv->perf.oa.flex_regs =
2411 flex_eu_config_vme_pipe;
2412 dev_priv->perf.oa.flex_regs_len =
2413 ARRAY_SIZE(flex_eu_config_vme_pipe);
2414
2415 return 0;
2416 case METRIC_SET_ID_TEST_OA:
2417 dev_priv->perf.oa.n_mux_configs =
2418 get_test_oa_mux_config(dev_priv,
2419 dev_priv->perf.oa.mux_regs,
2420 dev_priv->perf.oa.mux_regs_lens);
2421 if (dev_priv->perf.oa.n_mux_configs == 0) {
2422 DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
2423
2424 /* EINVAL because *_register_sysfs already checked this
2425 * and so it wouldn't have been advertised to userspace and
2426 * so shouldn't have been requested
2427 */
2428 return -EINVAL;
2429 }
2430
2431 dev_priv->perf.oa.b_counter_regs =
2432 b_counter_config_test_oa;
2433 dev_priv->perf.oa.b_counter_regs_len =
2434 ARRAY_SIZE(b_counter_config_test_oa);
2435
2436 dev_priv->perf.oa.flex_regs =
2437 flex_eu_config_test_oa;
2438 dev_priv->perf.oa.flex_regs_len =
2439 ARRAY_SIZE(flex_eu_config_test_oa);
2440
2441 return 0;
2442 default:
2443 return -ENODEV;
2444 }
2445}
2446
/*
 * sysfs plumbing for the RENDER_BASIC metric set: a read-only "id" file
 * inside a directory named after the set's GUID, returning the numeric
 * METRIC_SET_ID_* value userspace uses to select the set.
 */
static ssize_t
show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	/* Emit the set's numeric ID followed by a newline. */
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
}

/* Read-only (0444) "id" attribute; no store callback. */
static struct device_attribute dev_attr_render_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_render_basic_id,
	.store = NULL,
};

/* NULL-terminated attribute list for the group below. */
static struct attribute *attrs_render_basic[] = {
	&dev_attr_render_basic_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID. */
static struct attribute_group group_render_basic = {
	.name = "0286c920-2f6d-493b-b22d-7a5280df43de",
	.attrs = attrs_render_basic,
};
2468
/* sysfs: expose METRIC_SET_ID_COMPUTE_BASIC as read-only "<GUID>/id". */
static ssize_t
show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
}

static struct device_attribute dev_attr_compute_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_basic_id,
	.store = NULL,
};

static struct attribute *attrs_compute_basic[] = {
	&dev_attr_compute_basic_id.attr,
	NULL,
};

static struct attribute_group group_compute_basic = {
	.name = "9823aaa1-b06f-40ce-884b-cd798c79f0c2",
	.attrs = attrs_compute_basic,
};
2490
/* sysfs: expose METRIC_SET_ID_RENDER_PIPE_PROFILE as read-only "<GUID>/id". */
static ssize_t
show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
}

static struct device_attribute dev_attr_render_pipe_profile_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_render_pipe_profile_id,
	.store = NULL,
};

static struct attribute *attrs_render_pipe_profile[] = {
	&dev_attr_render_pipe_profile_id.attr,
	NULL,
};

static struct attribute_group group_render_pipe_profile = {
	.name = "c7c735f3-ce58-45cf-aa04-30b183f1faff",
	.attrs = attrs_render_pipe_profile,
};
2512
/* sysfs: expose METRIC_SET_ID_MEMORY_READS as read-only "<GUID>/id". */
static ssize_t
show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
}

static struct device_attribute dev_attr_memory_reads_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_memory_reads_id,
	.store = NULL,
};

static struct attribute *attrs_memory_reads[] = {
	&dev_attr_memory_reads_id.attr,
	NULL,
};

static struct attribute_group group_memory_reads = {
	.name = "96ec2219-040b-428a-856a-6bc03363a057",
	.attrs = attrs_memory_reads,
};
2534
/* sysfs: expose METRIC_SET_ID_MEMORY_WRITES as read-only "<GUID>/id". */
static ssize_t
show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
}

static struct device_attribute dev_attr_memory_writes_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_memory_writes_id,
	.store = NULL,
};

static struct attribute *attrs_memory_writes[] = {
	&dev_attr_memory_writes_id.attr,
	NULL,
};

static struct attribute_group group_memory_writes = {
	.name = "03372b64-4996-4d3b-aa18-790e75eeb9c2",
	.attrs = attrs_memory_writes,
};
2556
/* sysfs: expose METRIC_SET_ID_COMPUTE_EXTENDED as read-only "<GUID>/id". */
static ssize_t
show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
}

static struct device_attribute dev_attr_compute_extended_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_extended_id,
	.store = NULL,
};

static struct attribute *attrs_compute_extended[] = {
	&dev_attr_compute_extended_id.attr,
	NULL,
};

static struct attribute_group group_compute_extended = {
	.name = "31b4ce5a-bd61-4c1f-bb5d-f2e731412150",
	.attrs = attrs_compute_extended,
};
2578
/* sysfs: expose METRIC_SET_ID_COMPUTE_L3_CACHE as read-only "<GUID>/id". */
static ssize_t
show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
}

static struct device_attribute dev_attr_compute_l3_cache_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_l3_cache_id,
	.store = NULL,
};

static struct attribute *attrs_compute_l3_cache[] = {
	&dev_attr_compute_l3_cache_id.attr,
	NULL,
};

static struct attribute_group group_compute_l3_cache = {
	.name = "2ce0911a-27fc-4887-96f0-11084fa807c3",
	.attrs = attrs_compute_l3_cache,
};
2600
/* sysfs: expose METRIC_SET_ID_HDC_AND_SF as read-only "<GUID>/id". */
static ssize_t
show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
}

static struct device_attribute dev_attr_hdc_and_sf_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_hdc_and_sf_id,
	.store = NULL,
};

static struct attribute *attrs_hdc_and_sf[] = {
	&dev_attr_hdc_and_sf_id.attr,
	NULL,
};

static struct attribute_group group_hdc_and_sf = {
	.name = "546c4c1d-99b8-42fb-a107-5aaabb5314a8",
	.attrs = attrs_hdc_and_sf,
};
2622
/* sysfs: expose METRIC_SET_ID_L3_1 as read-only "<GUID>/id". */
static ssize_t
show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
}

static struct device_attribute dev_attr_l3_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_1_id,
	.store = NULL,
};

static struct attribute *attrs_l3_1[] = {
	&dev_attr_l3_1_id.attr,
	NULL,
};

static struct attribute_group group_l3_1 = {
	.name = "4e93d156-9b39-4268-8544-a8e0480806d7",
	.attrs = attrs_l3_1,
};
2644
/* sysfs: expose METRIC_SET_ID_L3_2 as read-only "<GUID>/id". */
static ssize_t
show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
}

static struct device_attribute dev_attr_l3_2_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_2_id,
	.store = NULL,
};

static struct attribute *attrs_l3_2[] = {
	&dev_attr_l3_2_id.attr,
	NULL,
};

static struct attribute_group group_l3_2 = {
	.name = "de1bec86-ca92-4b43-89fa-147653221cc0",
	.attrs = attrs_l3_2,
};
2666
/* sysfs: expose METRIC_SET_ID_L3_3 as read-only "<GUID>/id". */
static ssize_t
show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
}

static struct device_attribute dev_attr_l3_3_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_3_id,
	.store = NULL,
};

static struct attribute *attrs_l3_3[] = {
	&dev_attr_l3_3_id.attr,
	NULL,
};

static struct attribute_group group_l3_3 = {
	.name = "e63537bb-10be-4d4a-92c4-c6b0c65e02ef",
	.attrs = attrs_l3_3,
};
2688
/* sysfs: expose METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND as read-only "<GUID>/id". */
static ssize_t
show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
}

static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_rasterizer_and_pixel_backend_id,
	.store = NULL,
};

static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
	&dev_attr_rasterizer_and_pixel_backend_id.attr,
	NULL,
};

static struct attribute_group group_rasterizer_and_pixel_backend = {
	.name = "7a03a9f8-ec5e-46bb-8b67-1f0ff1476281",
	.attrs = attrs_rasterizer_and_pixel_backend,
};
2710
/* sysfs: expose METRIC_SET_ID_SAMPLER as read-only "<GUID>/id". */
static ssize_t
show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
}

static struct device_attribute dev_attr_sampler_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_sampler_id,
	.store = NULL,
};

static struct attribute *attrs_sampler[] = {
	&dev_attr_sampler_id.attr,
	NULL,
};

static struct attribute_group group_sampler = {
	.name = "b25d2ebf-a6e0-4b29-96be-a9b010edeeda",
	.attrs = attrs_sampler,
};
2732
/* sysfs: expose METRIC_SET_ID_TDL_1 as read-only "<GUID>/id". */
static ssize_t
show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
}

static struct device_attribute dev_attr_tdl_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_tdl_1_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_1[] = {
	&dev_attr_tdl_1_id.attr,
	NULL,
};

static struct attribute_group group_tdl_1 = {
	.name = "469a05e5-e299-46f7-9598-7b05f3c34991",
	.attrs = attrs_tdl_1,
};
2754
/* sysfs: expose METRIC_SET_ID_TDL_2 as read-only "<GUID>/id". */
static ssize_t
show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
}

static struct device_attribute dev_attr_tdl_2_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_tdl_2_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_2[] = {
	&dev_attr_tdl_2_id.attr,
	NULL,
};

static struct attribute_group group_tdl_2 = {
	.name = "52f925c6-786a-4ec6-86ce-cba85c83453a",
	.attrs = attrs_tdl_2,
};
2776
/* sysfs show handler: prints the metric-set ID for the compute-extra set. */
static ssize_t
show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
}

/* Read-only "id" attribute for the compute_extra group. */
static struct device_attribute dev_attr_compute_extra_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_extra_id,
	.store = NULL,	/* read-only: no store handler */
};

static struct attribute *attrs_compute_extra[] = {
	&dev_attr_compute_extra_id.attr,
	NULL,
};

/* sysfs directory named by the set's UUID-style identifier. */
static struct attribute_group group_compute_extra = {
	.name = "efc497ac-884e-4ee4-a4a8-15fba22aaf21",
	.attrs = attrs_compute_extra,
};
2798
2799static ssize_t
2800show_vme_pipe_id(struct device *kdev, struct device_attribute *attr, char *buf)
2801{
2802 return sprintf(buf, "%d\n", METRIC_SET_ID_VME_PIPE);
2803}
2804
2805static struct device_attribute dev_attr_vme_pipe_id = {
2806 .attr = { .name = "id", .mode = 0444 },
2807 .show = show_vme_pipe_id,
2808 .store = NULL,
2809};
2810
2811static struct attribute *attrs_vme_pipe[] = {
2812 &dev_attr_vme_pipe_id.attr,
2813 NULL,
2814};
2815
2816static struct attribute_group group_vme_pipe = {
2817 .name = "bfd9764d-2c5b-4c16-bfc1-89de3ca10917",
2818 .attrs = attrs_vme_pipe,
2819};
2820
/* sysfs show handler: prints the metric-set ID for the OA self-test set. */
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
}

/* Read-only "id" attribute for the test_oa group. */
static struct device_attribute dev_attr_test_oa_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_test_oa_id,
	.store = NULL,	/* read-only: no store handler */
};

static struct attribute *attrs_test_oa[] = {
	&dev_attr_test_oa_id.attr,
	NULL,
};

/* sysfs directory named by the set's UUID-style identifier. */
static struct attribute_group group_test_oa = {
	.name = "f1792f32-6db2-4b50-b4b2-557128f1688d",
	.attrs = attrs_test_oa,
};
2842
/*
 * Create one sysfs group per metric set that applies to this device.
 * A group is only created when its get_*_mux_config() helper reports at
 * least one applicable mux config (the configs themselves, written into
 * mux_regs/mux_lens, are discarded here — only the yes/no answer is used).
 * On failure, the same helpers are re-evaluated on the unwind path below
 * to decide which groups were actually created and must be removed.
 *
 * Returns 0 on success or the first sysfs_create_group() error.
 */
int
i915_perf_register_sysfs_kblgt3(struct drm_i915_private *dev_priv)
{
	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
	int ret = 0;

	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
		if (ret)
			goto error_render_basic;
	}
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
		if (ret)
			goto error_compute_basic;
	}
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
		if (ret)
			goto error_render_pipe_profile;
	}
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
		if (ret)
			goto error_memory_reads;
	}
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
		if (ret)
			goto error_memory_writes;
	}
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
		if (ret)
			goto error_compute_extended;
	}
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
		if (ret)
			goto error_compute_l3_cache;
	}
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
		if (ret)
			goto error_hdc_and_sf;
	}
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
		if (ret)
			goto error_l3_1;
	}
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
		if (ret)
			goto error_l3_2;
	}
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
		if (ret)
			goto error_l3_3;
	}
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
		if (ret)
			goto error_rasterizer_and_pixel_backend;
	}
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
		if (ret)
			goto error_sampler;
	}
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
		if (ret)
			goto error_tdl_1;
	}
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
		if (ret)
			goto error_tdl_2;
	}
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
		if (ret)
			goto error_compute_extra;
	}
	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
		if (ret)
			goto error_vme_pipe;
	}
	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
		if (ret)
			goto error_test_oa;
	}

	return 0;

	/*
	 * Unwind in reverse registration order: each label removes the group
	 * created in the step just before the one that failed, guarded by the
	 * same config check that decided whether it was created at all.
	 */
error_test_oa:
	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
error_vme_pipe:
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
error_compute_extra:
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
error_tdl_2:
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
error_tdl_1:
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
error_sampler:
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
error_rasterizer_and_pixel_backend:
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
error_l3_3:
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
error_l3_2:
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
error_l3_1:
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
error_hdc_and_sf:
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
error_compute_l3_cache:
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
error_compute_extended:
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
error_memory_writes:
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
error_memory_reads:
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
error_render_pipe_profile:
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
error_compute_basic:
	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
error_render_basic:
	return ret;
}
2997
/*
 * Remove every sysfs group that i915_perf_register_sysfs_kblgt3() created.
 * The get_*_mux_config() helpers are re-evaluated here; assuming the
 * device/SKU checks answer the same as at register time, exactly the
 * groups that were created get removed.  The mux_regs/mux_lens outputs
 * are scratch space — only the helpers' boolean result is used.
 */
void
i915_perf_unregister_sysfs_kblgt3(struct drm_i915_private *dev_priv)
{
	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];

	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
}
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
new file mode 100644
index 000000000000..b0ca7f3114d3
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
@@ -0,0 +1,40 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
#ifndef __I915_OA_KBLGT3_H__
#define __I915_OA_KBLGT3_H__

/*
 * Forward declaration so this header is self-contained: without it the
 * struct tag used in the prototypes below would be scoped to each
 * prototype when the header is included before i915_drv.h.
 */
struct drm_i915_private;

/* Number of metric sets built into the KBL GT3 OA configuration. */
extern int i915_oa_n_builtin_metric_sets_kblgt3;

/* Program the metric set currently selected in dev_priv->perf state. */
extern int i915_oa_select_metric_set_kblgt3(struct drm_i915_private *dev_priv);

/* Create/remove the per-metric-set sysfs groups under metrics_kobj. */
extern int i915_perf_register_sysfs_kblgt3(struct drm_i915_private *dev_priv);

extern void i915_perf_unregister_sysfs_kblgt3(struct drm_i915_private *dev_priv);

#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
new file mode 100644
index 000000000000..1268beda212c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
@@ -0,0 +1,3479 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#include <linux/sysfs.h>
30
31#include "i915_drv.h"
32#include "i915_oa_sklgt2.h"
33
/*
 * Identifiers for the metric sets exposed through each sysfs group's
 * "id" file.  Numbering starts at 1, leaving 0 unused (presumably as an
 * "invalid/none" sentinel — confirm against the selection code).
 */
enum metric_set_id {
	METRIC_SET_ID_RENDER_BASIC = 1,
	METRIC_SET_ID_COMPUTE_BASIC,
	METRIC_SET_ID_RENDER_PIPE_PROFILE,
	METRIC_SET_ID_MEMORY_READS,
	METRIC_SET_ID_MEMORY_WRITES,
	METRIC_SET_ID_COMPUTE_EXTENDED,
	METRIC_SET_ID_COMPUTE_L3_CACHE,
	METRIC_SET_ID_HDC_AND_SF,
	METRIC_SET_ID_L3_1,
	METRIC_SET_ID_L3_2,
	METRIC_SET_ID_L3_3,
	METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
	METRIC_SET_ID_SAMPLER,
	METRIC_SET_ID_TDL_1,
	METRIC_SET_ID_TDL_2,
	METRIC_SET_ID_COMPUTE_EXTRA,
	METRIC_SET_ID_VME_PIPE,
	METRIC_SET_ID_TEST_OA,
};
54
/* Must equal the number of entries in enum metric_set_id above (18). */
int i915_oa_n_builtin_metric_sets_sklgt2 = 18;
56
/* Boolean (B) counter register writes for the render_basic set. */
static const struct i915_oa_reg b_counter_config_render_basic[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2740), 0x00000000 },
};
64
/* Flex EU counter register writes (0xe4xx/0xe5xx/0xe6xx) for render_basic. */
static const struct i915_oa_reg flex_eu_config_render_basic[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
74
/*
 * MUX programming for render_basic: address/value pairs, all targeting
 * register 0x9888.  Selected by get_render_basic_mux_config() when the
 * PCI revision is >= 0x02 (the "sku_gte_0x02" suffix).
 */
static const struct i915_oa_reg mux_config_render_basic_1_sku_gte_0x02[] = {
	{ _MMIO(0x9888), 0x166c01e0 },
	{ _MMIO(0x9888), 0x12170280 },
	{ _MMIO(0x9888), 0x12370280 },
	{ _MMIO(0x9888), 0x11930317 },
	{ _MMIO(0x9888), 0x159303df },
	{ _MMIO(0x9888), 0x3f900003 },
	{ _MMIO(0x9888), 0x1a4e0080 },
	{ _MMIO(0x9888), 0x0a6c0053 },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x0a1b4000 },
	{ _MMIO(0x9888), 0x1c1c0001 },
	{ _MMIO(0x9888), 0x002f1000 },
	{ _MMIO(0x9888), 0x042f1000 },
	{ _MMIO(0x9888), 0x004c4000 },
	{ _MMIO(0x9888), 0x0a4c8400 },
	{ _MMIO(0x9888), 0x000d2000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0d2000 },
	{ _MMIO(0x9888), 0x0c0f0400 },
	{ _MMIO(0x9888), 0x0e0f6600 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x162c2200 },
	{ _MMIO(0x9888), 0x062d8000 },
	{ _MMIO(0x9888), 0x082d8000 },
	{ _MMIO(0x9888), 0x00133000 },
	{ _MMIO(0x9888), 0x08133000 },
	{ _MMIO(0x9888), 0x00170020 },
	{ _MMIO(0x9888), 0x08170021 },
	{ _MMIO(0x9888), 0x10170000 },
	{ _MMIO(0x9888), 0x0633c000 },
	{ _MMIO(0x9888), 0x0833c000 },
	{ _MMIO(0x9888), 0x06370800 },
	{ _MMIO(0x9888), 0x08370840 },
	{ _MMIO(0x9888), 0x10370000 },
	{ _MMIO(0x9888), 0x0d933031 },
	{ _MMIO(0x9888), 0x0f933e3f },
	{ _MMIO(0x9888), 0x01933d00 },
	{ _MMIO(0x9888), 0x0393073c },
	{ _MMIO(0x9888), 0x0593000e },
	{ _MMIO(0x9888), 0x1d930000 },
	{ _MMIO(0x9888), 0x19930000 },
	{ _MMIO(0x9888), 0x1b930000 },
	{ _MMIO(0x9888), 0x1d900157 },
	{ _MMIO(0x9888), 0x1f900158 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x2b908000 },
	{ _MMIO(0x9888), 0x2d908000 },
	{ _MMIO(0x9888), 0x2f908000 },
	{ _MMIO(0x9888), 0x31908000 },
	{ _MMIO(0x9888), 0x15908000 },
	{ _MMIO(0x9888), 0x17908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1190001f },
	{ _MMIO(0x9888), 0x51904400 },
	{ _MMIO(0x9888), 0x41900020 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900c21 },
	{ _MMIO(0x9888), 0x47900061 },
	{ _MMIO(0x9888), 0x57904440 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x59900004 },
	{ _MMIO(0x9888), 0x43900000 },
	{ _MMIO(0x9888), 0x53904444 },
};
146
147static int
148get_render_basic_mux_config(struct drm_i915_private *dev_priv,
149 const struct i915_oa_reg **regs,
150 int *lens)
151{
152 int n = 0;
153
154 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
155 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
156
157 if (dev_priv->drm.pdev->revision >= 0x02) {
158 regs[n] = mux_config_render_basic_1_sku_gte_0x02;
159 lens[n] = ARRAY_SIZE(mux_config_render_basic_1_sku_gte_0x02);
160 n++;
161 }
162
163 return n;
164}
165
/* Boolean (B) counter register writes for the compute_basic set. */
static const struct i915_oa_reg b_counter_config_compute_basic[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2740), 0x00000000 },
};
173
/* Flex EU counter register writes for the compute_basic set. */
static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};
183
/*
 * MUX programming for compute_basic, variant selected by
 * get_compute_basic_mux_config() when slice 0 is fused in
 * (slice_mask & 0x01) AND the PCI revision is < 0x02.
 */
static const struct i915_oa_reg mux_config_compute_basic_0_slices_0x01_and_sku_lt_0x02[] = {
	{ _MMIO(0x9888), 0x104f00e0 },
	{ _MMIO(0x9888), 0x124f1c00 },
	{ _MMIO(0x9888), 0x106c00e0 },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f901403 },
	{ _MMIO(0x9888), 0x184e8000 },
	{ _MMIO(0x9888), 0x1a4e8200 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x004f0db2 },
	{ _MMIO(0x9888), 0x064f0900 },
	{ _MMIO(0x9888), 0x084f1880 },
	{ _MMIO(0x9888), 0x0a4f0011 },
	{ _MMIO(0x9888), 0x0c4f0e3c },
	{ _MMIO(0x9888), 0x0e4f1d80 },
	{ _MMIO(0x9888), 0x086c0002 },
	{ _MMIO(0x9888), 0x0a6c0100 },
	{ _MMIO(0x9888), 0x0e6c000c },
	{ _MMIO(0x9888), 0x026c000b },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x081b4000 },
	{ _MMIO(0x9888), 0x0a1b8000 },
	{ _MMIO(0x9888), 0x0e1b4000 },
	{ _MMIO(0x9888), 0x021b4000 },
	{ _MMIO(0x9888), 0x1a1c4000 },
	{ _MMIO(0x9888), 0x1c1c0012 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x005bc000 },
	{ _MMIO(0x9888), 0x065b8000 },
	{ _MMIO(0x9888), 0x085b8000 },
	{ _MMIO(0x9888), 0x0a5b4000 },
	{ _MMIO(0x9888), 0x0c5bc000 },
	{ _MMIO(0x9888), 0x0e5b8000 },
	{ _MMIO(0x9888), 0x105c8000 },
	{ _MMIO(0x9888), 0x1a5ca000 },
	{ _MMIO(0x9888), 0x1c5c002d },
	{ _MMIO(0x9888), 0x125c8000 },
	{ _MMIO(0x9888), 0x0a4c0800 },
	{ _MMIO(0x9888), 0x0c4c0082 },
	{ _MMIO(0x9888), 0x084c8000 },
	{ _MMIO(0x9888), 0x000da000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x020d2000 },
	{ _MMIO(0x9888), 0x0c0f5400 },
	{ _MMIO(0x9888), 0x0e0f5500 },
	{ _MMIO(0x9888), 0x100f0155 },
	{ _MMIO(0x9888), 0x002cc000 },
	{ _MMIO(0x9888), 0x0e2cc000 },
	{ _MMIO(0x9888), 0x162cbe00 },
	{ _MMIO(0x9888), 0x182c00ef },
	{ _MMIO(0x9888), 0x022cc000 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x19900157 },
	{ _MMIO(0x9888), 0x1b900167 },
	{ _MMIO(0x9888), 0x1d900105 },
	{ _MMIO(0x9888), 0x1f900103 },
	{ _MMIO(0x9888), 0x35900000 },
	/* NOTE(review): lone non-0x9888 register in this mux list — assumed
	 * intentional generator output; verify against the upstream XML. */
	{ _MMIO(0xd28), 0x00000000 },
	{ _MMIO(0x9888), 0x11900fff },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900840 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900842 },
	{ _MMIO(0x9888), 0x47900840 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900840 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900040 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900840 },
	{ _MMIO(0x9888), 0x53901111 },
};
261
/*
 * MUX programming for compute_basic, variant selected by
 * get_compute_basic_mux_config() when slice 0 is fused in
 * (slice_mask & 0x01) AND the PCI revision is >= 0x02.
 */
static const struct i915_oa_reg mux_config_compute_basic_0_slices_0x01_and_sku_gte_0x02[] = {
	{ _MMIO(0x9888), 0x104f00e0 },
	{ _MMIO(0x9888), 0x124f1c00 },
	{ _MMIO(0x9888), 0x106c00e0 },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f901403 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x1a4e0820 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x064f0900 },
	{ _MMIO(0x9888), 0x084f0032 },
	{ _MMIO(0x9888), 0x0a4f1810 },
	{ _MMIO(0x9888), 0x0c4f0e00 },
	{ _MMIO(0x9888), 0x0e4f003c },
	{ _MMIO(0x9888), 0x004f0d80 },
	{ _MMIO(0x9888), 0x024f003b },
	{ _MMIO(0x9888), 0x006c0002 },
	{ _MMIO(0x9888), 0x086c0000 },
	{ _MMIO(0x9888), 0x0c6c000c },
	{ _MMIO(0x9888), 0x0e6c0b00 },
	{ _MMIO(0x9888), 0x186c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x001b4000 },
	{ _MMIO(0x9888), 0x081b8000 },
	{ _MMIO(0x9888), 0x0c1b4000 },
	{ _MMIO(0x9888), 0x0e1b8000 },
	{ _MMIO(0x9888), 0x101c8000 },
	{ _MMIO(0x9888), 0x1a1c8000 },
	{ _MMIO(0x9888), 0x1c1c0024 },
	{ _MMIO(0x9888), 0x065b8000 },
	{ _MMIO(0x9888), 0x085b4000 },
	{ _MMIO(0x9888), 0x0a5bc000 },
	{ _MMIO(0x9888), 0x0c5b8000 },
	{ _MMIO(0x9888), 0x0e5b4000 },
	{ _MMIO(0x9888), 0x005b8000 },
	{ _MMIO(0x9888), 0x025b4000 },
	{ _MMIO(0x9888), 0x1a5c6000 },
	{ _MMIO(0x9888), 0x1c5c001b },
	{ _MMIO(0x9888), 0x125c8000 },
	{ _MMIO(0x9888), 0x145c8000 },
	{ _MMIO(0x9888), 0x004c8000 },
	{ _MMIO(0x9888), 0x0a4c2000 },
	{ _MMIO(0x9888), 0x0c4c0208 },
	{ _MMIO(0x9888), 0x000da000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x020d2000 },
	{ _MMIO(0x9888), 0x0c0f5400 },
	{ _MMIO(0x9888), 0x0e0f5500 },
	{ _MMIO(0x9888), 0x100f0155 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2cc000 },
	{ _MMIO(0x9888), 0x162cfb00 },
	{ _MMIO(0x9888), 0x182c00be },
	{ _MMIO(0x9888), 0x022cc000 },
	{ _MMIO(0x9888), 0x042cc000 },
	{ _MMIO(0x9888), 0x19900157 },
	{ _MMIO(0x9888), 0x1b900167 },
	{ _MMIO(0x9888), 0x1d900105 },
	{ _MMIO(0x9888), 0x1f900103 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x11900fff },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900800 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900842 },
	{ _MMIO(0x9888), 0x47900802 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900802 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900002 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53901111 },
};
341
342static int
343get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
344 const struct i915_oa_reg **regs,
345 int *lens)
346{
347 int n = 0;
348
349 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 2);
350 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 2);
351
352 if ((INTEL_INFO(dev_priv)->sseu.slice_mask & 0x01) &&
353 (dev_priv->drm.pdev->revision < 0x02)) {
354 regs[n] = mux_config_compute_basic_0_slices_0x01_and_sku_lt_0x02;
355 lens[n] = ARRAY_SIZE(mux_config_compute_basic_0_slices_0x01_and_sku_lt_0x02);
356 n++;
357 }
358 if ((INTEL_INFO(dev_priv)->sseu.slice_mask & 0x01) &&
359 (dev_priv->drm.pdev->revision >= 0x02)) {
360 regs[n] = mux_config_compute_basic_0_slices_0x01_and_sku_gte_0x02;
361 lens[n] = ARRAY_SIZE(mux_config_compute_basic_0_slices_0x01_and_sku_gte_0x02);
362 n++;
363 }
364
365 return n;
366}
367
/* Boolean (B) counter register writes for the render_pipe_profile set. */
static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007ffea },
	{ _MMIO(0x2774), 0x00007ffc },
	{ _MMIO(0x2778), 0x0007affa },
	{ _MMIO(0x277c), 0x0000f5fd },
	{ _MMIO(0x2780), 0x00079ffa },
	{ _MMIO(0x2784), 0x0000f3fb },
	{ _MMIO(0x2788), 0x0007bf7a },
	{ _MMIO(0x278c), 0x0000f7e7 },
	{ _MMIO(0x2790), 0x0007fefa },
	{ _MMIO(0x2794), 0x0000f7cf },
	{ _MMIO(0x2798), 0x00077ffa },
	{ _MMIO(0x279c), 0x0000efdf },
	{ _MMIO(0x27a0), 0x0006fffa },
	{ _MMIO(0x27a4), 0x0000cfbf },
	{ _MMIO(0x27a8), 0x0003fffa },
	{ _MMIO(0x27ac), 0x00005f7f },
};
391
/* Flex EU counter register writes for the render_pipe_profile set. */
static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
401
/*
 * MUX programming for render_pipe_profile; per its name, presumably
 * selected for SKUs with PCI revision < 0x02 — the selecting
 * get_render_pipe_profile_mux_config() helper is not in view here.
 */
static const struct i915_oa_reg mux_config_render_pipe_profile_0_sku_lt_0x02[] = {
	{ _MMIO(0x9888), 0x0c0e001f },
	{ _MMIO(0x9888), 0x0a0f0000 },
	{ _MMIO(0x9888), 0x10116800 },
	{ _MMIO(0x9888), 0x178a03e0 },
	{ _MMIO(0x9888), 0x11824c00 },
	{ _MMIO(0x9888), 0x11830020 },
	{ _MMIO(0x9888), 0x13840020 },
	{ _MMIO(0x9888), 0x11850019 },
	{ _MMIO(0x9888), 0x11860007 },
	{ _MMIO(0x9888), 0x01870c40 },
	{ _MMIO(0x9888), 0x17880000 },
	{ _MMIO(0x9888), 0x022f4000 },
	{ _MMIO(0x9888), 0x0a4c0040 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x040d4000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x020e5400 },
	{ _MMIO(0x9888), 0x000e0000 },
	{ _MMIO(0x9888), 0x080f0040 },
	{ _MMIO(0x9888), 0x000f0000 },
	{ _MMIO(0x9888), 0x100f0000 },
	{ _MMIO(0x9888), 0x0e0f0040 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x06104000 },
	{ _MMIO(0x9888), 0x06110012 },
	{ _MMIO(0x9888), 0x06131000 },
	{ _MMIO(0x9888), 0x01898000 },
	{ _MMIO(0x9888), 0x0d890100 },
	{ _MMIO(0x9888), 0x03898000 },
	{ _MMIO(0x9888), 0x09808000 },
	{ _MMIO(0x9888), 0x0b808000 },
	{ _MMIO(0x9888), 0x0380c000 },
	{ _MMIO(0x9888), 0x0f8a0075 },
	{ _MMIO(0x9888), 0x1d8a0000 },
	{ _MMIO(0x9888), 0x118a8000 },
	{ _MMIO(0x9888), 0x1b8a4000 },
	{ _MMIO(0x9888), 0x138a8000 },
	{ _MMIO(0x9888), 0x1d81a000 },
	{ _MMIO(0x9888), 0x15818000 },
	{ _MMIO(0x9888), 0x17818000 },
	{ _MMIO(0x9888), 0x0b820030 },
	{ _MMIO(0x9888), 0x07828000 },
	{ _MMIO(0x9888), 0x0d824000 },
	{ _MMIO(0x9888), 0x0f828000 },
	{ _MMIO(0x9888), 0x05824000 },
	{ _MMIO(0x9888), 0x0d830003 },
	{ _MMIO(0x9888), 0x0583000c },
	{ _MMIO(0x9888), 0x09830000 },
	{ _MMIO(0x9888), 0x03838000 },
	{ _MMIO(0x9888), 0x07838000 },
	{ _MMIO(0x9888), 0x0b840980 },
	{ _MMIO(0x9888), 0x03844d80 },
	{ _MMIO(0x9888), 0x11840000 },
	{ _MMIO(0x9888), 0x09848000 },
	{ _MMIO(0x9888), 0x09850080 },
	{ _MMIO(0x9888), 0x03850003 },
	{ _MMIO(0x9888), 0x01850000 },
	{ _MMIO(0x9888), 0x07860000 },
	{ _MMIO(0x9888), 0x0f860400 },
	{ _MMIO(0x9888), 0x09870032 },
	{ _MMIO(0x9888), 0x01888052 },
	{ _MMIO(0x9888), 0x11880000 },
	{ _MMIO(0x9888), 0x09884000 },
	{ _MMIO(0x9888), 0x15968000 },
	{ _MMIO(0x9888), 0x17968000 },
	{ _MMIO(0x9888), 0x0f96c000 },
	{ _MMIO(0x9888), 0x1f950011 },
	{ _MMIO(0x9888), 0x1d950014 },
	{ _MMIO(0x9888), 0x0592c000 },
	{ _MMIO(0x9888), 0x0b928000 },
	{ _MMIO(0x9888), 0x0d924000 },
	{ _MMIO(0x9888), 0x0f924000 },
	{ _MMIO(0x9888), 0x11928000 },
	{ _MMIO(0x9888), 0x1392c000 },
	{ _MMIO(0x9888), 0x09924000 },
	{ _MMIO(0x9888), 0x01985000 },
	{ _MMIO(0x9888), 0x07988000 },
	{ _MMIO(0x9888), 0x09981000 },
	{ _MMIO(0x9888), 0x0b982000 },
	{ _MMIO(0x9888), 0x0d982000 },
	{ _MMIO(0x9888), 0x0f989000 },
	{ _MMIO(0x9888), 0x05982000 },
	{ _MMIO(0x9888), 0x13904000 },
	{ _MMIO(0x9888), 0x21904000 },
	{ _MMIO(0x9888), 0x23904000 },
	{ _MMIO(0x9888), 0x25908000 },
	{ _MMIO(0x9888), 0x27904000 },
	{ _MMIO(0x9888), 0x29908000 },
	{ _MMIO(0x9888), 0x2b904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x0b978000 },
	{ _MMIO(0x9888), 0x0f974000 },
	{ _MMIO(0x9888), 0x11974000 },
	{ _MMIO(0x9888), 0x13978000 },
	{ _MMIO(0x9888), 0x09974000 },
	/* NOTE(review): lone non-0x9888 register in this mux list — assumed
	 * intentional generator output; verify against the upstream XML. */
	{ _MMIO(0xd28), 0x00000000 },
	{ _MMIO(0x9888), 0x1190c080 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x419010a0 },
	{ _MMIO(0x9888), 0x55904000 },
	{ _MMIO(0x9888), 0x45901000 },
	{ _MMIO(0x9888), 0x47900084 },
	{ _MMIO(0x9888), 0x57904400 },
	{ _MMIO(0x9888), 0x499000a5 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900081 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x439014a4 },
	{ _MMIO(0x9888), 0x53900400 },
};
519
/*
 * OA mux programming for the "render pipe profile" metric set, variant
 * selected when the device revision (SKU) is >= 0x02 (see
 * get_render_pipe_profile_mux_config()).  Auto-generated register
 * address/value pairs; all writes target 0x9888 (presumably the OA mux
 * select register — confirm against i915_reg.h).  Do not hand-edit.
 */
static const struct i915_oa_reg mux_config_render_pipe_profile_0_sku_gte_0x02[] = {
	{ _MMIO(0x9888), 0x0c0e001f },
	{ _MMIO(0x9888), 0x0a0f0000 },
	{ _MMIO(0x9888), 0x10116800 },
	{ _MMIO(0x9888), 0x178a03e0 },
	{ _MMIO(0x9888), 0x11824c00 },
	{ _MMIO(0x9888), 0x11830020 },
	{ _MMIO(0x9888), 0x13840020 },
	{ _MMIO(0x9888), 0x11850019 },
	{ _MMIO(0x9888), 0x11860007 },
	{ _MMIO(0x9888), 0x01870c40 },
	{ _MMIO(0x9888), 0x17880000 },
	{ _MMIO(0x9888), 0x022f4000 },
	{ _MMIO(0x9888), 0x0a4c0040 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x040d4000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x020e5400 },
	{ _MMIO(0x9888), 0x000e0000 },
	{ _MMIO(0x9888), 0x080f0040 },
	{ _MMIO(0x9888), 0x000f0000 },
	{ _MMIO(0x9888), 0x100f0000 },
	{ _MMIO(0x9888), 0x0e0f0040 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x06104000 },
	{ _MMIO(0x9888), 0x06110012 },
	{ _MMIO(0x9888), 0x06131000 },
	{ _MMIO(0x9888), 0x01898000 },
	{ _MMIO(0x9888), 0x0d890100 },
	{ _MMIO(0x9888), 0x03898000 },
	{ _MMIO(0x9888), 0x09808000 },
	{ _MMIO(0x9888), 0x0b808000 },
	{ _MMIO(0x9888), 0x0380c000 },
	{ _MMIO(0x9888), 0x0f8a0075 },
	{ _MMIO(0x9888), 0x1d8a0000 },
	{ _MMIO(0x9888), 0x118a8000 },
	{ _MMIO(0x9888), 0x1b8a4000 },
	{ _MMIO(0x9888), 0x138a8000 },
	{ _MMIO(0x9888), 0x1d81a000 },
	{ _MMIO(0x9888), 0x15818000 },
	{ _MMIO(0x9888), 0x17818000 },
	{ _MMIO(0x9888), 0x0b820030 },
	{ _MMIO(0x9888), 0x07828000 },
	{ _MMIO(0x9888), 0x0d824000 },
	{ _MMIO(0x9888), 0x0f828000 },
	{ _MMIO(0x9888), 0x05824000 },
	{ _MMIO(0x9888), 0x0d830003 },
	{ _MMIO(0x9888), 0x0583000c },
	{ _MMIO(0x9888), 0x09830000 },
	{ _MMIO(0x9888), 0x03838000 },
	{ _MMIO(0x9888), 0x07838000 },
	{ _MMIO(0x9888), 0x0b840980 },
	{ _MMIO(0x9888), 0x03844d80 },
	{ _MMIO(0x9888), 0x11840000 },
	{ _MMIO(0x9888), 0x09848000 },
	{ _MMIO(0x9888), 0x09850080 },
	{ _MMIO(0x9888), 0x03850003 },
	{ _MMIO(0x9888), 0x01850000 },
	{ _MMIO(0x9888), 0x07860000 },
	{ _MMIO(0x9888), 0x0f860400 },
	{ _MMIO(0x9888), 0x09870032 },
	{ _MMIO(0x9888), 0x01888052 },
	{ _MMIO(0x9888), 0x11880000 },
	{ _MMIO(0x9888), 0x09884000 },
	{ _MMIO(0x9888), 0x1b931001 },
	{ _MMIO(0x9888), 0x1d930001 },
	{ _MMIO(0x9888), 0x19934000 },
	{ _MMIO(0x9888), 0x1b958000 },
	{ _MMIO(0x9888), 0x1d950094 },
	{ _MMIO(0x9888), 0x19958000 },
	{ _MMIO(0x9888), 0x05e5a000 },
	{ _MMIO(0x9888), 0x01e5c000 },
	{ _MMIO(0x9888), 0x0592c000 },
	{ _MMIO(0x9888), 0x0b928000 },
	{ _MMIO(0x9888), 0x0d924000 },
	{ _MMIO(0x9888), 0x0f924000 },
	{ _MMIO(0x9888), 0x11928000 },
	{ _MMIO(0x9888), 0x1392c000 },
	{ _MMIO(0x9888), 0x09924000 },
	{ _MMIO(0x9888), 0x01985000 },
	{ _MMIO(0x9888), 0x07988000 },
	{ _MMIO(0x9888), 0x09981000 },
	{ _MMIO(0x9888), 0x0b982000 },
	{ _MMIO(0x9888), 0x0d982000 },
	{ _MMIO(0x9888), 0x0f989000 },
	{ _MMIO(0x9888), 0x05982000 },
	{ _MMIO(0x9888), 0x13904000 },
	{ _MMIO(0x9888), 0x21904000 },
	{ _MMIO(0x9888), 0x23904000 },
	{ _MMIO(0x9888), 0x25908000 },
	{ _MMIO(0x9888), 0x27904000 },
	{ _MMIO(0x9888), 0x29908000 },
	{ _MMIO(0x9888), 0x2b904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1190c080 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x419010a0 },
	{ _MMIO(0x9888), 0x55904000 },
	{ _MMIO(0x9888), 0x45901000 },
	{ _MMIO(0x9888), 0x47900084 },
	{ _MMIO(0x9888), 0x57904400 },
	{ _MMIO(0x9888), 0x499000a5 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900081 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x439014a4 },
	{ _MMIO(0x9888), 0x53900400 },
};
634
635static int
636get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
637 const struct i915_oa_reg **regs,
638 int *lens)
639{
640 int n = 0;
641
642 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 2);
643 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 2);
644
645 if (dev_priv->drm.pdev->revision < 0x02) {
646 regs[n] = mux_config_render_pipe_profile_0_sku_lt_0x02;
647 lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile_0_sku_lt_0x02);
648 n++;
649 }
650 if (dev_priv->drm.pdev->revision >= 0x02) {
651 regs[n] = mux_config_render_pipe_profile_0_sku_gte_0x02;
652 lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile_0_sku_gte_0x02);
653 n++;
654 }
655
656 return n;
657}
658
/*
 * OA boolean-counter (B-counter) programming for the "memory reads"
 * metric set.  Registers in the 0x2710-0x27ac range; auto-generated,
 * values are opaque hardware configuration — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_memory_reads[] = {
	{ _MMIO(0x272c), 0xffffffff },
	{ _MMIO(0x2728), 0xffffffff },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x271c), 0xffffffff },
	{ _MMIO(0x2718), 0xffffffff },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x86543210 },
	{ _MMIO(0x2748), 0x86543210 },
	{ _MMIO(0x2744), 0x00006667 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x86543210 },
	{ _MMIO(0x2758), 0x86543210 },
	{ _MMIO(0x2754), 0x00006465 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fe00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fe00 },
	{ _MMIO(0x2780), 0x0007f872 },
	{ _MMIO(0x2784), 0x0000fe00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fe00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fe00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fe00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fe00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fe00 },
};
693
/*
 * Flexible EU counter programming (0xe45c-0xe758 range) for the
 * "memory reads" metric set.  Auto-generated — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
703
/*
 * OA mux programming for the "memory reads" metric set, variant used
 * when slice 0 is fused on (slice_mask & 0x01) AND device revision
 * < 0x02 (see get_memory_reads_mux_config()).  Auto-generated — do
 * not hand-edit.
 */
static const struct i915_oa_reg mux_config_memory_reads_0_slices_0x01_and_sku_lt_0x02[] = {
	{ _MMIO(0x9888), 0x11810c00 },
	{ _MMIO(0x9888), 0x1381001a },
	{ _MMIO(0x9888), 0x13946000 },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f900003 },
	{ _MMIO(0x9888), 0x03811300 },
	{ _MMIO(0x9888), 0x05811b12 },
	{ _MMIO(0x9888), 0x0781001a },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x17810000 },
	{ _MMIO(0x9888), 0x19810000 },
	{ _MMIO(0x9888), 0x1b810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x0f968000 },
	{ _MMIO(0x9888), 0x1196c000 },
	{ _MMIO(0x9888), 0x13964000 },
	{ _MMIO(0x9888), 0x11938000 },
	{ _MMIO(0x9888), 0x1b93fe00 },
	{ _MMIO(0x9888), 0x01940010 },
	{ _MMIO(0x9888), 0x07941100 },
	{ _MMIO(0x9888), 0x09941312 },
	{ _MMIO(0x9888), 0x0b941514 },
	{ _MMIO(0x9888), 0x0d941716 },
	{ _MMIO(0x9888), 0x11940000 },
	{ _MMIO(0x9888), 0x19940000 },
	{ _MMIO(0x9888), 0x1b940000 },
	{ _MMIO(0x9888), 0x1d940000 },
	{ _MMIO(0x9888), 0x1b954000 },
	{ _MMIO(0x9888), 0x1d95a550 },
	{ _MMIO(0x9888), 0x1f9502aa },
	{ _MMIO(0x9888), 0x2f900157 },
	{ _MMIO(0x9888), 0x31900105 },
	{ _MMIO(0x9888), 0x15900103 },
	{ _MMIO(0x9888), 0x17900101 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x13908000 },
	{ _MMIO(0x9888), 0x21908000 },
	{ _MMIO(0x9888), 0x23908000 },
	{ _MMIO(0x9888), 0x25908000 },
	{ _MMIO(0x9888), 0x27908000 },
	{ _MMIO(0x9888), 0x29908000 },
	{ _MMIO(0x9888), 0x2b908000 },
	{ _MMIO(0x9888), 0x2d908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0xd28), 0x00000000 }, /* only non-0x9888 write in this table — presumably a related OA control register; confirm against i915_reg.h */
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900063 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
};
767
/*
 * OA mux programming for the "memory reads" metric set, variant used
 * when 0x02 <= device revision < 0x05 (see
 * get_memory_reads_mux_config()).  Auto-generated — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_memory_reads_0_sku_lt_0x05_and_sku_gte_0x02[] = {
	{ _MMIO(0x9888), 0x11810c00 },
	{ _MMIO(0x9888), 0x1381001a },
	{ _MMIO(0x9888), 0x13946000 },
	{ _MMIO(0x9888), 0x15940016 },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x03811300 },
	{ _MMIO(0x9888), 0x05811b12 },
	{ _MMIO(0x9888), 0x0781001a },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x17810000 },
	{ _MMIO(0x9888), 0x19810000 },
	{ _MMIO(0x9888), 0x1b810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x19930800 },
	{ _MMIO(0x9888), 0x1b93aa55 },
	{ _MMIO(0x9888), 0x1d9300aa },
	{ _MMIO(0x9888), 0x01940010 },
	{ _MMIO(0x9888), 0x07941100 },
	{ _MMIO(0x9888), 0x09941312 },
	{ _MMIO(0x9888), 0x0b941514 },
	{ _MMIO(0x9888), 0x0d941716 },
	{ _MMIO(0x9888), 0x0f940018 },
	{ _MMIO(0x9888), 0x1b940000 },
	{ _MMIO(0x9888), 0x11940000 },
	{ _MMIO(0x9888), 0x01e58000 },
	{ _MMIO(0x9888), 0x03e57000 },
	{ _MMIO(0x9888), 0x31900105 },
	{ _MMIO(0x9888), 0x15900103 },
	{ _MMIO(0x9888), 0x17900101 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x13908000 },
	{ _MMIO(0x9888), 0x21908000 },
	{ _MMIO(0x9888), 0x23908000 },
	{ _MMIO(0x9888), 0x25908000 },
	{ _MMIO(0x9888), 0x27908000 },
	{ _MMIO(0x9888), 0x29908000 },
	{ _MMIO(0x9888), 0x2b908000 },
	{ _MMIO(0x9888), 0x2d908000 },
	{ _MMIO(0x9888), 0x2f908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c20 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900400 },
	{ _MMIO(0x9888), 0x47900421 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900421 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900061 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
};
826
/*
 * OA mux programming for the "memory reads" metric set, variant used
 * when device revision >= 0x05 (see get_memory_reads_mux_config()).
 * Auto-generated — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_memory_reads_0_sku_gte_0x05[] = {
	{ _MMIO(0x9888), 0x11810c00 },
	{ _MMIO(0x9888), 0x1381001a },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f900064 },
	{ _MMIO(0x9888), 0x03811300 },
	{ _MMIO(0x9888), 0x05811b12 },
	{ _MMIO(0x9888), 0x0781001a },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x17810000 },
	{ _MMIO(0x9888), 0x19810000 },
	{ _MMIO(0x9888), 0x1b810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x1b930055 },
	{ _MMIO(0x9888), 0x03e58000 },
	{ _MMIO(0x9888), 0x05e5c000 },
	{ _MMIO(0x9888), 0x07e54000 },
	{ _MMIO(0x9888), 0x13900150 },
	{ _MMIO(0x9888), 0x21900151 },
	{ _MMIO(0x9888), 0x23900152 },
	{ _MMIO(0x9888), 0x25900153 },
	{ _MMIO(0x9888), 0x27900154 },
	{ _MMIO(0x9888), 0x29900155 },
	{ _MMIO(0x9888), 0x2b900156 },
	{ _MMIO(0x9888), 0x2d900157 },
	{ _MMIO(0x9888), 0x2f90015f },
	{ _MMIO(0x9888), 0x31900105 },
	{ _MMIO(0x9888), 0x15900103 },
	{ _MMIO(0x9888), 0x17900101 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c60 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900c00 },
	{ _MMIO(0x9888), 0x47900c63 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900c63 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900063 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
};
875
876static int
877get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
878 const struct i915_oa_reg **regs,
879 int *lens)
880{
881 int n = 0;
882
883 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 3);
884 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 3);
885
886 if ((INTEL_INFO(dev_priv)->sseu.slice_mask & 0x01) &&
887 (dev_priv->drm.pdev->revision < 0x02)) {
888 regs[n] = mux_config_memory_reads_0_slices_0x01_and_sku_lt_0x02;
889 lens[n] = ARRAY_SIZE(mux_config_memory_reads_0_slices_0x01_and_sku_lt_0x02);
890 n++;
891 }
892 if ((dev_priv->drm.pdev->revision < 0x05) &&
893 (dev_priv->drm.pdev->revision >= 0x02)) {
894 regs[n] = mux_config_memory_reads_0_sku_lt_0x05_and_sku_gte_0x02;
895 lens[n] = ARRAY_SIZE(mux_config_memory_reads_0_sku_lt_0x05_and_sku_gte_0x02);
896 n++;
897 }
898 if (dev_priv->drm.pdev->revision >= 0x05) {
899 regs[n] = mux_config_memory_reads_0_sku_gte_0x05;
900 lens[n] = ARRAY_SIZE(mux_config_memory_reads_0_sku_gte_0x05);
901 n++;
902 }
903
904 return n;
905}
906
/*
 * OA boolean-counter (B-counter) programming for the "memory writes"
 * metric set.  Differs from the memory-reads table only in a few
 * 0x277x/0x278x values.  Auto-generated — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_memory_writes[] = {
	{ _MMIO(0x272c), 0xffffffff },
	{ _MMIO(0x2728), 0xffffffff },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x271c), 0xffffffff },
	{ _MMIO(0x2718), 0xffffffff },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x86543210 },
	{ _MMIO(0x2748), 0x86543210 },
	{ _MMIO(0x2744), 0x00006667 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x86543210 },
	{ _MMIO(0x2758), 0x86543210 },
	{ _MMIO(0x2754), 0x00006465 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fe00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fe00 },
	{ _MMIO(0x2780), 0x0007f822 },
	{ _MMIO(0x2784), 0x0000fe00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fe00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fe00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fe00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fe00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fe00 },
};
941
/*
 * Flexible EU counter programming for the "memory writes" metric set
 * (identical values to flex_eu_config_memory_reads; kept separate by
 * the generator).  Auto-generated — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
951
/*
 * OA mux programming for the "memory writes" metric set, variant used
 * when slice 0 is fused on (slice_mask & 0x01) AND device revision
 * < 0x02 (see get_memory_writes_mux_config()).  Auto-generated — do
 * not hand-edit.
 */
static const struct i915_oa_reg mux_config_memory_writes_0_slices_0x01_and_sku_lt_0x02[] = {
	{ _MMIO(0x9888), 0x11810c00 },
	{ _MMIO(0x9888), 0x1381001a },
	{ _MMIO(0x9888), 0x13945400 },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f901400 },
	{ _MMIO(0x9888), 0x03811300 },
	{ _MMIO(0x9888), 0x05811b12 },
	{ _MMIO(0x9888), 0x0781001a },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x17810000 },
	{ _MMIO(0x9888), 0x19810000 },
	{ _MMIO(0x9888), 0x1b810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x0f968000 },
	{ _MMIO(0x9888), 0x1196c000 },
	{ _MMIO(0x9888), 0x13964000 },
	{ _MMIO(0x9888), 0x11938000 },
	{ _MMIO(0x9888), 0x1b93fe00 },
	{ _MMIO(0x9888), 0x01940010 },
	{ _MMIO(0x9888), 0x07941100 },
	{ _MMIO(0x9888), 0x09941312 },
	{ _MMIO(0x9888), 0x0b941514 },
	{ _MMIO(0x9888), 0x0d941716 },
	{ _MMIO(0x9888), 0x11940000 },
	{ _MMIO(0x9888), 0x19940000 },
	{ _MMIO(0x9888), 0x1b940000 },
	{ _MMIO(0x9888), 0x1d940000 },
	{ _MMIO(0x9888), 0x1b954000 },
	{ _MMIO(0x9888), 0x1d95a550 },
	{ _MMIO(0x9888), 0x1f9502aa },
	{ _MMIO(0x9888), 0x2f900167 },
	{ _MMIO(0x9888), 0x31900105 },
	{ _MMIO(0x9888), 0x15900103 },
	{ _MMIO(0x9888), 0x17900101 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x13908000 },
	{ _MMIO(0x9888), 0x21908000 },
	{ _MMIO(0x9888), 0x23908000 },
	{ _MMIO(0x9888), 0x25908000 },
	{ _MMIO(0x9888), 0x27908000 },
	{ _MMIO(0x9888), 0x29908000 },
	{ _MMIO(0x9888), 0x2b908000 },
	{ _MMIO(0x9888), 0x2d908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0xd28), 0x00000000 }, /* only non-0x9888 write in this table — presumably a related OA control register; confirm against i915_reg.h */
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900063 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
};
1015
/*
 * OA mux programming for the "memory writes" metric set, variant used
 * when 0x02 <= device revision < 0x05 (see
 * get_memory_writes_mux_config()).  Auto-generated — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_memory_writes_0_sku_lt_0x05_and_sku_gte_0x02[] = {
	{ _MMIO(0x9888), 0x11810c00 },
	{ _MMIO(0x9888), 0x1381001a },
	{ _MMIO(0x9888), 0x13945400 },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f901400 },
	{ _MMIO(0x9888), 0x03811300 },
	{ _MMIO(0x9888), 0x05811b12 },
	{ _MMIO(0x9888), 0x0781001a },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x17810000 },
	{ _MMIO(0x9888), 0x19810000 },
	{ _MMIO(0x9888), 0x1b810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x19930800 },
	{ _MMIO(0x9888), 0x1b93aa55 },
	{ _MMIO(0x9888), 0x1d93002a },
	{ _MMIO(0x9888), 0x01940010 },
	{ _MMIO(0x9888), 0x07941100 },
	{ _MMIO(0x9888), 0x09941312 },
	{ _MMIO(0x9888), 0x0b941514 },
	{ _MMIO(0x9888), 0x0d941716 },
	{ _MMIO(0x9888), 0x1b940000 },
	{ _MMIO(0x9888), 0x11940000 },
	{ _MMIO(0x9888), 0x01e58000 },
	{ _MMIO(0x9888), 0x03e57000 },
	{ _MMIO(0x9888), 0x2f900167 },
	{ _MMIO(0x9888), 0x31900105 },
	{ _MMIO(0x9888), 0x15900103 },
	{ _MMIO(0x9888), 0x17900101 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x13908000 },
	{ _MMIO(0x9888), 0x21908000 },
	{ _MMIO(0x9888), 0x23908000 },
	{ _MMIO(0x9888), 0x25908000 },
	{ _MMIO(0x9888), 0x27908000 },
	{ _MMIO(0x9888), 0x29908000 },
	{ _MMIO(0x9888), 0x2b908000 },
	{ _MMIO(0x9888), 0x2d908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c20 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900400 },
	{ _MMIO(0x9888), 0x47900421 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900421 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900063 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
};
1073
/*
 * OA mux programming for the "memory writes" metric set, variant used
 * when device revision >= 0x05 (see get_memory_writes_mux_config()).
 * Auto-generated — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_memory_writes_0_sku_gte_0x05[] = {
	{ _MMIO(0x9888), 0x11810c00 },
	{ _MMIO(0x9888), 0x1381001a },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f901000 },
	{ _MMIO(0x9888), 0x03811300 },
	{ _MMIO(0x9888), 0x05811b12 },
	{ _MMIO(0x9888), 0x0781001a },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x17810000 },
	{ _MMIO(0x9888), 0x19810000 },
	{ _MMIO(0x9888), 0x1b810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x1b930055 },
	{ _MMIO(0x9888), 0x03e58000 },
	{ _MMIO(0x9888), 0x05e5c000 },
	{ _MMIO(0x9888), 0x07e54000 },
	{ _MMIO(0x9888), 0x13900160 },
	{ _MMIO(0x9888), 0x21900161 },
	{ _MMIO(0x9888), 0x23900162 },
	{ _MMIO(0x9888), 0x25900163 },
	{ _MMIO(0x9888), 0x27900164 },
	{ _MMIO(0x9888), 0x29900165 },
	{ _MMIO(0x9888), 0x2b900166 },
	{ _MMIO(0x9888), 0x2d900167 },
	{ _MMIO(0x9888), 0x2f900150 },
	{ _MMIO(0x9888), 0x31900105 },
	{ _MMIO(0x9888), 0x15900103 },
	{ _MMIO(0x9888), 0x17900101 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c60 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900c00 },
	{ _MMIO(0x9888), 0x47900c63 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900c63 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900063 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
};
1122
1123static int
1124get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
1125 const struct i915_oa_reg **regs,
1126 int *lens)
1127{
1128 int n = 0;
1129
1130 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 3);
1131 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 3);
1132
1133 if ((INTEL_INFO(dev_priv)->sseu.slice_mask & 0x01) &&
1134 (dev_priv->drm.pdev->revision < 0x02)) {
1135 regs[n] = mux_config_memory_writes_0_slices_0x01_and_sku_lt_0x02;
1136 lens[n] = ARRAY_SIZE(mux_config_memory_writes_0_slices_0x01_and_sku_lt_0x02);
1137 n++;
1138 }
1139 if ((dev_priv->drm.pdev->revision < 0x05) &&
1140 (dev_priv->drm.pdev->revision >= 0x02)) {
1141 regs[n] = mux_config_memory_writes_0_sku_lt_0x05_and_sku_gte_0x02;
1142 lens[n] = ARRAY_SIZE(mux_config_memory_writes_0_sku_lt_0x05_and_sku_gte_0x02);
1143 n++;
1144 }
1145 if (dev_priv->drm.pdev->revision >= 0x05) {
1146 regs[n] = mux_config_memory_writes_0_sku_gte_0x05;
1147 lens[n] = ARRAY_SIZE(mux_config_memory_writes_0_sku_gte_0x05);
1148 n++;
1149 }
1150
1151 return n;
1152}
1153
/*
 * OA boolean-counter (B-counter) programming for the "compute
 * extended" metric set.  Auto-generated — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_compute_extended[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fc2a },
	{ _MMIO(0x2774), 0x0000bf00 },
	{ _MMIO(0x2778), 0x0007fc6a },
	{ _MMIO(0x277c), 0x0000bf00 },
	{ _MMIO(0x2780), 0x0007fc92 },
	{ _MMIO(0x2784), 0x0000bf00 },
	{ _MMIO(0x2788), 0x0007fca2 },
	{ _MMIO(0x278c), 0x0000bf00 },
	{ _MMIO(0x2790), 0x0007fc32 },
	{ _MMIO(0x2794), 0x0000bf00 },
	{ _MMIO(0x2798), 0x0007fc9a },
	{ _MMIO(0x279c), 0x0000bf00 },
	{ _MMIO(0x27a0), 0x0007fe6a },
	{ _MMIO(0x27a4), 0x0000bf00 },
	{ _MMIO(0x27a8), 0x0007fe7a },
	{ _MMIO(0x27ac), 0x0000bf00 },
};
1177
/*
 * Flexible EU counter programming for the "compute extended" metric
 * set.  Auto-generated — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};
1187
/*
 * OA mux programming for the "compute extended" metric set, applied
 * only when subslice 0 is fused on (subslice_mask & 0x01 — see
 * get_compute_extended_mux_config()).  Auto-generated — do not
 * hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_extended_0_subslices_0x01[] = {
	{ _MMIO(0x9888), 0x106c00e0 },
	{ _MMIO(0x9888), 0x141c8160 },
	{ _MMIO(0x9888), 0x161c8015 },
	{ _MMIO(0x9888), 0x181c0120 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x0e4e8000 },
	{ _MMIO(0x9888), 0x184e8000 },
	{ _MMIO(0x9888), 0x1a4eaaa0 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x024e8000 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x0e6c0b01 },
	{ _MMIO(0x9888), 0x006c0200 },
	{ _MMIO(0x9888), 0x026c000c },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x0e1bc000 },
	{ _MMIO(0x9888), 0x001b8000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x001c0041 },
	{ _MMIO(0x9888), 0x061c4200 },
	{ _MMIO(0x9888), 0x081c4443 },
	{ _MMIO(0x9888), 0x0a1c4645 },
	{ _MMIO(0x9888), 0x0c1c7647 },
	{ _MMIO(0x9888), 0x041c7357 },
	{ _MMIO(0x9888), 0x1c1c0030 },
	{ _MMIO(0x9888), 0x101c0000 },
	{ _MMIO(0x9888), 0x1a1c0000 },
	{ _MMIO(0x9888), 0x121c8000 },
	{ _MMIO(0x9888), 0x004c8000 },
	{ _MMIO(0x9888), 0x0a4caa2a },
	{ _MMIO(0x9888), 0x0c4c02aa },
	{ _MMIO(0x9888), 0x084ca000 },
	{ _MMIO(0x9888), 0x000da000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x0c0f5400 },
	{ _MMIO(0x9888), 0x0e0f5515 },
	{ _MMIO(0x9888), 0x100f0155 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2c8000 },
	{ _MMIO(0x9888), 0x162caa00 },
	{ _MMIO(0x9888), 0x182c00aa },
	{ _MMIO(0x9888), 0x022c8000 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0xd28), 0x00000000 }, /* only non-0x9888 write in this table — presumably a related OA control register; confirm against i915_reg.h */
	{ _MMIO(0x9888), 0x11907fff },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900040 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900802 },
	{ _MMIO(0x9888), 0x47900842 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900842 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900800 },
	{ _MMIO(0x9888), 0x53900000 },
};
1262
1263static int
1264get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
1265 const struct i915_oa_reg **regs,
1266 int *lens)
1267{
1268 int n = 0;
1269
1270 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1271 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1272
1273 if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x01) {
1274 regs[n] = mux_config_compute_extended_0_subslices_0x01;
1275 lens[n] = ARRAY_SIZE(mux_config_compute_extended_0_subslices_0x01);
1276 n++;
1277 }
1278
1279 return n;
1280}
1281
/*
 * OA boolean-counter (B-counter) programming for the "compute L3
 * cache" metric set.  Auto-generated — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x30800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fffa },
	{ _MMIO(0x2774), 0x0000fefe },
	{ _MMIO(0x2778), 0x0007fffa },
	{ _MMIO(0x277c), 0x0000fefd },
	{ _MMIO(0x2790), 0x0007fffa },
	{ _MMIO(0x2794), 0x0000fbef },
	{ _MMIO(0x2798), 0x0007fffa },
	{ _MMIO(0x279c), 0x0000fbdf },
};
1297
/*
 * Flexible EU counter programming for the "compute L3 cache" metric
 * set.  Auto-generated — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00101100 },
	{ _MMIO(0xe45c), 0x00201200 },
	{ _MMIO(0xe55c), 0x00301300 },
	{ _MMIO(0xe65c), 0x00401400 },
};
1307
/*
 * OA mux programming for the "compute L3 cache" metric set.  A single
 * unconditional variant — no SKU/fuse gating (see
 * get_compute_l3_cache_mux_config()).  Auto-generated — do not
 * hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
	{ _MMIO(0x9888), 0x166c0760 },
	{ _MMIO(0x9888), 0x1593001e },
	{ _MMIO(0x9888), 0x3f901403 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x0e4e8000 },
	{ _MMIO(0x9888), 0x184e8000 },
	{ _MMIO(0x9888), 0x1a4e8020 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x006c0051 },
	{ _MMIO(0x9888), 0x066c5000 },
	{ _MMIO(0x9888), 0x086c5c5d },
	{ _MMIO(0x9888), 0x0e6c5e5f },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x186c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x001b4000 },
	{ _MMIO(0x9888), 0x061b8000 },
	{ _MMIO(0x9888), 0x081bc000 },
	{ _MMIO(0x9888), 0x0e1bc000 },
	{ _MMIO(0x9888), 0x101c8000 },
	{ _MMIO(0x9888), 0x1a1ce000 },
	{ _MMIO(0x9888), 0x1c1c0030 },
	{ _MMIO(0x9888), 0x004c8000 },
	{ _MMIO(0x9888), 0x0a4c2a00 },
	{ _MMIO(0x9888), 0x0c4c0280 },
	{ _MMIO(0x9888), 0x000d2000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x0c0f0400 },
	{ _MMIO(0x9888), 0x0e0f1500 },
	{ _MMIO(0x9888), 0x100f0140 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2c8000 },
	{ _MMIO(0x9888), 0x162c0a00 },
	{ _MMIO(0x9888), 0x182c00a0 },
	{ _MMIO(0x9888), 0x03933300 },
	{ _MMIO(0x9888), 0x05930032 },
	{ _MMIO(0x9888), 0x11930000 },
	{ _MMIO(0x9888), 0x1b930000 },
	{ _MMIO(0x9888), 0x1d900157 },
	{ _MMIO(0x9888), 0x1f900167 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1190030f },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900000 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900042 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x53901111 },
	{ _MMIO(0x9888), 0x43900420 },
};
1369
1370static int
1371get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
1372 const struct i915_oa_reg **regs,
1373 int *lens)
1374{
1375 int n = 0;
1376
1377 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1378 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1379
1380 regs[n] = mux_config_compute_l3_cache;
1381 lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
1382 n++;
1383
1384 return n;
1385}
1386
/*
 * OA boolean-counter (B-counter) programming for the "HDC and SF"
 * metric set.  Auto-generated — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x10800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000fdff },
};
1397
/*
 * Flexible EU counter programming for the "HDC and SF" metric set.
 * Auto-generated — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1407
/*
 * OA mux programming for the "HDC and SF" metric set.  A single
 * unconditional variant (see get_hdc_and_sf_mux_config()).
 * Auto-generated — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
	{ _MMIO(0x9888), 0x104f0232 },
	{ _MMIO(0x9888), 0x124f4640 },
	{ _MMIO(0x9888), 0x106c0232 },
	{ _MMIO(0x9888), 0x11834400 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x0c4e8000 },
	{ _MMIO(0x9888), 0x004f1880 },
	{ _MMIO(0x9888), 0x024f08bb },
	{ _MMIO(0x9888), 0x044f001b },
	{ _MMIO(0x9888), 0x046c0100 },
	{ _MMIO(0x9888), 0x066c000b },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x041b8000 },
	{ _MMIO(0x9888), 0x061b4000 },
	{ _MMIO(0x9888), 0x1a1c1800 },
	{ _MMIO(0x9888), 0x005b8000 },
	{ _MMIO(0x9888), 0x025bc000 },
	{ _MMIO(0x9888), 0x045b4000 },
	{ _MMIO(0x9888), 0x125c8000 },
	{ _MMIO(0x9888), 0x145c8000 },
	{ _MMIO(0x9888), 0x165c8000 },
	{ _MMIO(0x9888), 0x185c8000 },
	{ _MMIO(0x9888), 0x0a4c00a0 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f5000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x022cc000 },
	{ _MMIO(0x9888), 0x042cc000 },
	{ _MMIO(0x9888), 0x062cc000 },
	{ _MMIO(0x9888), 0x082cc000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x0f828000 },
	{ _MMIO(0x9888), 0x0f8305c0 },
	{ _MMIO(0x9888), 0x09830000 },
	{ _MMIO(0x9888), 0x07830000 },
	{ _MMIO(0x9888), 0x1d950080 },
	{ _MMIO(0x9888), 0x13928000 },
	{ _MMIO(0x9888), 0x0f988000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x4b9000a0 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900800 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1463
1464static int
1465get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
1466 const struct i915_oa_reg **regs,
1467 int *lens)
1468{
1469 int n = 0;
1470
1471 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1472 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1473
1474 regs[n] = mux_config_hdc_and_sf;
1475 lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
1476 n++;
1477
1478 return n;
1479}
1480
/* Boolean-counter programming (0x27xx range) for the L3_1 metric set
 * (generated table; values opaque here).
 */
static const struct i915_oa_reg b_counter_config_l3_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};
1505
/* Flex-EU counter programming for the L3_1 metric set. */
static const struct i915_oa_reg flex_eu_config_l3_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1515
/* MUX programming for the L3_1 metric set: every entry writes the 0x9888
 * select register (generated table; do not hand-edit).
 */
static const struct i915_oa_reg mux_config_l3_1[] = {
	{ _MMIO(0x9888), 0x126c7b40 },
	{ _MMIO(0x9888), 0x166c0020 },
	{ _MMIO(0x9888), 0x0a603444 },
	{ _MMIO(0x9888), 0x0a613400 },
	{ _MMIO(0x9888), 0x1a4ea800 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x024e8000 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x064f4000 },
	{ _MMIO(0x9888), 0x0c6c5327 },
	{ _MMIO(0x9888), 0x0e6c5425 },
	{ _MMIO(0x9888), 0x006c2a00 },
	{ _MMIO(0x9888), 0x026c285b },
	{ _MMIO(0x9888), 0x046c005c },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x1a6c0800 },
	{ _MMIO(0x9888), 0x0c1bc000 },
	{ _MMIO(0x9888), 0x0e1bc000 },
	{ _MMIO(0x9888), 0x001b8000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x1c1c003c },
	{ _MMIO(0x9888), 0x121c8000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c0800 },
	{ _MMIO(0x9888), 0x065b4000 },
	{ _MMIO(0x9888), 0x1a5c1000 },
	{ _MMIO(0x9888), 0x10600000 },
	{ _MMIO(0x9888), 0x04600000 },
	{ _MMIO(0x9888), 0x0c610044 },
	{ _MMIO(0x9888), 0x10610000 },
	{ _MMIO(0x9888), 0x06610000 },
	{ _MMIO(0x9888), 0x0c4c02a8 },
	{ _MMIO(0x9888), 0x084ca000 },
	{ _MMIO(0x9888), 0x0a4c002a },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f0154 },
	{ _MMIO(0x9888), 0x0c0f5000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x182c00aa },
	{ _MMIO(0x9888), 0x022c8000 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2cc000 },
	{ _MMIO(0x9888), 0x1190ffc0 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900420 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900021 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900400 },
	{ _MMIO(0x9888), 0x43900421 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
};
1588
1589static int
1590get_l3_1_mux_config(struct drm_i915_private *dev_priv,
1591 const struct i915_oa_reg **regs,
1592 int *lens)
1593{
1594 int n = 0;
1595
1596 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1597 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1598
1599 regs[n] = mux_config_l3_1;
1600 lens[n] = ARRAY_SIZE(mux_config_l3_1);
1601 n++;
1602
1603 return n;
1604}
1605
/* Boolean-counter programming (0x27xx range) for the L3_2 metric set. */
static const struct i915_oa_reg b_counter_config_l3_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00028002 },
	{ _MMIO(0x277c), 0x000087ff },
	{ _MMIO(0x2780), 0x00020002 },
	{ _MMIO(0x2784), 0x00008fff },
	{ _MMIO(0x2788), 0x00008002 },
	{ _MMIO(0x278c), 0x0000a7ff },
};
1622
/* Flex-EU counter programming for the L3_2 metric set. */
static const struct i915_oa_reg flex_eu_config_l3_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1632
/* MUX programming for the L3_2 metric set: every entry writes the 0x9888
 * select register (generated table; do not hand-edit).
 */
static const struct i915_oa_reg mux_config_l3_2[] = {
	{ _MMIO(0x9888), 0x126c02e0 },
	{ _MMIO(0x9888), 0x146c0001 },
	{ _MMIO(0x9888), 0x0a623400 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x064f4000 },
	{ _MMIO(0x9888), 0x026c3324 },
	{ _MMIO(0x9888), 0x046c3422 },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c0800 },
	{ _MMIO(0x9888), 0x065b4000 },
	{ _MMIO(0x9888), 0x1a5c1000 },
	{ _MMIO(0x9888), 0x06614000 },
	{ _MMIO(0x9888), 0x0c620044 },
	{ _MMIO(0x9888), 0x10620000 },
	{ _MMIO(0x9888), 0x06620000 },
	{ _MMIO(0x9888), 0x084c8000 },
	{ _MMIO(0x9888), 0x0a4c002a },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f4000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2cc000 },
	{ _MMIO(0x9888), 0x1190f800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x43900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1678
1679static int
1680get_l3_2_mux_config(struct drm_i915_private *dev_priv,
1681 const struct i915_oa_reg **regs,
1682 int *lens)
1683{
1684 int n = 0;
1685
1686 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1687 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1688
1689 regs[n] = mux_config_l3_2;
1690 lens[n] = ARRAY_SIZE(mux_config_l3_2);
1691 n++;
1692
1693 return n;
1694}
1695
/* Boolean-counter programming (0x27xx range) for the L3_3 metric set. */
static const struct i915_oa_reg b_counter_config_l3_3[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00028002 },
	{ _MMIO(0x277c), 0x000087ff },
	{ _MMIO(0x2780), 0x00020002 },
	{ _MMIO(0x2784), 0x00008fff },
	{ _MMIO(0x2788), 0x00008002 },
	{ _MMIO(0x278c), 0x0000a7ff },
};
1712
/* Flex-EU counter programming for the L3_3 metric set. */
static const struct i915_oa_reg flex_eu_config_l3_3[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1722
/* MUX programming for the L3_3 metric set: every entry writes the 0x9888
 * select register (generated table; do not hand-edit).
 */
static const struct i915_oa_reg mux_config_l3_3[] = {
	{ _MMIO(0x9888), 0x126c4e80 },
	{ _MMIO(0x9888), 0x146c0000 },
	{ _MMIO(0x9888), 0x0a633400 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x0c4e8000 },
	{ _MMIO(0x9888), 0x026c3321 },
	{ _MMIO(0x9888), 0x046c342f },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1a6c2000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x061b4000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c1800 },
	{ _MMIO(0x9888), 0x06604000 },
	{ _MMIO(0x9888), 0x0c630044 },
	{ _MMIO(0x9888), 0x10630000 },
	{ _MMIO(0x9888), 0x06630000 },
	{ _MMIO(0x9888), 0x084c8000 },
	{ _MMIO(0x9888), 0x0a4c00aa },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f4000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x1190f800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900002 },
	{ _MMIO(0x9888), 0x33900000 },
};
1767
1768static int
1769get_l3_3_mux_config(struct drm_i915_private *dev_priv,
1770 const struct i915_oa_reg **regs,
1771 int *lens)
1772{
1773 int n = 0;
1774
1775 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1776 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1777
1778 regs[n] = mux_config_l3_3;
1779 lens[n] = ARRAY_SIZE(mux_config_l3_3);
1780 n++;
1781
1782 return n;
1783}
1784
/* Boolean-counter programming (0x27xx range) for the
 * RASTERIZER_AND_PIXEL_BACKEND metric set.
 */
static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x30800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000efff },
	{ _MMIO(0x2778), 0x00006000 },
	{ _MMIO(0x277c), 0x0000f3ff },
};
1797
/* Flex-EU counter programming for the RASTERIZER_AND_PIXEL_BACKEND set. */
static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1807
/* MUX programming for the RASTERIZER_AND_PIXEL_BACKEND metric set: every
 * entry writes the 0x9888 select register (generated table).
 */
static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x9888), 0x102f3800 },
	{ _MMIO(0x9888), 0x144d0500 },
	{ _MMIO(0x9888), 0x120d03c0 },
	{ _MMIO(0x9888), 0x140d03cf },
	{ _MMIO(0x9888), 0x0c0f0004 },
	{ _MMIO(0x9888), 0x0c4e4000 },
	{ _MMIO(0x9888), 0x042f0480 },
	{ _MMIO(0x9888), 0x082f0000 },
	{ _MMIO(0x9888), 0x022f0000 },
	{ _MMIO(0x9888), 0x0a4c0090 },
	{ _MMIO(0x9888), 0x064d0027 },
	{ _MMIO(0x9888), 0x004d0000 },
	{ _MMIO(0x9888), 0x000d0d40 },
	{ _MMIO(0x9888), 0x020d803f },
	{ _MMIO(0x9888), 0x040d8023 },
	{ _MMIO(0x9888), 0x100d0000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x020f0010 },
	{ _MMIO(0x9888), 0x000f0000 },
	{ _MMIO(0x9888), 0x0e0f0050 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41901400 },
	{ _MMIO(0x9888), 0x43901485 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900001 },
	{ _MMIO(0x9888), 0x33900000 },
};
1840
1841static int
1842get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
1843 const struct i915_oa_reg **regs,
1844 int *lens)
1845{
1846 int n = 0;
1847
1848 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1849 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1850
1851 regs[n] = mux_config_rasterizer_and_pixel_backend;
1852 lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
1853 n++;
1854
1855 return n;
1856}
1857
/* Boolean-counter programming (0x27xx range) for the SAMPLER metric set. */
static const struct i915_oa_reg b_counter_config_sampler[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x70800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x0000c000 },
	{ _MMIO(0x2774), 0x0000e7ff },
	{ _MMIO(0x2778), 0x00003000 },
	{ _MMIO(0x277c), 0x0000f9ff },
	{ _MMIO(0x2780), 0x00000c00 },
	{ _MMIO(0x2784), 0x0000fe7f },
};
1872
/* Flex-EU counter programming for the SAMPLER metric set. */
static const struct i915_oa_reg flex_eu_config_sampler[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1882
/* MUX programming for the SAMPLER metric set: every entry writes the 0x9888
 * select register (generated table; do not hand-edit).
 */
static const struct i915_oa_reg mux_config_sampler[] = {
	{ _MMIO(0x9888), 0x14152c00 },
	{ _MMIO(0x9888), 0x16150005 },
	{ _MMIO(0x9888), 0x121600a0 },
	{ _MMIO(0x9888), 0x14352c00 },
	{ _MMIO(0x9888), 0x16350005 },
	{ _MMIO(0x9888), 0x123600a0 },
	{ _MMIO(0x9888), 0x14552c00 },
	{ _MMIO(0x9888), 0x16550005 },
	{ _MMIO(0x9888), 0x125600a0 },
	{ _MMIO(0x9888), 0x062f6000 },
	{ _MMIO(0x9888), 0x022f2000 },
	{ _MMIO(0x9888), 0x0c4c0050 },
	{ _MMIO(0x9888), 0x0a4c0010 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f0350 },
	{ _MMIO(0x9888), 0x0c0fb000 },
	{ _MMIO(0x9888), 0x0e0f00da },
	{ _MMIO(0x9888), 0x182c0028 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x022dc000 },
	{ _MMIO(0x9888), 0x042d4000 },
	{ _MMIO(0x9888), 0x0c138000 },
	{ _MMIO(0x9888), 0x0e132000 },
	{ _MMIO(0x9888), 0x0413c000 },
	{ _MMIO(0x9888), 0x1c140018 },
	{ _MMIO(0x9888), 0x0c157000 },
	{ _MMIO(0x9888), 0x0e150078 },
	{ _MMIO(0x9888), 0x10150000 },
	{ _MMIO(0x9888), 0x04162180 },
	{ _MMIO(0x9888), 0x02160000 },
	{ _MMIO(0x9888), 0x04174000 },
	{ _MMIO(0x9888), 0x0233a000 },
	{ _MMIO(0x9888), 0x04333000 },
	{ _MMIO(0x9888), 0x14348000 },
	{ _MMIO(0x9888), 0x16348000 },
	{ _MMIO(0x9888), 0x02357870 },
	{ _MMIO(0x9888), 0x10350000 },
	{ _MMIO(0x9888), 0x04360043 },
	{ _MMIO(0x9888), 0x02360000 },
	{ _MMIO(0x9888), 0x04371000 },
	{ _MMIO(0x9888), 0x0e538000 },
	{ _MMIO(0x9888), 0x00538000 },
	{ _MMIO(0x9888), 0x06533000 },
	{ _MMIO(0x9888), 0x1c540020 },
	{ _MMIO(0x9888), 0x12548000 },
	{ _MMIO(0x9888), 0x0e557000 },
	{ _MMIO(0x9888), 0x00557800 },
	{ _MMIO(0x9888), 0x10550000 },
	{ _MMIO(0x9888), 0x06560043 },
	{ _MMIO(0x9888), 0x02560000 },
	{ _MMIO(0x9888), 0x06571000 },
	{ _MMIO(0x9888), 0x1190ff80 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900060 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900060 },
};
1953
1954static int
1955get_sampler_mux_config(struct drm_i915_private *dev_priv,
1956 const struct i915_oa_reg **regs,
1957 int *lens)
1958{
1959 int n = 0;
1960
1961 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1962 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1963
1964 regs[n] = mux_config_sampler;
1965 lens[n] = ARRAY_SIZE(mux_config_sampler);
1966 n++;
1967
1968 return n;
1969}
1970
/* Boolean-counter programming (0x27xx range) for the TDL_1 metric set. */
static const struct i915_oa_reg b_counter_config_tdl_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x00007fff },
	{ _MMIO(0x2778), 0x00000000 },
	{ _MMIO(0x277c), 0x00009fff },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000efff },
	{ _MMIO(0x2788), 0x00000000 },
	{ _MMIO(0x278c), 0x0000f3ff },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000fdff },
	{ _MMIO(0x2798), 0x00000000 },
	{ _MMIO(0x279c), 0x0000fe7f },
};
1991
/* Flex-EU counter programming for the TDL_1 metric set. */
static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
2001
/* MUX programming for the TDL_1 metric set: every entry writes the 0x9888
 * select register (generated table; do not hand-edit).
 */
static const struct i915_oa_reg mux_config_tdl_1[] = {
	{ _MMIO(0x9888), 0x12120000 },
	{ _MMIO(0x9888), 0x12320000 },
	{ _MMIO(0x9888), 0x12520000 },
	{ _MMIO(0x9888), 0x002f8000 },
	{ _MMIO(0x9888), 0x022f3000 },
	{ _MMIO(0x9888), 0x0a4c0015 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f03a0 },
	{ _MMIO(0x9888), 0x0c0ff000 },
	{ _MMIO(0x9888), 0x0e0f0095 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2d8000 },
	{ _MMIO(0x9888), 0x0e2d4000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x02108000 },
	{ _MMIO(0x9888), 0x0410c000 },
	{ _MMIO(0x9888), 0x02118000 },
	{ _MMIO(0x9888), 0x0411c000 },
	{ _MMIO(0x9888), 0x02121880 },
	{ _MMIO(0x9888), 0x041219b5 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x02134000 },
	{ _MMIO(0x9888), 0x04135000 },
	{ _MMIO(0x9888), 0x0c308000 },
	{ _MMIO(0x9888), 0x0e304000 },
	{ _MMIO(0x9888), 0x06304000 },
	{ _MMIO(0x9888), 0x0c318000 },
	{ _MMIO(0x9888), 0x0e314000 },
	{ _MMIO(0x9888), 0x06314000 },
	{ _MMIO(0x9888), 0x0c321a80 },
	{ _MMIO(0x9888), 0x0e320033 },
	{ _MMIO(0x9888), 0x06320031 },
	{ _MMIO(0x9888), 0x00320000 },
	{ _MMIO(0x9888), 0x0c334000 },
	{ _MMIO(0x9888), 0x0e331000 },
	{ _MMIO(0x9888), 0x06331000 },
	{ _MMIO(0x9888), 0x0e508000 },
	{ _MMIO(0x9888), 0x00508000 },
	{ _MMIO(0x9888), 0x02504000 },
	{ _MMIO(0x9888), 0x0e518000 },
	{ _MMIO(0x9888), 0x00518000 },
	{ _MMIO(0x9888), 0x02514000 },
	{ _MMIO(0x9888), 0x0e521880 },
	{ _MMIO(0x9888), 0x00521a80 },
	{ _MMIO(0x9888), 0x02520033 },
	{ _MMIO(0x9888), 0x0e534000 },
	{ _MMIO(0x9888), 0x00534000 },
	{ _MMIO(0x9888), 0x02531000 },
	{ _MMIO(0x9888), 0x1190ff80 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900062 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
};
2071
2072static int
2073get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
2074 const struct i915_oa_reg **regs,
2075 int *lens)
2076{
2077 int n = 0;
2078
2079 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
2080 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
2081
2082 regs[n] = mux_config_tdl_1;
2083 lens[n] = ARRAY_SIZE(mux_config_tdl_1);
2084 n++;
2085
2086 return n;
2087}
2088
/* Boolean-counter programming (0x27xx range) for the TDL_2 metric set. */
static const struct i915_oa_reg b_counter_config_tdl_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};
2097
/* Flex-EU counter programming for the TDL_2 metric set. */
static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
2107
/* MUX programming for the TDL_2 metric set: every entry writes the 0x9888
 * select register (generated table; do not hand-edit).
 */
static const struct i915_oa_reg mux_config_tdl_2[] = {
	{ _MMIO(0x9888), 0x12124d60 },
	{ _MMIO(0x9888), 0x12322e60 },
	{ _MMIO(0x9888), 0x12524d60 },
	{ _MMIO(0x9888), 0x022f3000 },
	{ _MMIO(0x9888), 0x0a4c0014 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0fe000 },
	{ _MMIO(0x9888), 0x0e0f0097 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x002d8000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x0410c000 },
	{ _MMIO(0x9888), 0x0411c000 },
	{ _MMIO(0x9888), 0x04121fb7 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x04135000 },
	{ _MMIO(0x9888), 0x00308000 },
	{ _MMIO(0x9888), 0x06304000 },
	{ _MMIO(0x9888), 0x00318000 },
	{ _MMIO(0x9888), 0x06314000 },
	{ _MMIO(0x9888), 0x00321b80 },
	{ _MMIO(0x9888), 0x0632003f },
	{ _MMIO(0x9888), 0x00334000 },
	{ _MMIO(0x9888), 0x06331000 },
	{ _MMIO(0x9888), 0x0250c000 },
	{ _MMIO(0x9888), 0x0251c000 },
	{ _MMIO(0x9888), 0x02521fb7 },
	{ _MMIO(0x9888), 0x00520000 },
	{ _MMIO(0x9888), 0x02535000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900800 },
	{ _MMIO(0x9888), 0x43900063 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
	{ _MMIO(0x9888), 0x33900000 },
};
2151
2152static int
2153get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
2154 const struct i915_oa_reg **regs,
2155 int *lens)
2156{
2157 int n = 0;
2158
2159 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
2160 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
2161
2162 regs[n] = mux_config_tdl_2;
2163 lens[n] = ARRAY_SIZE(mux_config_tdl_2);
2164 n++;
2165
2166 return n;
2167}
2168
/* Boolean-counter programming (0x27xx range) for the COMPUTE_EXTRA set. */
static const struct i915_oa_reg b_counter_config_compute_extra[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};
2177
/* Flex-EU counter programming for the COMPUTE_EXTRA metric set (note: the
 * values differ from the shared table most other sets use).
 */
static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
	{ _MMIO(0xe458), 0x00001000 },
	{ _MMIO(0xe558), 0x00003002 },
	{ _MMIO(0xe658), 0x00005004 },
	{ _MMIO(0xe758), 0x00011010 },
	{ _MMIO(0xe45c), 0x00050012 },
	{ _MMIO(0xe55c), 0x00052051 },
	{ _MMIO(0xe65c), 0x00000008 },
};
2187
/* MUX programming for the COMPUTE_EXTRA metric set: every entry writes the
 * 0x9888 select register (generated table).
 */
static const struct i915_oa_reg mux_config_compute_extra[] = {
	{ _MMIO(0x9888), 0x121203e0 },
	{ _MMIO(0x9888), 0x123203e0 },
	{ _MMIO(0x9888), 0x125203e0 },
	{ _MMIO(0x9888), 0x022f4000 },
	{ _MMIO(0x9888), 0x0a4c0040 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0e0f006c },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x042d8000 },
	{ _MMIO(0x9888), 0x06104000 },
	{ _MMIO(0x9888), 0x06114000 },
	{ _MMIO(0x9888), 0x06120033 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x06131000 },
	{ _MMIO(0x9888), 0x04308000 },
	{ _MMIO(0x9888), 0x04318000 },
	{ _MMIO(0x9888), 0x04321980 },
	{ _MMIO(0x9888), 0x00320000 },
	{ _MMIO(0x9888), 0x04334000 },
	{ _MMIO(0x9888), 0x04504000 },
	{ _MMIO(0x9888), 0x04514000 },
	{ _MMIO(0x9888), 0x04520033 },
	{ _MMIO(0x9888), 0x00520000 },
	{ _MMIO(0x9888), 0x04531000 },
	{ _MMIO(0x9888), 0x1190e000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x43900c00 },
	{ _MMIO(0x9888), 0x45900002 },
	{ _MMIO(0x9888), 0x33900000 },
};
2221
2222static int
2223get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
2224 const struct i915_oa_reg **regs,
2225 int *lens)
2226{
2227 int n = 0;
2228
2229 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
2230 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
2231
2232 regs[n] = mux_config_compute_extra;
2233 lens[n] = ARRAY_SIZE(mux_config_compute_extra);
2234 n++;
2235
2236 return n;
2237}
2238
/* Boolean-counter programming (0x27xx range) for the VME_PIPE metric set. */
static const struct i915_oa_reg b_counter_config_vme_pipe[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00100030 },
	{ _MMIO(0x2774), 0x0000fff9 },
	{ _MMIO(0x2778), 0x00000002 },
	{ _MMIO(0x277c), 0x0000fffc },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000fff3 },
	{ _MMIO(0x2788), 0x00100180 },
	{ _MMIO(0x278c), 0x0000ffcf },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00000002 },
	{ _MMIO(0x279c), 0x0000ff3f },
};
2258
/* Flex-EU counter programming for the VME_PIPE metric set (only two
 * registers are programmed for this set).
 */
static const struct i915_oa_reg flex_eu_config_vme_pipe[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00008003 },
};
2263
/* MUX programming for the VME_PIPE metric set: every entry writes the 0x9888
 * select register (generated table; do not hand-edit).
 */
static const struct i915_oa_reg mux_config_vme_pipe[] = {
	{ _MMIO(0x9888), 0x141a5800 },
	{ _MMIO(0x9888), 0x161a00c0 },
	{ _MMIO(0x9888), 0x12180240 },
	{ _MMIO(0x9888), 0x14180002 },
	{ _MMIO(0x9888), 0x143a5800 },
	{ _MMIO(0x9888), 0x163a00c0 },
	{ _MMIO(0x9888), 0x12380240 },
	{ _MMIO(0x9888), 0x14380002 },
	{ _MMIO(0x9888), 0x002f1000 },
	{ _MMIO(0x9888), 0x022f8000 },
	{ _MMIO(0x9888), 0x042f3000 },
	{ _MMIO(0x9888), 0x004c4000 },
	{ _MMIO(0x9888), 0x0a4c1500 },
	{ _MMIO(0x9888), 0x000d2000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0c0f0400 },
	{ _MMIO(0x9888), 0x0e0f9500 },
	{ _MMIO(0x9888), 0x100f002a },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2c8000 },
	{ _MMIO(0x9888), 0x162c0a00 },
	{ _MMIO(0x9888), 0x0a2dc000 },
	{ _MMIO(0x9888), 0x0c2dc000 },
	{ _MMIO(0x9888), 0x04193000 },
	{ _MMIO(0x9888), 0x081a28c1 },
	{ _MMIO(0x9888), 0x001a0000 },
	{ _MMIO(0x9888), 0x00133000 },
	{ _MMIO(0x9888), 0x0613c000 },
	{ _MMIO(0x9888), 0x0813f000 },
	{ _MMIO(0x9888), 0x00172000 },
	{ _MMIO(0x9888), 0x06178000 },
	{ _MMIO(0x9888), 0x0817a000 },
	{ _MMIO(0x9888), 0x00180037 },
	{ _MMIO(0x9888), 0x06180940 },
	{ _MMIO(0x9888), 0x08180000 },
	{ _MMIO(0x9888), 0x02180000 },
	{ _MMIO(0x9888), 0x04183000 },
	{ _MMIO(0x9888), 0x06393000 },
	{ _MMIO(0x9888), 0x0c3a28c1 },
	{ _MMIO(0x9888), 0x003a0000 },
	{ _MMIO(0x9888), 0x0a33f000 },
	{ _MMIO(0x9888), 0x0c33f000 },
	{ _MMIO(0x9888), 0x0a37a000 },
	{ _MMIO(0x9888), 0x0c37a000 },
	{ _MMIO(0x9888), 0x0a380977 },
	{ _MMIO(0x9888), 0x08380000 },
	{ _MMIO(0x9888), 0x04380000 },
	{ _MMIO(0x9888), 0x06383000 },
	{ _MMIO(0x9888), 0x119000ff },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900040 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900800 },
	{ _MMIO(0x9888), 0x47901000 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900844 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
2327
2328static int
2329get_vme_pipe_mux_config(struct drm_i915_private *dev_priv,
2330 const struct i915_oa_reg **regs,
2331 int *lens)
2332{
2333 int n = 0;
2334
2335 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
2336 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
2337
2338 regs[n] = mux_config_vme_pipe;
2339 lens[n] = ARRAY_SIZE(mux_config_vme_pipe);
2340 n++;
2341
2342 return n;
2343}
2344
/* Boolean-counter programming (0x27xx range) for the TEST_OA metric set. */
static const struct i915_oa_reg b_counter_config_test_oa[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2770), 0x00000004 },
	{ _MMIO(0x2774), 0x00000000 },
	{ _MMIO(0x2778), 0x00000003 },
	{ _MMIO(0x277c), 0x00000000 },
	{ _MMIO(0x2780), 0x00000007 },
	{ _MMIO(0x2784), 0x00000000 },
	{ _MMIO(0x2788), 0x00100002 },
	{ _MMIO(0x278c), 0x0000fff7 },
	{ _MMIO(0x2790), 0x00100002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00100082 },
	{ _MMIO(0x279c), 0x0000ffef },
	{ _MMIO(0x27a0), 0x001000c2 },
	{ _MMIO(0x27a4), 0x0000ffe7 },
	{ _MMIO(0x27a8), 0x00100001 },
	{ _MMIO(0x27ac), 0x0000ffe7 },
};
2368
/* TEST_OA programs no flex-EU counters; the empty table lets the metric-set
 * selection code treat every set uniformly.
 */
static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
2371
/* MUX programming for the TEST_OA metric set: every entry writes the 0x9888
 * select register (generated table).
 */
static const struct i915_oa_reg mux_config_test_oa[] = {
	{ _MMIO(0x9888), 0x11810000 },
	{ _MMIO(0x9888), 0x07810016 },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x1b930040 },
	{ _MMIO(0x9888), 0x07e54000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
2386
2387static int
2388get_test_oa_mux_config(struct drm_i915_private *dev_priv,
2389 const struct i915_oa_reg **regs,
2390 int *lens)
2391{
2392 int n = 0;
2393
2394 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
2395 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
2396
2397 regs[n] = mux_config_test_oa;
2398 lens[n] = ARRAY_SIZE(mux_config_test_oa);
2399 n++;
2400
2401 return n;
2402}
2403
2404int i915_oa_select_metric_set_sklgt2(struct drm_i915_private *dev_priv)
2405{
2406 dev_priv->perf.oa.n_mux_configs = 0;
2407 dev_priv->perf.oa.b_counter_regs = NULL;
2408 dev_priv->perf.oa.b_counter_regs_len = 0;
2409 dev_priv->perf.oa.flex_regs = NULL;
2410 dev_priv->perf.oa.flex_regs_len = 0;
2411
2412 switch (dev_priv->perf.oa.metrics_set) {
2413 case METRIC_SET_ID_RENDER_BASIC:
2414 dev_priv->perf.oa.n_mux_configs =
2415 get_render_basic_mux_config(dev_priv,
2416 dev_priv->perf.oa.mux_regs,
2417 dev_priv->perf.oa.mux_regs_lens);
2418 if (dev_priv->perf.oa.n_mux_configs == 0) {
2419 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
2420
2421 /* EINVAL because *_register_sysfs already checked this
2422 * and so it wouldn't have been advertised to userspace and
2423 * so shouldn't have been requested
2424 */
2425 return -EINVAL;
2426 }
2427
2428 dev_priv->perf.oa.b_counter_regs =
2429 b_counter_config_render_basic;
2430 dev_priv->perf.oa.b_counter_regs_len =
2431 ARRAY_SIZE(b_counter_config_render_basic);
2432
2433 dev_priv->perf.oa.flex_regs =
2434 flex_eu_config_render_basic;
2435 dev_priv->perf.oa.flex_regs_len =
2436 ARRAY_SIZE(flex_eu_config_render_basic);
2437
2438 return 0;
2439 case METRIC_SET_ID_COMPUTE_BASIC:
2440 dev_priv->perf.oa.n_mux_configs =
2441 get_compute_basic_mux_config(dev_priv,
2442 dev_priv->perf.oa.mux_regs,
2443 dev_priv->perf.oa.mux_regs_lens);
2444 if (dev_priv->perf.oa.n_mux_configs == 0) {
2445 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
2446
2447 /* EINVAL because *_register_sysfs already checked this
2448 * and so it wouldn't have been advertised to userspace and
2449 * so shouldn't have been requested
2450 */
2451 return -EINVAL;
2452 }
2453
2454 dev_priv->perf.oa.b_counter_regs =
2455 b_counter_config_compute_basic;
2456 dev_priv->perf.oa.b_counter_regs_len =
2457 ARRAY_SIZE(b_counter_config_compute_basic);
2458
2459 dev_priv->perf.oa.flex_regs =
2460 flex_eu_config_compute_basic;
2461 dev_priv->perf.oa.flex_regs_len =
2462 ARRAY_SIZE(flex_eu_config_compute_basic);
2463
2464 return 0;
2465 case METRIC_SET_ID_RENDER_PIPE_PROFILE:
2466 dev_priv->perf.oa.n_mux_configs =
2467 get_render_pipe_profile_mux_config(dev_priv,
2468 dev_priv->perf.oa.mux_regs,
2469 dev_priv->perf.oa.mux_regs_lens);
2470 if (dev_priv->perf.oa.n_mux_configs == 0) {
2471 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
2472
2473 /* EINVAL because *_register_sysfs already checked this
2474 * and so it wouldn't have been advertised to userspace and
2475 * so shouldn't have been requested
2476 */
2477 return -EINVAL;
2478 }
2479
2480 dev_priv->perf.oa.b_counter_regs =
2481 b_counter_config_render_pipe_profile;
2482 dev_priv->perf.oa.b_counter_regs_len =
2483 ARRAY_SIZE(b_counter_config_render_pipe_profile);
2484
2485 dev_priv->perf.oa.flex_regs =
2486 flex_eu_config_render_pipe_profile;
2487 dev_priv->perf.oa.flex_regs_len =
2488 ARRAY_SIZE(flex_eu_config_render_pipe_profile);
2489
2490 return 0;
2491 case METRIC_SET_ID_MEMORY_READS:
2492 dev_priv->perf.oa.n_mux_configs =
2493 get_memory_reads_mux_config(dev_priv,
2494 dev_priv->perf.oa.mux_regs,
2495 dev_priv->perf.oa.mux_regs_lens);
2496 if (dev_priv->perf.oa.n_mux_configs == 0) {
2497 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
2498
2499 /* EINVAL because *_register_sysfs already checked this
2500 * and so it wouldn't have been advertised to userspace and
2501 * so shouldn't have been requested
2502 */
2503 return -EINVAL;
2504 }
2505
2506 dev_priv->perf.oa.b_counter_regs =
2507 b_counter_config_memory_reads;
2508 dev_priv->perf.oa.b_counter_regs_len =
2509 ARRAY_SIZE(b_counter_config_memory_reads);
2510
2511 dev_priv->perf.oa.flex_regs =
2512 flex_eu_config_memory_reads;
2513 dev_priv->perf.oa.flex_regs_len =
2514 ARRAY_SIZE(flex_eu_config_memory_reads);
2515
2516 return 0;
2517 case METRIC_SET_ID_MEMORY_WRITES:
2518 dev_priv->perf.oa.n_mux_configs =
2519 get_memory_writes_mux_config(dev_priv,
2520 dev_priv->perf.oa.mux_regs,
2521 dev_priv->perf.oa.mux_regs_lens);
2522 if (dev_priv->perf.oa.n_mux_configs == 0) {
2523 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
2524
2525 /* EINVAL because *_register_sysfs already checked this
2526 * and so it wouldn't have been advertised to userspace and
2527 * so shouldn't have been requested
2528 */
2529 return -EINVAL;
2530 }
2531
2532 dev_priv->perf.oa.b_counter_regs =
2533 b_counter_config_memory_writes;
2534 dev_priv->perf.oa.b_counter_regs_len =
2535 ARRAY_SIZE(b_counter_config_memory_writes);
2536
2537 dev_priv->perf.oa.flex_regs =
2538 flex_eu_config_memory_writes;
2539 dev_priv->perf.oa.flex_regs_len =
2540 ARRAY_SIZE(flex_eu_config_memory_writes);
2541
2542 return 0;
2543 case METRIC_SET_ID_COMPUTE_EXTENDED:
2544 dev_priv->perf.oa.n_mux_configs =
2545 get_compute_extended_mux_config(dev_priv,
2546 dev_priv->perf.oa.mux_regs,
2547 dev_priv->perf.oa.mux_regs_lens);
2548 if (dev_priv->perf.oa.n_mux_configs == 0) {
2549 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
2550
2551 /* EINVAL because *_register_sysfs already checked this
2552 * and so it wouldn't have been advertised to userspace and
2553 * so shouldn't have been requested
2554 */
2555 return -EINVAL;
2556 }
2557
2558 dev_priv->perf.oa.b_counter_regs =
2559 b_counter_config_compute_extended;
2560 dev_priv->perf.oa.b_counter_regs_len =
2561 ARRAY_SIZE(b_counter_config_compute_extended);
2562
2563 dev_priv->perf.oa.flex_regs =
2564 flex_eu_config_compute_extended;
2565 dev_priv->perf.oa.flex_regs_len =
2566 ARRAY_SIZE(flex_eu_config_compute_extended);
2567
2568 return 0;
2569 case METRIC_SET_ID_COMPUTE_L3_CACHE:
2570 dev_priv->perf.oa.n_mux_configs =
2571 get_compute_l3_cache_mux_config(dev_priv,
2572 dev_priv->perf.oa.mux_regs,
2573 dev_priv->perf.oa.mux_regs_lens);
2574 if (dev_priv->perf.oa.n_mux_configs == 0) {
2575 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
2576
2577 /* EINVAL because *_register_sysfs already checked this
2578 * and so it wouldn't have been advertised to userspace and
2579 * so shouldn't have been requested
2580 */
2581 return -EINVAL;
2582 }
2583
2584 dev_priv->perf.oa.b_counter_regs =
2585 b_counter_config_compute_l3_cache;
2586 dev_priv->perf.oa.b_counter_regs_len =
2587 ARRAY_SIZE(b_counter_config_compute_l3_cache);
2588
2589 dev_priv->perf.oa.flex_regs =
2590 flex_eu_config_compute_l3_cache;
2591 dev_priv->perf.oa.flex_regs_len =
2592 ARRAY_SIZE(flex_eu_config_compute_l3_cache);
2593
2594 return 0;
2595 case METRIC_SET_ID_HDC_AND_SF:
2596 dev_priv->perf.oa.n_mux_configs =
2597 get_hdc_and_sf_mux_config(dev_priv,
2598 dev_priv->perf.oa.mux_regs,
2599 dev_priv->perf.oa.mux_regs_lens);
2600 if (dev_priv->perf.oa.n_mux_configs == 0) {
2601 DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
2602
2603 /* EINVAL because *_register_sysfs already checked this
2604 * and so it wouldn't have been advertised to userspace and
2605 * so shouldn't have been requested
2606 */
2607 return -EINVAL;
2608 }
2609
2610 dev_priv->perf.oa.b_counter_regs =
2611 b_counter_config_hdc_and_sf;
2612 dev_priv->perf.oa.b_counter_regs_len =
2613 ARRAY_SIZE(b_counter_config_hdc_and_sf);
2614
2615 dev_priv->perf.oa.flex_regs =
2616 flex_eu_config_hdc_and_sf;
2617 dev_priv->perf.oa.flex_regs_len =
2618 ARRAY_SIZE(flex_eu_config_hdc_and_sf);
2619
2620 return 0;
2621 case METRIC_SET_ID_L3_1:
2622 dev_priv->perf.oa.n_mux_configs =
2623 get_l3_1_mux_config(dev_priv,
2624 dev_priv->perf.oa.mux_regs,
2625 dev_priv->perf.oa.mux_regs_lens);
2626 if (dev_priv->perf.oa.n_mux_configs == 0) {
2627 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
2628
2629 /* EINVAL because *_register_sysfs already checked this
2630 * and so it wouldn't have been advertised to userspace and
2631 * so shouldn't have been requested
2632 */
2633 return -EINVAL;
2634 }
2635
2636 dev_priv->perf.oa.b_counter_regs =
2637 b_counter_config_l3_1;
2638 dev_priv->perf.oa.b_counter_regs_len =
2639 ARRAY_SIZE(b_counter_config_l3_1);
2640
2641 dev_priv->perf.oa.flex_regs =
2642 flex_eu_config_l3_1;
2643 dev_priv->perf.oa.flex_regs_len =
2644 ARRAY_SIZE(flex_eu_config_l3_1);
2645
2646 return 0;
2647 case METRIC_SET_ID_L3_2:
2648 dev_priv->perf.oa.n_mux_configs =
2649 get_l3_2_mux_config(dev_priv,
2650 dev_priv->perf.oa.mux_regs,
2651 dev_priv->perf.oa.mux_regs_lens);
2652 if (dev_priv->perf.oa.n_mux_configs == 0) {
2653 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
2654
2655 /* EINVAL because *_register_sysfs already checked this
2656 * and so it wouldn't have been advertised to userspace and
2657 * so shouldn't have been requested
2658 */
2659 return -EINVAL;
2660 }
2661
2662 dev_priv->perf.oa.b_counter_regs =
2663 b_counter_config_l3_2;
2664 dev_priv->perf.oa.b_counter_regs_len =
2665 ARRAY_SIZE(b_counter_config_l3_2);
2666
2667 dev_priv->perf.oa.flex_regs =
2668 flex_eu_config_l3_2;
2669 dev_priv->perf.oa.flex_regs_len =
2670 ARRAY_SIZE(flex_eu_config_l3_2);
2671
2672 return 0;
2673 case METRIC_SET_ID_L3_3:
2674 dev_priv->perf.oa.n_mux_configs =
2675 get_l3_3_mux_config(dev_priv,
2676 dev_priv->perf.oa.mux_regs,
2677 dev_priv->perf.oa.mux_regs_lens);
2678 if (dev_priv->perf.oa.n_mux_configs == 0) {
2679 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
2680
2681 /* EINVAL because *_register_sysfs already checked this
2682 * and so it wouldn't have been advertised to userspace and
2683 * so shouldn't have been requested
2684 */
2685 return -EINVAL;
2686 }
2687
2688 dev_priv->perf.oa.b_counter_regs =
2689 b_counter_config_l3_3;
2690 dev_priv->perf.oa.b_counter_regs_len =
2691 ARRAY_SIZE(b_counter_config_l3_3);
2692
2693 dev_priv->perf.oa.flex_regs =
2694 flex_eu_config_l3_3;
2695 dev_priv->perf.oa.flex_regs_len =
2696 ARRAY_SIZE(flex_eu_config_l3_3);
2697
2698 return 0;
2699 case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
2700 dev_priv->perf.oa.n_mux_configs =
2701 get_rasterizer_and_pixel_backend_mux_config(dev_priv,
2702 dev_priv->perf.oa.mux_regs,
2703 dev_priv->perf.oa.mux_regs_lens);
2704 if (dev_priv->perf.oa.n_mux_configs == 0) {
2705 DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
2706
2707 /* EINVAL because *_register_sysfs already checked this
2708 * and so it wouldn't have been advertised to userspace and
2709 * so shouldn't have been requested
2710 */
2711 return -EINVAL;
2712 }
2713
2714 dev_priv->perf.oa.b_counter_regs =
2715 b_counter_config_rasterizer_and_pixel_backend;
2716 dev_priv->perf.oa.b_counter_regs_len =
2717 ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
2718
2719 dev_priv->perf.oa.flex_regs =
2720 flex_eu_config_rasterizer_and_pixel_backend;
2721 dev_priv->perf.oa.flex_regs_len =
2722 ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
2723
2724 return 0;
2725 case METRIC_SET_ID_SAMPLER:
2726 dev_priv->perf.oa.n_mux_configs =
2727 get_sampler_mux_config(dev_priv,
2728 dev_priv->perf.oa.mux_regs,
2729 dev_priv->perf.oa.mux_regs_lens);
2730 if (dev_priv->perf.oa.n_mux_configs == 0) {
2731 DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
2732
2733 /* EINVAL because *_register_sysfs already checked this
2734 * and so it wouldn't have been advertised to userspace and
2735 * so shouldn't have been requested
2736 */
2737 return -EINVAL;
2738 }
2739
2740 dev_priv->perf.oa.b_counter_regs =
2741 b_counter_config_sampler;
2742 dev_priv->perf.oa.b_counter_regs_len =
2743 ARRAY_SIZE(b_counter_config_sampler);
2744
2745 dev_priv->perf.oa.flex_regs =
2746 flex_eu_config_sampler;
2747 dev_priv->perf.oa.flex_regs_len =
2748 ARRAY_SIZE(flex_eu_config_sampler);
2749
2750 return 0;
2751 case METRIC_SET_ID_TDL_1:
2752 dev_priv->perf.oa.n_mux_configs =
2753 get_tdl_1_mux_config(dev_priv,
2754 dev_priv->perf.oa.mux_regs,
2755 dev_priv->perf.oa.mux_regs_lens);
2756 if (dev_priv->perf.oa.n_mux_configs == 0) {
2757 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
2758
2759 /* EINVAL because *_register_sysfs already checked this
2760 * and so it wouldn't have been advertised to userspace and
2761 * so shouldn't have been requested
2762 */
2763 return -EINVAL;
2764 }
2765
2766 dev_priv->perf.oa.b_counter_regs =
2767 b_counter_config_tdl_1;
2768 dev_priv->perf.oa.b_counter_regs_len =
2769 ARRAY_SIZE(b_counter_config_tdl_1);
2770
2771 dev_priv->perf.oa.flex_regs =
2772 flex_eu_config_tdl_1;
2773 dev_priv->perf.oa.flex_regs_len =
2774 ARRAY_SIZE(flex_eu_config_tdl_1);
2775
2776 return 0;
2777 case METRIC_SET_ID_TDL_2:
2778 dev_priv->perf.oa.n_mux_configs =
2779 get_tdl_2_mux_config(dev_priv,
2780 dev_priv->perf.oa.mux_regs,
2781 dev_priv->perf.oa.mux_regs_lens);
2782 if (dev_priv->perf.oa.n_mux_configs == 0) {
2783 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
2784
2785 /* EINVAL because *_register_sysfs already checked this
2786 * and so it wouldn't have been advertised to userspace and
2787 * so shouldn't have been requested
2788 */
2789 return -EINVAL;
2790 }
2791
2792 dev_priv->perf.oa.b_counter_regs =
2793 b_counter_config_tdl_2;
2794 dev_priv->perf.oa.b_counter_regs_len =
2795 ARRAY_SIZE(b_counter_config_tdl_2);
2796
2797 dev_priv->perf.oa.flex_regs =
2798 flex_eu_config_tdl_2;
2799 dev_priv->perf.oa.flex_regs_len =
2800 ARRAY_SIZE(flex_eu_config_tdl_2);
2801
2802 return 0;
2803 case METRIC_SET_ID_COMPUTE_EXTRA:
2804 dev_priv->perf.oa.n_mux_configs =
2805 get_compute_extra_mux_config(dev_priv,
2806 dev_priv->perf.oa.mux_regs,
2807 dev_priv->perf.oa.mux_regs_lens);
2808 if (dev_priv->perf.oa.n_mux_configs == 0) {
2809 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
2810
2811 /* EINVAL because *_register_sysfs already checked this
2812 * and so it wouldn't have been advertised to userspace and
2813 * so shouldn't have been requested
2814 */
2815 return -EINVAL;
2816 }
2817
2818 dev_priv->perf.oa.b_counter_regs =
2819 b_counter_config_compute_extra;
2820 dev_priv->perf.oa.b_counter_regs_len =
2821 ARRAY_SIZE(b_counter_config_compute_extra);
2822
2823 dev_priv->perf.oa.flex_regs =
2824 flex_eu_config_compute_extra;
2825 dev_priv->perf.oa.flex_regs_len =
2826 ARRAY_SIZE(flex_eu_config_compute_extra);
2827
2828 return 0;
2829 case METRIC_SET_ID_VME_PIPE:
2830 dev_priv->perf.oa.n_mux_configs =
2831 get_vme_pipe_mux_config(dev_priv,
2832 dev_priv->perf.oa.mux_regs,
2833 dev_priv->perf.oa.mux_regs_lens);
2834 if (dev_priv->perf.oa.n_mux_configs == 0) {
2835 DRM_DEBUG_DRIVER("No suitable MUX config for \"VME_PIPE\" metric set\n");
2836
2837 /* EINVAL because *_register_sysfs already checked this
2838 * and so it wouldn't have been advertised to userspace and
2839 * so shouldn't have been requested
2840 */
2841 return -EINVAL;
2842 }
2843
2844 dev_priv->perf.oa.b_counter_regs =
2845 b_counter_config_vme_pipe;
2846 dev_priv->perf.oa.b_counter_regs_len =
2847 ARRAY_SIZE(b_counter_config_vme_pipe);
2848
2849 dev_priv->perf.oa.flex_regs =
2850 flex_eu_config_vme_pipe;
2851 dev_priv->perf.oa.flex_regs_len =
2852 ARRAY_SIZE(flex_eu_config_vme_pipe);
2853
2854 return 0;
2855 case METRIC_SET_ID_TEST_OA:
2856 dev_priv->perf.oa.n_mux_configs =
2857 get_test_oa_mux_config(dev_priv,
2858 dev_priv->perf.oa.mux_regs,
2859 dev_priv->perf.oa.mux_regs_lens);
2860 if (dev_priv->perf.oa.n_mux_configs == 0) {
2861 DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
2862
2863 /* EINVAL because *_register_sysfs already checked this
2864 * and so it wouldn't have been advertised to userspace and
2865 * so shouldn't have been requested
2866 */
2867 return -EINVAL;
2868 }
2869
2870 dev_priv->perf.oa.b_counter_regs =
2871 b_counter_config_test_oa;
2872 dev_priv->perf.oa.b_counter_regs_len =
2873 ARRAY_SIZE(b_counter_config_test_oa);
2874
2875 dev_priv->perf.oa.flex_regs =
2876 flex_eu_config_test_oa;
2877 dev_priv->perf.oa.flex_regs_len =
2878 ARRAY_SIZE(flex_eu_config_test_oa);
2879
2880 return 0;
2881 default:
2882 return -ENODEV;
2883 }
2884}
2885
/* sysfs "id" show callback for the RENDER_BASIC metric set. */
static ssize_t
show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
}

static struct device_attribute dev_attr_render_basic_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_render_basic_id,
	.store = NULL,
};

static struct attribute *attrs_render_basic[] = {
	&dev_attr_render_basic_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_render_basic = {
	.name = "f519e481-24d2-4d42-87c9-3fdd12c00202",
	.attrs = attrs_render_basic,
};
2907
/* sysfs "id" show callback for the COMPUTE_BASIC metric set. */
static ssize_t
show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
}

static struct device_attribute dev_attr_compute_basic_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_compute_basic_id,
	.store = NULL,
};

static struct attribute *attrs_compute_basic[] = {
	&dev_attr_compute_basic_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_compute_basic = {
	.name = "fe47b29d-ae51-423e-bff4-27d965a95b60",
	.attrs = attrs_compute_basic,
};
2929
/* sysfs "id" show callback for the RENDER_PIPE_PROFILE metric set. */
static ssize_t
show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
}

static struct device_attribute dev_attr_render_pipe_profile_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_render_pipe_profile_id,
	.store = NULL,
};

static struct attribute *attrs_render_pipe_profile[] = {
	&dev_attr_render_pipe_profile_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_render_pipe_profile = {
	.name = "e0ad5ae0-84ba-4f29-a723-1906c12cb774",
	.attrs = attrs_render_pipe_profile,
};
2951
/* sysfs "id" show callback for the MEMORY_READS metric set. */
static ssize_t
show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
}

static struct device_attribute dev_attr_memory_reads_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_memory_reads_id,
	.store = NULL,
};

static struct attribute *attrs_memory_reads[] = {
	&dev_attr_memory_reads_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_memory_reads = {
	.name = "9bc436dd-6130-4add-affc-283eb6eaa864",
	.attrs = attrs_memory_reads,
};
2973
/* sysfs "id" show callback for the MEMORY_WRITES metric set. */
static ssize_t
show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
}

static struct device_attribute dev_attr_memory_writes_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_memory_writes_id,
	.store = NULL,
};

static struct attribute *attrs_memory_writes[] = {
	&dev_attr_memory_writes_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_memory_writes = {
	.name = "2ea0da8f-3527-4669-9d9d-13099a7435bf",
	.attrs = attrs_memory_writes,
};
2995
/* sysfs "id" show callback for the COMPUTE_EXTENDED metric set. */
static ssize_t
show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
}

static struct device_attribute dev_attr_compute_extended_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_compute_extended_id,
	.store = NULL,
};

static struct attribute *attrs_compute_extended[] = {
	&dev_attr_compute_extended_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_compute_extended = {
	.name = "d97d16af-028b-4cd1-a672-6210cb5513dd",
	.attrs = attrs_compute_extended,
};
3017
/* sysfs "id" show callback for the COMPUTE_L3_CACHE metric set. */
static ssize_t
show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
}

static struct device_attribute dev_attr_compute_l3_cache_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_compute_l3_cache_id,
	.store = NULL,
};

static struct attribute *attrs_compute_l3_cache[] = {
	&dev_attr_compute_l3_cache_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_compute_l3_cache = {
	.name = "9fb22842-e708-43f7-9752-e0e41670c39e",
	.attrs = attrs_compute_l3_cache,
};
3039
/* sysfs "id" show callback for the HDC_AND_SF metric set. */
static ssize_t
show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
}

static struct device_attribute dev_attr_hdc_and_sf_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_hdc_and_sf_id,
	.store = NULL,
};

static struct attribute *attrs_hdc_and_sf[] = {
	&dev_attr_hdc_and_sf_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_hdc_and_sf = {
	.name = "5378e2a1-4248-4188-a4ae-da25a794c603",
	.attrs = attrs_hdc_and_sf,
};
3061
/* sysfs "id" show callback for the L3_1 metric set. */
static ssize_t
show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
}

static struct device_attribute dev_attr_l3_1_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_l3_1_id,
	.store = NULL,
};

static struct attribute *attrs_l3_1[] = {
	&dev_attr_l3_1_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_l3_1 = {
	.name = "f42cdd6a-b000-42cb-870f-5eb423a7f514",
	.attrs = attrs_l3_1,
};
3083
/* sysfs "id" show callback for the L3_2 metric set. */
static ssize_t
show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
}

static struct device_attribute dev_attr_l3_2_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_l3_2_id,
	.store = NULL,
};

static struct attribute *attrs_l3_2[] = {
	&dev_attr_l3_2_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_l3_2 = {
	.name = "b9bf2423-d88c-4a7b-a051-627611d00dcc",
	.attrs = attrs_l3_2,
};
3105
/* sysfs "id" show callback for the L3_3 metric set. */
static ssize_t
show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
}

static struct device_attribute dev_attr_l3_3_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_l3_3_id,
	.store = NULL,
};

static struct attribute *attrs_l3_3[] = {
	&dev_attr_l3_3_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_l3_3 = {
	.name = "2414a93d-d84f-406e-99c0-472161194b40",
	.attrs = attrs_l3_3,
};
3127
/* sysfs "id" show callback for the RASTERIZER_AND_PIXEL_BACKEND metric set. */
static ssize_t
show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
}

static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_rasterizer_and_pixel_backend_id,
	.store = NULL,
};

static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
	&dev_attr_rasterizer_and_pixel_backend_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_rasterizer_and_pixel_backend = {
	.name = "53a45d2d-170b-4cf5-b7bb-585120c8e2f5",
	.attrs = attrs_rasterizer_and_pixel_backend,
};
3149
/* sysfs "id" show callback for the SAMPLER metric set. */
static ssize_t
show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
}

static struct device_attribute dev_attr_sampler_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_sampler_id,
	.store = NULL,
};

static struct attribute *attrs_sampler[] = {
	&dev_attr_sampler_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_sampler = {
	.name = "b4cff514-a91e-4798-a0b3-426ca13fc9c1",
	.attrs = attrs_sampler,
};
3171
/* sysfs "id" show callback for the TDL_1 metric set. */
static ssize_t
show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
}

static struct device_attribute dev_attr_tdl_1_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_tdl_1_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_1[] = {
	&dev_attr_tdl_1_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_tdl_1 = {
	.name = "7821d13b-9b8b-4405-9618-78cd56b62cce",
	.attrs = attrs_tdl_1,
};
3193
/* sysfs "id" show callback for the TDL_2 metric set. */
static ssize_t
show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
}

static struct device_attribute dev_attr_tdl_2_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_tdl_2_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_2[] = {
	&dev_attr_tdl_2_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_tdl_2 = {
	.name = "893f1a4d-919d-4388-8cb7-746d73ea7259",
	.attrs = attrs_tdl_2,
};
3215
/* sysfs "id" show callback for the COMPUTE_EXTRA metric set. */
static ssize_t
show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
}

static struct device_attribute dev_attr_compute_extra_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_compute_extra_id,
	.store = NULL,
};

static struct attribute *attrs_compute_extra[] = {
	&dev_attr_compute_extra_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_compute_extra = {
	.name = "41a24047-7484-4ead-ae37-de907e5ff2b2",
	.attrs = attrs_compute_extra,
};
3237
/* sysfs "id" show callback for the VME_PIPE metric set. */
static ssize_t
show_vme_pipe_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_VME_PIPE);
}

static struct device_attribute dev_attr_vme_pipe_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_vme_pipe_id,
	.store = NULL,
};

static struct attribute *attrs_vme_pipe[] = {
	&dev_attr_vme_pipe_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_vme_pipe = {
	.name = "95910492-943f-44bd-9461-390240f243fd",
	.attrs = attrs_vme_pipe,
};
3259
/* sysfs "id" show callback for the TEST_OA metric set. */
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
}

static struct device_attribute dev_attr_test_oa_id = {
	.attr = { .name = "id", .mode = 0444 },	/* read-only */
	.show = show_test_oa_id,
	.store = NULL,
};

static struct attribute *attrs_test_oa[] = {
	&dev_attr_test_oa_id.attr,
	NULL,
};

/* Directory name is the metric set's GUID as seen by userspace. */
static struct attribute_group group_test_oa = {
	.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1",
	.attrs = attrs_test_oa,
};
3281
/*
 * Register one sysfs attribute group (named by the set's GUID, containing
 * an "id" file) under dev_priv->perf.metrics_kobj for every metric set
 * that has at least one usable MUX config on this system.
 *
 * Returns 0 on success.  On any sysfs_create_group() failure the error
 * labels below unwind every group registered so far — in exact reverse
 * registration order — and the sysfs_create_group() error is returned.
 * The get_*_mux_config() calls on the error path re-evaluate the same
 * availability predicate used at registration time, so only groups that
 * were actually created get removed.
 */
int
i915_perf_register_sysfs_sklgt2(struct drm_i915_private *dev_priv)
{
	/* Scratch output arrays for the availability probes; the register
	 * lists written into them are not used here, only the non-zero
	 * config count matters.
	 */
	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
	int ret = 0;

	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
		if (ret)
			goto error_render_basic;
	}
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
		if (ret)
			goto error_compute_basic;
	}
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
		if (ret)
			goto error_render_pipe_profile;
	}
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
		if (ret)
			goto error_memory_reads;
	}
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
		if (ret)
			goto error_memory_writes;
	}
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
		if (ret)
			goto error_compute_extended;
	}
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
		if (ret)
			goto error_compute_l3_cache;
	}
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
		if (ret)
			goto error_hdc_and_sf;
	}
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
		if (ret)
			goto error_l3_1;
	}
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
		if (ret)
			goto error_l3_2;
	}
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
		if (ret)
			goto error_l3_3;
	}
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
		if (ret)
			goto error_rasterizer_and_pixel_backend;
	}
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
		if (ret)
			goto error_sampler;
	}
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
		if (ret)
			goto error_tdl_1;
	}
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
		if (ret)
			goto error_tdl_2;
	}
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
		if (ret)
			goto error_compute_extra;
	}
	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
		if (ret)
			goto error_vme_pipe;
	}
	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
		if (ret)
			goto error_test_oa;
	}

	return 0;

	/* Unwind in reverse registration order; each label removes the
	 * group registered immediately before the one that failed.
	 */
error_test_oa:
	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
error_vme_pipe:
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
error_compute_extra:
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
error_tdl_2:
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
error_tdl_1:
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
error_sampler:
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
error_rasterizer_and_pixel_backend:
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
error_l3_3:
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
error_l3_2:
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
error_l3_1:
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
error_hdc_and_sf:
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
error_compute_l3_cache:
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
error_compute_extended:
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
error_memory_writes:
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
error_memory_reads:
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
error_render_pipe_profile:
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
error_compute_basic:
	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
error_render_basic:
	return ret;
}
3436
/*
 * Tear down the sysfs metric-set groups created by
 * i915_perf_register_sysfs_sklgt2().
 *
 * Each get_*_mux_config() helper returns the number of MUX config lists
 * applicable to this device; the register path only created a group when
 * that count was non-zero, so the same checks are repeated here to remove
 * only the groups that actually exist.  The mux_regs/mux_lens scratch
 * arrays are required by the helpers' signatures but their contents are
 * unused here.
 */
void
i915_perf_unregister_sysfs_sklgt2(struct drm_i915_private *dev_priv)
{
	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];

	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
}
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
new file mode 100644
index 000000000000..f4397baf3328
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
@@ -0,0 +1,40 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
#ifndef __I915_OA_SKLGT2_H__
#define __I915_OA_SKLGT2_H__

/*
 * Interface to the autogenerated SKL GT2 OA (Observation Architecture)
 * metric-set tables defined in i915_oa_sklgt2.c.
 */

/*
 * Forward declaration so this header is self-contained and does not
 * depend on the includer having pulled in i915_drv.h first.
 */
struct drm_i915_private;

/* Number of builtin OA metric sets defined for SKL GT2. */
extern int i915_oa_n_builtin_metric_sets_sklgt2;

/* Apply the currently requested metric-set configuration (see the .c). */
extern int i915_oa_select_metric_set_sklgt2(struct drm_i915_private *dev_priv);

/* Create the per-metric-set sysfs groups under perf.metrics_kobj. */
extern int i915_perf_register_sysfs_sklgt2(struct drm_i915_private *dev_priv);

/* Remove the sysfs groups created by i915_perf_register_sysfs_sklgt2(). */
extern void i915_perf_unregister_sysfs_sklgt2(struct drm_i915_private *dev_priv);

#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
new file mode 100644
index 000000000000..7765e22dfa17
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
@@ -0,0 +1,3039 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#include <linux/sysfs.h>
30
31#include "i915_drv.h"
32#include "i915_oa_sklgt3.h"
33
/*
 * IDs for the OA metric sets available on SKL GT3.  Numbering starts at
 * 1; 0 is presumably reserved to mean "no metric set selected" —
 * NOTE(review): confirm against the i915 perf core.
 */
enum metric_set_id {
	METRIC_SET_ID_RENDER_BASIC = 1,
	METRIC_SET_ID_COMPUTE_BASIC,
	METRIC_SET_ID_RENDER_PIPE_PROFILE,
	METRIC_SET_ID_MEMORY_READS,
	METRIC_SET_ID_MEMORY_WRITES,
	METRIC_SET_ID_COMPUTE_EXTENDED,
	METRIC_SET_ID_COMPUTE_L3_CACHE,
	METRIC_SET_ID_HDC_AND_SF,
	METRIC_SET_ID_L3_1,
	METRIC_SET_ID_L3_2,
	METRIC_SET_ID_L3_3,
	METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
	METRIC_SET_ID_SAMPLER,
	METRIC_SET_ID_TDL_1,
	METRIC_SET_ID_TDL_2,
	METRIC_SET_ID_COMPUTE_EXTRA,
	METRIC_SET_ID_VME_PIPE,
	METRIC_SET_ID_TEST_OA,
};
54
/* Must equal the number of METRIC_SET_ID_* entries above (18). */
int i915_oa_n_builtin_metric_sets_sklgt3 = 18;
56
/* B-counter register programming for the "render basic" set (autogenerated). */
static const struct i915_oa_reg b_counter_config_render_basic[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2740), 0x00000000 },
};
64
/* Flex EU event configuration registers for the "render basic" set. */
static const struct i915_oa_reg flex_eu_config_render_basic[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
74
/*
 * NOA MUX programming for the "render basic" set: every entry is a write
 * to the 0x9888 MUX register (autogenerated — do not edit values by hand).
 */
static const struct i915_oa_reg mux_config_render_basic[] = {
	{ _MMIO(0x9888), 0x166c01e0 },
	{ _MMIO(0x9888), 0x12170280 },
	{ _MMIO(0x9888), 0x12370280 },
	{ _MMIO(0x9888), 0x16ec01e0 },
	{ _MMIO(0x9888), 0x11930317 },
	{ _MMIO(0x9888), 0x159303df },
	{ _MMIO(0x9888), 0x3f900003 },
	{ _MMIO(0x9888), 0x1a4e0380 },
	{ _MMIO(0x9888), 0x0a6c0053 },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x0a1b4000 },
	{ _MMIO(0x9888), 0x1c1c0001 },
	{ _MMIO(0x9888), 0x002f1000 },
	{ _MMIO(0x9888), 0x042f1000 },
	{ _MMIO(0x9888), 0x004c4000 },
	{ _MMIO(0x9888), 0x0a4c8400 },
	{ _MMIO(0x9888), 0x0c4c0002 },
	{ _MMIO(0x9888), 0x000d2000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0f0400 },
	{ _MMIO(0x9888), 0x0e0f6600 },
	{ _MMIO(0x9888), 0x100f0001 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x162ca200 },
	{ _MMIO(0x9888), 0x062d8000 },
	{ _MMIO(0x9888), 0x082d8000 },
	{ _MMIO(0x9888), 0x00133000 },
	{ _MMIO(0x9888), 0x08133000 },
	{ _MMIO(0x9888), 0x00170020 },
	{ _MMIO(0x9888), 0x08170021 },
	{ _MMIO(0x9888), 0x10170000 },
	{ _MMIO(0x9888), 0x0633c000 },
	{ _MMIO(0x9888), 0x0833c000 },
	{ _MMIO(0x9888), 0x06370800 },
	{ _MMIO(0x9888), 0x08370840 },
	{ _MMIO(0x9888), 0x10370000 },
	{ _MMIO(0x9888), 0x1ace0200 },
	{ _MMIO(0x9888), 0x0aec5300 },
	{ _MMIO(0x9888), 0x10ec0000 },
	{ _MMIO(0x9888), 0x1cec0000 },
	{ _MMIO(0x9888), 0x0a9b8000 },
	{ _MMIO(0x9888), 0x1c9c0002 },
	{ _MMIO(0x9888), 0x0ccc0002 },
	{ _MMIO(0x9888), 0x0a8d8000 },
	{ _MMIO(0x9888), 0x108f0001 },
	{ _MMIO(0x9888), 0x16ac8000 },
	{ _MMIO(0x9888), 0x0d933031 },
	{ _MMIO(0x9888), 0x0f933e3f },
	{ _MMIO(0x9888), 0x01933d00 },
	{ _MMIO(0x9888), 0x0393073c },
	{ _MMIO(0x9888), 0x0593000e },
	{ _MMIO(0x9888), 0x1d930000 },
	{ _MMIO(0x9888), 0x19930000 },
	{ _MMIO(0x9888), 0x1b930000 },
	{ _MMIO(0x9888), 0x1d900157 },
	{ _MMIO(0x9888), 0x1f900158 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x2b908000 },
	{ _MMIO(0x9888), 0x2d908000 },
	{ _MMIO(0x9888), 0x2f908000 },
	{ _MMIO(0x9888), 0x31908000 },
	{ _MMIO(0x9888), 0x15908000 },
	{ _MMIO(0x9888), 0x17908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1190003f },
	{ _MMIO(0x9888), 0x51907710 },
	{ _MMIO(0x9888), 0x419020a0 },
	{ _MMIO(0x9888), 0x55901515 },
	{ _MMIO(0x9888), 0x45900529 },
	{ _MMIO(0x9888), 0x47901025 },
	{ _MMIO(0x9888), 0x57907770 },
	{ _MMIO(0x9888), 0x49902100 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900108 },
	{ _MMIO(0x9888), 0x59900007 },
	{ _MMIO(0x9888), 0x43902108 },
	{ _MMIO(0x9888), 0x53907777 },
};
159
160static int
161get_render_basic_mux_config(struct drm_i915_private *dev_priv,
162 const struct i915_oa_reg **regs,
163 int *lens)
164{
165 int n = 0;
166
167 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
168 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
169
170 regs[n] = mux_config_render_basic;
171 lens[n] = ARRAY_SIZE(mux_config_render_basic);
172 n++;
173
174 return n;
175}
176
/* B-counter register programming for the "compute basic" set (autogenerated). */
static const struct i915_oa_reg b_counter_config_compute_basic[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2740), 0x00000000 },
};
184
/* Flex EU event configuration registers for the "compute basic" set. */
static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};
194
/*
 * NOA MUX programming for the "compute basic" set: every entry is a write
 * to the 0x9888 MUX register (autogenerated — do not edit values by hand).
 */
static const struct i915_oa_reg mux_config_compute_basic[] = {
	{ _MMIO(0x9888), 0x104f00e0 },
	{ _MMIO(0x9888), 0x124f1c00 },
	{ _MMIO(0x9888), 0x106c00e0 },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f900003 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x1a4e0820 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x064f0900 },
	{ _MMIO(0x9888), 0x084f0032 },
	{ _MMIO(0x9888), 0x0a4f1891 },
	{ _MMIO(0x9888), 0x0c4f0e00 },
	{ _MMIO(0x9888), 0x0e4f003c },
	{ _MMIO(0x9888), 0x004f0d80 },
	{ _MMIO(0x9888), 0x024f003b },
	{ _MMIO(0x9888), 0x006c0002 },
	{ _MMIO(0x9888), 0x086c0100 },
	{ _MMIO(0x9888), 0x0c6c000c },
	{ _MMIO(0x9888), 0x0e6c0b00 },
	{ _MMIO(0x9888), 0x186c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x001b4000 },
	{ _MMIO(0x9888), 0x081b8000 },
	{ _MMIO(0x9888), 0x0c1b4000 },
	{ _MMIO(0x9888), 0x0e1b8000 },
	{ _MMIO(0x9888), 0x101c8000 },
	{ _MMIO(0x9888), 0x1a1c8000 },
	{ _MMIO(0x9888), 0x1c1c0024 },
	{ _MMIO(0x9888), 0x065b8000 },
	{ _MMIO(0x9888), 0x085b4000 },
	{ _MMIO(0x9888), 0x0a5bc000 },
	{ _MMIO(0x9888), 0x0c5b8000 },
	{ _MMIO(0x9888), 0x0e5b4000 },
	{ _MMIO(0x9888), 0x005b8000 },
	{ _MMIO(0x9888), 0x025b4000 },
	{ _MMIO(0x9888), 0x1a5c6000 },
	{ _MMIO(0x9888), 0x1c5c001b },
	{ _MMIO(0x9888), 0x125c8000 },
	{ _MMIO(0x9888), 0x145c8000 },
	{ _MMIO(0x9888), 0x004c8000 },
	{ _MMIO(0x9888), 0x0a4c2000 },
	{ _MMIO(0x9888), 0x0c4c0208 },
	{ _MMIO(0x9888), 0x000da000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x020d2000 },
	{ _MMIO(0x9888), 0x0c0f5400 },
	{ _MMIO(0x9888), 0x0e0f5500 },
	{ _MMIO(0x9888), 0x100f0155 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2cc000 },
	{ _MMIO(0x9888), 0x162cfb00 },
	{ _MMIO(0x9888), 0x182c00be },
	{ _MMIO(0x9888), 0x022cc000 },
	{ _MMIO(0x9888), 0x042cc000 },
	{ _MMIO(0x9888), 0x19900157 },
	{ _MMIO(0x9888), 0x1b900158 },
	{ _MMIO(0x9888), 0x1d900105 },
	{ _MMIO(0x9888), 0x1f900103 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x11900fff },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900800 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900863 },
	{ _MMIO(0x9888), 0x47900802 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900802 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900002 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900c62 },
	{ _MMIO(0x9888), 0x53903333 },
};
274
275static int
276get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
277 const struct i915_oa_reg **regs,
278 int *lens)
279{
280 int n = 0;
281
282 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
283 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
284
285 regs[n] = mux_config_compute_basic;
286 lens[n] = ARRAY_SIZE(mux_config_compute_basic);
287 n++;
288
289 return n;
290}
291
/* B-counter register programming for the "render pipe profile" set. */
static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007ffea },
	{ _MMIO(0x2774), 0x00007ffc },
	{ _MMIO(0x2778), 0x0007affa },
	{ _MMIO(0x277c), 0x0000f5fd },
	{ _MMIO(0x2780), 0x00079ffa },
	{ _MMIO(0x2784), 0x0000f3fb },
	{ _MMIO(0x2788), 0x0007bf7a },
	{ _MMIO(0x278c), 0x0000f7e7 },
	{ _MMIO(0x2790), 0x0007fefa },
	{ _MMIO(0x2794), 0x0000f7cf },
	{ _MMIO(0x2798), 0x00077ffa },
	{ _MMIO(0x279c), 0x0000efdf },
	{ _MMIO(0x27a0), 0x0006fffa },
	{ _MMIO(0x27a4), 0x0000cfbf },
	{ _MMIO(0x27a8), 0x0003fffa },
	{ _MMIO(0x27ac), 0x00005f7f },
};
315
/* Flex EU event configuration registers for the "render pipe profile" set. */
static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
325
/*
 * NOA MUX programming for the "render pipe profile" set: every entry is a
 * write to the 0x9888 MUX register (autogenerated — do not edit by hand).
 */
static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
	{ _MMIO(0x9888), 0x0c0e001f },
	{ _MMIO(0x9888), 0x0a0f0000 },
	{ _MMIO(0x9888), 0x10116800 },
	{ _MMIO(0x9888), 0x178a03e0 },
	{ _MMIO(0x9888), 0x11824c00 },
	{ _MMIO(0x9888), 0x11830020 },
	{ _MMIO(0x9888), 0x13840020 },
	{ _MMIO(0x9888), 0x11850019 },
	{ _MMIO(0x9888), 0x11860007 },
	{ _MMIO(0x9888), 0x01870c40 },
	{ _MMIO(0x9888), 0x17880000 },
	{ _MMIO(0x9888), 0x022f4000 },
	{ _MMIO(0x9888), 0x0a4c0040 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x040d4000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x020e5400 },
	{ _MMIO(0x9888), 0x000e0000 },
	{ _MMIO(0x9888), 0x080f0040 },
	{ _MMIO(0x9888), 0x000f0000 },
	{ _MMIO(0x9888), 0x100f0000 },
	{ _MMIO(0x9888), 0x0e0f0040 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x06104000 },
	{ _MMIO(0x9888), 0x06110012 },
	{ _MMIO(0x9888), 0x06131000 },
	{ _MMIO(0x9888), 0x01898000 },
	{ _MMIO(0x9888), 0x0d890100 },
	{ _MMIO(0x9888), 0x03898000 },
	{ _MMIO(0x9888), 0x09808000 },
	{ _MMIO(0x9888), 0x0b808000 },
	{ _MMIO(0x9888), 0x0380c000 },
	{ _MMIO(0x9888), 0x0f8a0075 },
	{ _MMIO(0x9888), 0x1d8a0000 },
	{ _MMIO(0x9888), 0x118a8000 },
	{ _MMIO(0x9888), 0x1b8a4000 },
	{ _MMIO(0x9888), 0x138a8000 },
	{ _MMIO(0x9888), 0x1d81a000 },
	{ _MMIO(0x9888), 0x15818000 },
	{ _MMIO(0x9888), 0x17818000 },
	{ _MMIO(0x9888), 0x0b820030 },
	{ _MMIO(0x9888), 0x07828000 },
	{ _MMIO(0x9888), 0x0d824000 },
	{ _MMIO(0x9888), 0x0f828000 },
	{ _MMIO(0x9888), 0x05824000 },
	{ _MMIO(0x9888), 0x0d830003 },
	{ _MMIO(0x9888), 0x0583000c },
	{ _MMIO(0x9888), 0x09830000 },
	{ _MMIO(0x9888), 0x03838000 },
	{ _MMIO(0x9888), 0x07838000 },
	{ _MMIO(0x9888), 0x0b840980 },
	{ _MMIO(0x9888), 0x03844d80 },
	{ _MMIO(0x9888), 0x11840000 },
	{ _MMIO(0x9888), 0x09848000 },
	{ _MMIO(0x9888), 0x09850080 },
	{ _MMIO(0x9888), 0x03850003 },
	{ _MMIO(0x9888), 0x01850000 },
	{ _MMIO(0x9888), 0x07860000 },
	{ _MMIO(0x9888), 0x0f860400 },
	{ _MMIO(0x9888), 0x09870032 },
	{ _MMIO(0x9888), 0x01888052 },
	{ _MMIO(0x9888), 0x11880000 },
	{ _MMIO(0x9888), 0x09884000 },
	{ _MMIO(0x9888), 0x1b931001 },
	{ _MMIO(0x9888), 0x1d930001 },
	{ _MMIO(0x9888), 0x19934000 },
	{ _MMIO(0x9888), 0x1b958000 },
	{ _MMIO(0x9888), 0x1d950094 },
	{ _MMIO(0x9888), 0x19958000 },
	{ _MMIO(0x9888), 0x09e58000 },
	{ _MMIO(0x9888), 0x0be58000 },
	{ _MMIO(0x9888), 0x03e5c000 },
	{ _MMIO(0x9888), 0x0592c000 },
	{ _MMIO(0x9888), 0x0b928000 },
	{ _MMIO(0x9888), 0x0d924000 },
	{ _MMIO(0x9888), 0x0f924000 },
	{ _MMIO(0x9888), 0x11928000 },
	{ _MMIO(0x9888), 0x1392c000 },
	{ _MMIO(0x9888), 0x09924000 },
	{ _MMIO(0x9888), 0x01985000 },
	{ _MMIO(0x9888), 0x07988000 },
	{ _MMIO(0x9888), 0x09981000 },
	{ _MMIO(0x9888), 0x0b982000 },
	{ _MMIO(0x9888), 0x0d982000 },
	{ _MMIO(0x9888), 0x0f989000 },
	{ _MMIO(0x9888), 0x05982000 },
	{ _MMIO(0x9888), 0x13904000 },
	{ _MMIO(0x9888), 0x21904000 },
	{ _MMIO(0x9888), 0x23904000 },
	{ _MMIO(0x9888), 0x25908000 },
	{ _MMIO(0x9888), 0x27904000 },
	{ _MMIO(0x9888), 0x29908000 },
	{ _MMIO(0x9888), 0x2b904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1190c080 },
	{ _MMIO(0x9888), 0x51901150 },
	{ _MMIO(0x9888), 0x41901400 },
	{ _MMIO(0x9888), 0x55905111 },
	{ _MMIO(0x9888), 0x45901400 },
	{ _MMIO(0x9888), 0x479004a5 },
	{ _MMIO(0x9888), 0x57903455 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b9000a0 },
	{ _MMIO(0x9888), 0x59900001 },
	{ _MMIO(0x9888), 0x43900005 },
	{ _MMIO(0x9888), 0x53900455 },
};
441
442static int
443get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
444 const struct i915_oa_reg **regs,
445 int *lens)
446{
447 int n = 0;
448
449 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
450 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
451
452 regs[n] = mux_config_render_pipe_profile;
453 lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
454 n++;
455
456 return n;
457}
458
/* B-counter register programming for the "memory reads" set (autogenerated). */
static const struct i915_oa_reg b_counter_config_memory_reads[] = {
	{ _MMIO(0x272c), 0xffffffff },
	{ _MMIO(0x2728), 0xffffffff },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x271c), 0xffffffff },
	{ _MMIO(0x2718), 0xffffffff },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x86543210 },
	{ _MMIO(0x2748), 0x86543210 },
	{ _MMIO(0x2744), 0x00006667 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x86543210 },
	{ _MMIO(0x2758), 0x86543210 },
	{ _MMIO(0x2754), 0x00006465 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fe00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fe00 },
	{ _MMIO(0x2780), 0x0007f872 },
	{ _MMIO(0x2784), 0x0000fe00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fe00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fe00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fe00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fe00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fe00 },
};
493
/* Flex EU event configuration registers for the "memory reads" set. */
static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
503
/*
 * NOA MUX programming for the "memory reads" set: every entry is a write
 * to the 0x9888 MUX register (autogenerated — do not edit values by hand).
 */
static const struct i915_oa_reg mux_config_memory_reads[] = {
	{ _MMIO(0x9888), 0x11810c00 },
	{ _MMIO(0x9888), 0x1381001a },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f900064 },
	{ _MMIO(0x9888), 0x03811300 },
	{ _MMIO(0x9888), 0x05811b12 },
	{ _MMIO(0x9888), 0x0781001a },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x17810000 },
	{ _MMIO(0x9888), 0x19810000 },
	{ _MMIO(0x9888), 0x1b810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x1b930055 },
	{ _MMIO(0x9888), 0x03e58000 },
	{ _MMIO(0x9888), 0x05e5c000 },
	{ _MMIO(0x9888), 0x07e54000 },
	{ _MMIO(0x9888), 0x13900150 },
	{ _MMIO(0x9888), 0x21900151 },
	{ _MMIO(0x9888), 0x23900152 },
	{ _MMIO(0x9888), 0x25900153 },
	{ _MMIO(0x9888), 0x27900154 },
	{ _MMIO(0x9888), 0x29900155 },
	{ _MMIO(0x9888), 0x2b900156 },
	{ _MMIO(0x9888), 0x2d900157 },
	{ _MMIO(0x9888), 0x2f90015f },
	{ _MMIO(0x9888), 0x31900105 },
	{ _MMIO(0x9888), 0x15900103 },
	{ _MMIO(0x9888), 0x17900101 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c60 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900c00 },
	{ _MMIO(0x9888), 0x47900c63 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900c63 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900063 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
};
552
553static int
554get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
555 const struct i915_oa_reg **regs,
556 int *lens)
557{
558 int n = 0;
559
560 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
561 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
562
563 regs[n] = mux_config_memory_reads;
564 lens[n] = ARRAY_SIZE(mux_config_memory_reads);
565 n++;
566
567 return n;
568}
569
/* B-counter register programming for the "memory writes" set (autogenerated). */
static const struct i915_oa_reg b_counter_config_memory_writes[] = {
	{ _MMIO(0x272c), 0xffffffff },
	{ _MMIO(0x2728), 0xffffffff },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x271c), 0xffffffff },
	{ _MMIO(0x2718), 0xffffffff },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x86543210 },
	{ _MMIO(0x2748), 0x86543210 },
	{ _MMIO(0x2744), 0x00006667 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x86543210 },
	{ _MMIO(0x2758), 0x86543210 },
	{ _MMIO(0x2754), 0x00006465 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fe00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fe00 },
	{ _MMIO(0x2780), 0x0007f822 },
	{ _MMIO(0x2784), 0x0000fe00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fe00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fe00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fe00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fe00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fe00 },
};
604
/* Flex EU event configuration registers for the "memory writes" set. */
static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
614
/*
 * NOA MUX programming for the "memory writes" set: every entry is a write
 * to the 0x9888 MUX register (autogenerated — do not edit values by hand).
 */
static const struct i915_oa_reg mux_config_memory_writes[] = {
	{ _MMIO(0x9888), 0x11810c00 },
	{ _MMIO(0x9888), 0x1381001a },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f901000 },
	{ _MMIO(0x9888), 0x03811300 },
	{ _MMIO(0x9888), 0x05811b12 },
	{ _MMIO(0x9888), 0x0781001a },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x17810000 },
	{ _MMIO(0x9888), 0x19810000 },
	{ _MMIO(0x9888), 0x1b810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x1b930055 },
	{ _MMIO(0x9888), 0x03e58000 },
	{ _MMIO(0x9888), 0x05e5c000 },
	{ _MMIO(0x9888), 0x07e54000 },
	{ _MMIO(0x9888), 0x13900160 },
	{ _MMIO(0x9888), 0x21900161 },
	{ _MMIO(0x9888), 0x23900162 },
	{ _MMIO(0x9888), 0x25900163 },
	{ _MMIO(0x9888), 0x27900164 },
	{ _MMIO(0x9888), 0x29900165 },
	{ _MMIO(0x9888), 0x2b900166 },
	{ _MMIO(0x9888), 0x2d900167 },
	{ _MMIO(0x9888), 0x2f900150 },
	{ _MMIO(0x9888), 0x31900105 },
	{ _MMIO(0x9888), 0x15900103 },
	{ _MMIO(0x9888), 0x17900101 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c60 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900c00 },
	{ _MMIO(0x9888), 0x47900c63 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900c63 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900063 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
};
663
664static int
665get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
666 const struct i915_oa_reg **regs,
667 int *lens)
668{
669 int n = 0;
670
671 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
672 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
673
674 regs[n] = mux_config_memory_writes;
675 lens[n] = ARRAY_SIZE(mux_config_memory_writes);
676 n++;
677
678 return n;
679}
680
/* B-counter register programming for the "compute extended" set. */
static const struct i915_oa_reg b_counter_config_compute_extended[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fc2a },
	{ _MMIO(0x2774), 0x0000bf00 },
	{ _MMIO(0x2778), 0x0007fc6a },
	{ _MMIO(0x277c), 0x0000bf00 },
	{ _MMIO(0x2780), 0x0007fc92 },
	{ _MMIO(0x2784), 0x0000bf00 },
	{ _MMIO(0x2788), 0x0007fca2 },
	{ _MMIO(0x278c), 0x0000bf00 },
	{ _MMIO(0x2790), 0x0007fc32 },
	{ _MMIO(0x2794), 0x0000bf00 },
	{ _MMIO(0x2798), 0x0007fc9a },
	{ _MMIO(0x279c), 0x0000bf00 },
	{ _MMIO(0x27a0), 0x0007fe6a },
	{ _MMIO(0x27a4), 0x0000bf00 },
	{ _MMIO(0x27a8), 0x0007fe7a },
	{ _MMIO(0x27ac), 0x0000bf00 },
};
704
/* Flex EU event configuration registers for the "compute extended" set. */
static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};
714
/*
 * NOA MUX programming for the "compute extended" set: every entry is a
 * write to the 0x9888 MUX register (autogenerated — do not edit by hand).
 */
static const struct i915_oa_reg mux_config_compute_extended[] = {
	{ _MMIO(0x9888), 0x106c00e0 },
	{ _MMIO(0x9888), 0x141c8160 },
	{ _MMIO(0x9888), 0x161c8015 },
	{ _MMIO(0x9888), 0x181c0120 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x0e4e8000 },
	{ _MMIO(0x9888), 0x184e8000 },
	{ _MMIO(0x9888), 0x1a4eaaa0 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x024e8000 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x0e6c0b01 },
	{ _MMIO(0x9888), 0x006c0200 },
	{ _MMIO(0x9888), 0x026c000c },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x0e1bc000 },
	{ _MMIO(0x9888), 0x001b8000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x001c0041 },
	{ _MMIO(0x9888), 0x061c4200 },
	{ _MMIO(0x9888), 0x081c4443 },
	{ _MMIO(0x9888), 0x0a1c4645 },
	{ _MMIO(0x9888), 0x0c1c7647 },
	{ _MMIO(0x9888), 0x041c7357 },
	{ _MMIO(0x9888), 0x1c1c0030 },
	{ _MMIO(0x9888), 0x101c0000 },
	{ _MMIO(0x9888), 0x1a1c0000 },
	{ _MMIO(0x9888), 0x121c8000 },
	{ _MMIO(0x9888), 0x004c8000 },
	{ _MMIO(0x9888), 0x0a4caa2a },
	{ _MMIO(0x9888), 0x0c4c02aa },
	{ _MMIO(0x9888), 0x084ca000 },
	{ _MMIO(0x9888), 0x000da000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x0c0f5400 },
	{ _MMIO(0x9888), 0x0e0f5515 },
	{ _MMIO(0x9888), 0x100f0155 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2c8000 },
	{ _MMIO(0x9888), 0x162caa00 },
	{ _MMIO(0x9888), 0x182c00aa },
	{ _MMIO(0x9888), 0x022c8000 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x11907fff },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900040 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900802 },
	{ _MMIO(0x9888), 0x47900842 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900842 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900800 },
	{ _MMIO(0x9888), 0x53900000 },
};
788
789static int
790get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
791 const struct i915_oa_reg **regs,
792 int *lens)
793{
794 int n = 0;
795
796 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
797 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
798
799 regs[n] = mux_config_compute_extended;
800 lens[n] = ARRAY_SIZE(mux_config_compute_extended);
801 n++;
802
803 return n;
804}
805
/*
 * Boolean (B/C) counter programming for the "compute_l3_cache" metric set:
 * each entry is a { register, value } pair written verbatim to the OA unit.
 * NOTE(review): values appear machine-generated — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x30800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fffa },
	{ _MMIO(0x2774), 0x0000fefe },
	{ _MMIO(0x2778), 0x0007fffa },
	{ _MMIO(0x277c), 0x0000fefd },
	{ _MMIO(0x2790), 0x0007fffa },
	{ _MMIO(0x2794), 0x0000fbef },
	{ _MMIO(0x2798), 0x0007fffa },
	{ _MMIO(0x279c), 0x0000fbdf },
};
821
/*
 * Flexible EU counter programming (0xe4xx/0xe5xx/0xe6xx registers) for the
 * "compute_l3_cache" metric set. Generated values — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00101100 },
	{ _MMIO(0xe45c), 0x00201200 },
	{ _MMIO(0xe55c), 0x00301300 },
	{ _MMIO(0xe65c), 0x00401400 },
};
831
/*
 * MUX programming sequence (writes to 0x9888) routing signals for the
 * "compute_l3_cache" metric set. Generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
	{ _MMIO(0x9888), 0x166c0760 },
	{ _MMIO(0x9888), 0x1593001e },
	{ _MMIO(0x9888), 0x3f900003 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x0e4e8000 },
	{ _MMIO(0x9888), 0x184e8000 },
	{ _MMIO(0x9888), 0x1a4e8020 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x006c0051 },
	{ _MMIO(0x9888), 0x066c5000 },
	{ _MMIO(0x9888), 0x086c5c5d },
	{ _MMIO(0x9888), 0x0e6c5e5f },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x186c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x001b4000 },
	{ _MMIO(0x9888), 0x061b8000 },
	{ _MMIO(0x9888), 0x081bc000 },
	{ _MMIO(0x9888), 0x0e1bc000 },
	{ _MMIO(0x9888), 0x101c8000 },
	{ _MMIO(0x9888), 0x1a1ce000 },
	{ _MMIO(0x9888), 0x1c1c0030 },
	{ _MMIO(0x9888), 0x004c8000 },
	{ _MMIO(0x9888), 0x0a4c2a00 },
	{ _MMIO(0x9888), 0x0c4c0280 },
	{ _MMIO(0x9888), 0x000d2000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x0c0f0400 },
	{ _MMIO(0x9888), 0x0e0f1500 },
	{ _MMIO(0x9888), 0x100f0140 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2c8000 },
	{ _MMIO(0x9888), 0x162c0a00 },
	{ _MMIO(0x9888), 0x182c00a0 },
	{ _MMIO(0x9888), 0x03933300 },
	{ _MMIO(0x9888), 0x05930032 },
	{ _MMIO(0x9888), 0x11930000 },
	{ _MMIO(0x9888), 0x1b930000 },
	{ _MMIO(0x9888), 0x1d900157 },
	{ _MMIO(0x9888), 0x1f900158 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1190030f },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900000 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900063 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x53903333 },
	{ _MMIO(0x9888), 0x43900840 },
};
893
894static int
895get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
896 const struct i915_oa_reg **regs,
897 int *lens)
898{
899 int n = 0;
900
901 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
902 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
903
904 regs[n] = mux_config_compute_l3_cache;
905 lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
906 n++;
907
908 return n;
909}
910
/*
 * Boolean (B/C) counter programming for the "hdc_and_sf" metric set.
 * Generated { register, value } pairs — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x10800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000fdff },
};
921
/*
 * Flexible EU counter programming for the "hdc_and_sf" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
931
/*
 * MUX programming sequence for the "hdc_and_sf" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
	{ _MMIO(0x9888), 0x104f0232 },
	{ _MMIO(0x9888), 0x124f4640 },
	{ _MMIO(0x9888), 0x106c0232 },
	{ _MMIO(0x9888), 0x11834400 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x0c4e8000 },
	{ _MMIO(0x9888), 0x004f1880 },
	{ _MMIO(0x9888), 0x024f08bb },
	{ _MMIO(0x9888), 0x044f001b },
	{ _MMIO(0x9888), 0x046c0100 },
	{ _MMIO(0x9888), 0x066c000b },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x041b8000 },
	{ _MMIO(0x9888), 0x061b4000 },
	{ _MMIO(0x9888), 0x1a1c1800 },
	{ _MMIO(0x9888), 0x005b8000 },
	{ _MMIO(0x9888), 0x025bc000 },
	{ _MMIO(0x9888), 0x045b4000 },
	{ _MMIO(0x9888), 0x125c8000 },
	{ _MMIO(0x9888), 0x145c8000 },
	{ _MMIO(0x9888), 0x165c8000 },
	{ _MMIO(0x9888), 0x185c8000 },
	{ _MMIO(0x9888), 0x0a4c00a0 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f5000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x022cc000 },
	{ _MMIO(0x9888), 0x042cc000 },
	{ _MMIO(0x9888), 0x062cc000 },
	{ _MMIO(0x9888), 0x082cc000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x0f828000 },
	{ _MMIO(0x9888), 0x0f8305c0 },
	{ _MMIO(0x9888), 0x09830000 },
	{ _MMIO(0x9888), 0x07830000 },
	{ _MMIO(0x9888), 0x1d950080 },
	{ _MMIO(0x9888), 0x13928000 },
	{ _MMIO(0x9888), 0x0f988000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x59900005 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900800 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
987
988static int
989get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
990 const struct i915_oa_reg **regs,
991 int *lens)
992{
993 int n = 0;
994
995 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
996 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
997
998 regs[n] = mux_config_hdc_and_sf;
999 lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
1000 n++;
1001
1002 return n;
1003}
1004
/*
 * Boolean (B/C) counter programming for the "l3_1" metric set.
 * Generated { register, value } pairs — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_l3_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};
1029
/*
 * Flexible EU counter programming for the "l3_1" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_l3_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1039
/*
 * MUX programming sequence for the "l3_1" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_l3_1[] = {
	{ _MMIO(0x9888), 0x126c7b40 },
	{ _MMIO(0x9888), 0x166c0020 },
	{ _MMIO(0x9888), 0x0a603444 },
	{ _MMIO(0x9888), 0x0a613400 },
	{ _MMIO(0x9888), 0x1a4ea800 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x024e8000 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x064f4000 },
	{ _MMIO(0x9888), 0x0c6c5327 },
	{ _MMIO(0x9888), 0x0e6c5425 },
	{ _MMIO(0x9888), 0x006c2a00 },
	{ _MMIO(0x9888), 0x026c285b },
	{ _MMIO(0x9888), 0x046c005c },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x1a6c0800 },
	{ _MMIO(0x9888), 0x0c1bc000 },
	{ _MMIO(0x9888), 0x0e1bc000 },
	{ _MMIO(0x9888), 0x001b8000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x1c1c003c },
	{ _MMIO(0x9888), 0x121c8000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c0800 },
	{ _MMIO(0x9888), 0x065b4000 },
	{ _MMIO(0x9888), 0x1a5c1000 },
	{ _MMIO(0x9888), 0x10600000 },
	{ _MMIO(0x9888), 0x04600000 },
	{ _MMIO(0x9888), 0x0c610044 },
	{ _MMIO(0x9888), 0x10610000 },
	{ _MMIO(0x9888), 0x06610000 },
	{ _MMIO(0x9888), 0x0c4c02a8 },
	{ _MMIO(0x9888), 0x084ca000 },
	{ _MMIO(0x9888), 0x0a4c002a },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f0154 },
	{ _MMIO(0x9888), 0x0c0f5000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x182c00aa },
	{ _MMIO(0x9888), 0x022c8000 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2cc000 },
	{ _MMIO(0x9888), 0x1190ffc0 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900420 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900021 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900400 },
	{ _MMIO(0x9888), 0x43900421 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
};
1112
1113static int
1114get_l3_1_mux_config(struct drm_i915_private *dev_priv,
1115 const struct i915_oa_reg **regs,
1116 int *lens)
1117{
1118 int n = 0;
1119
1120 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1121 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1122
1123 regs[n] = mux_config_l3_1;
1124 lens[n] = ARRAY_SIZE(mux_config_l3_1);
1125 n++;
1126
1127 return n;
1128}
1129
/*
 * Boolean (B/C) counter programming for the "l3_2" metric set.
 * Generated { register, value } pairs — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_l3_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00028002 },
	{ _MMIO(0x277c), 0x000087ff },
	{ _MMIO(0x2780), 0x00020002 },
	{ _MMIO(0x2784), 0x00008fff },
	{ _MMIO(0x2788), 0x00008002 },
	{ _MMIO(0x278c), 0x0000a7ff },
};
1146
/*
 * Flexible EU counter programming for the "l3_2" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_l3_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1156
/*
 * MUX programming sequence for the "l3_2" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_l3_2[] = {
	{ _MMIO(0x9888), 0x126c02e0 },
	{ _MMIO(0x9888), 0x146c0001 },
	{ _MMIO(0x9888), 0x0a623400 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x064f4000 },
	{ _MMIO(0x9888), 0x026c3324 },
	{ _MMIO(0x9888), 0x046c3422 },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c0800 },
	{ _MMIO(0x9888), 0x065b4000 },
	{ _MMIO(0x9888), 0x1a5c1000 },
	{ _MMIO(0x9888), 0x06614000 },
	{ _MMIO(0x9888), 0x0c620044 },
	{ _MMIO(0x9888), 0x10620000 },
	{ _MMIO(0x9888), 0x06620000 },
	{ _MMIO(0x9888), 0x084c8000 },
	{ _MMIO(0x9888), 0x0a4c002a },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f4000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2cc000 },
	{ _MMIO(0x9888), 0x1190f800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x43900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1202
1203static int
1204get_l3_2_mux_config(struct drm_i915_private *dev_priv,
1205 const struct i915_oa_reg **regs,
1206 int *lens)
1207{
1208 int n = 0;
1209
1210 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1211 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1212
1213 regs[n] = mux_config_l3_2;
1214 lens[n] = ARRAY_SIZE(mux_config_l3_2);
1215 n++;
1216
1217 return n;
1218}
1219
/*
 * Boolean (B/C) counter programming for the "l3_3" metric set.
 * Generated { register, value } pairs — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_l3_3[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00028002 },
	{ _MMIO(0x277c), 0x000087ff },
	{ _MMIO(0x2780), 0x00020002 },
	{ _MMIO(0x2784), 0x00008fff },
	{ _MMIO(0x2788), 0x00008002 },
	{ _MMIO(0x278c), 0x0000a7ff },
};
1236
/*
 * Flexible EU counter programming for the "l3_3" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_l3_3[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1246
/*
 * MUX programming sequence for the "l3_3" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_l3_3[] = {
	{ _MMIO(0x9888), 0x126c4e80 },
	{ _MMIO(0x9888), 0x146c0000 },
	{ _MMIO(0x9888), 0x0a633400 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x0c4e8000 },
	{ _MMIO(0x9888), 0x026c3321 },
	{ _MMIO(0x9888), 0x046c342f },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1a6c2000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x061b4000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c1800 },
	{ _MMIO(0x9888), 0x06604000 },
	{ _MMIO(0x9888), 0x0c630044 },
	{ _MMIO(0x9888), 0x10630000 },
	{ _MMIO(0x9888), 0x06630000 },
	{ _MMIO(0x9888), 0x084c8000 },
	{ _MMIO(0x9888), 0x0a4c00aa },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f4000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x1190f800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900002 },
	{ _MMIO(0x9888), 0x33900000 },
};
1291
1292static int
1293get_l3_3_mux_config(struct drm_i915_private *dev_priv,
1294 const struct i915_oa_reg **regs,
1295 int *lens)
1296{
1297 int n = 0;
1298
1299 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1300 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1301
1302 regs[n] = mux_config_l3_3;
1303 lens[n] = ARRAY_SIZE(mux_config_l3_3);
1304 n++;
1305
1306 return n;
1307}
1308
/*
 * Boolean (B/C) counter programming for the "rasterizer_and_pixel_backend"
 * metric set. Generated { register, value } pairs — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x30800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000efff },
	{ _MMIO(0x2778), 0x00006000 },
	{ _MMIO(0x277c), 0x0000f3ff },
};
1321
/*
 * Flexible EU counter programming for the "rasterizer_and_pixel_backend"
 * metric set. Generated values — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1331
/*
 * MUX programming sequence for the "rasterizer_and_pixel_backend" metric
 * set. Generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x9888), 0x102f3800 },
	{ _MMIO(0x9888), 0x144d0500 },
	{ _MMIO(0x9888), 0x120d03c0 },
	{ _MMIO(0x9888), 0x140d03cf },
	{ _MMIO(0x9888), 0x0c0f0004 },
	{ _MMIO(0x9888), 0x0c4e4000 },
	{ _MMIO(0x9888), 0x042f0480 },
	{ _MMIO(0x9888), 0x082f0000 },
	{ _MMIO(0x9888), 0x022f0000 },
	{ _MMIO(0x9888), 0x0a4c0090 },
	{ _MMIO(0x9888), 0x064d0027 },
	{ _MMIO(0x9888), 0x004d0000 },
	{ _MMIO(0x9888), 0x000d0d40 },
	{ _MMIO(0x9888), 0x020d803f },
	{ _MMIO(0x9888), 0x040d8023 },
	{ _MMIO(0x9888), 0x100d0000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x020f0010 },
	{ _MMIO(0x9888), 0x000f0000 },
	{ _MMIO(0x9888), 0x0e0f0050 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41901400 },
	{ _MMIO(0x9888), 0x43901485 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900001 },
	{ _MMIO(0x9888), 0x33900000 },
};
1364
1365static int
1366get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
1367 const struct i915_oa_reg **regs,
1368 int *lens)
1369{
1370 int n = 0;
1371
1372 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1373 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1374
1375 regs[n] = mux_config_rasterizer_and_pixel_backend;
1376 lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
1377 n++;
1378
1379 return n;
1380}
1381
/*
 * Boolean (B/C) counter programming for the "sampler" metric set.
 * Generated { register, value } pairs — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_sampler[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x70800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x0000c000 },
	{ _MMIO(0x2774), 0x0000e7ff },
	{ _MMIO(0x2778), 0x00003000 },
	{ _MMIO(0x277c), 0x0000f9ff },
	{ _MMIO(0x2780), 0x00000c00 },
	{ _MMIO(0x2784), 0x0000fe7f },
};
1396
/*
 * Flexible EU counter programming for the "sampler" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_sampler[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1406
/*
 * MUX programming sequence for the "sampler" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_sampler[] = {
	{ _MMIO(0x9888), 0x14152c00 },
	{ _MMIO(0x9888), 0x16150005 },
	{ _MMIO(0x9888), 0x121600a0 },
	{ _MMIO(0x9888), 0x14352c00 },
	{ _MMIO(0x9888), 0x16350005 },
	{ _MMIO(0x9888), 0x123600a0 },
	{ _MMIO(0x9888), 0x14552c00 },
	{ _MMIO(0x9888), 0x16550005 },
	{ _MMIO(0x9888), 0x125600a0 },
	{ _MMIO(0x9888), 0x062f6000 },
	{ _MMIO(0x9888), 0x022f2000 },
	{ _MMIO(0x9888), 0x0c4c0050 },
	{ _MMIO(0x9888), 0x0a4c0010 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f0350 },
	{ _MMIO(0x9888), 0x0c0fb000 },
	{ _MMIO(0x9888), 0x0e0f00da },
	{ _MMIO(0x9888), 0x182c0028 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x022dc000 },
	{ _MMIO(0x9888), 0x042d4000 },
	{ _MMIO(0x9888), 0x0c138000 },
	{ _MMIO(0x9888), 0x0e132000 },
	{ _MMIO(0x9888), 0x0413c000 },
	{ _MMIO(0x9888), 0x1c140018 },
	{ _MMIO(0x9888), 0x0c157000 },
	{ _MMIO(0x9888), 0x0e150078 },
	{ _MMIO(0x9888), 0x10150000 },
	{ _MMIO(0x9888), 0x04162180 },
	{ _MMIO(0x9888), 0x02160000 },
	{ _MMIO(0x9888), 0x04174000 },
	{ _MMIO(0x9888), 0x0233a000 },
	{ _MMIO(0x9888), 0x04333000 },
	{ _MMIO(0x9888), 0x14348000 },
	{ _MMIO(0x9888), 0x16348000 },
	{ _MMIO(0x9888), 0x02357870 },
	{ _MMIO(0x9888), 0x10350000 },
	{ _MMIO(0x9888), 0x04360043 },
	{ _MMIO(0x9888), 0x02360000 },
	{ _MMIO(0x9888), 0x04371000 },
	{ _MMIO(0x9888), 0x0e538000 },
	{ _MMIO(0x9888), 0x00538000 },
	{ _MMIO(0x9888), 0x06533000 },
	{ _MMIO(0x9888), 0x1c540020 },
	{ _MMIO(0x9888), 0x12548000 },
	{ _MMIO(0x9888), 0x0e557000 },
	{ _MMIO(0x9888), 0x00557800 },
	{ _MMIO(0x9888), 0x10550000 },
	{ _MMIO(0x9888), 0x06560043 },
	{ _MMIO(0x9888), 0x02560000 },
	{ _MMIO(0x9888), 0x06571000 },
	{ _MMIO(0x9888), 0x1190ff80 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900060 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900060 },
};
1477
1478static int
1479get_sampler_mux_config(struct drm_i915_private *dev_priv,
1480 const struct i915_oa_reg **regs,
1481 int *lens)
1482{
1483 int n = 0;
1484
1485 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1486 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1487
1488 regs[n] = mux_config_sampler;
1489 lens[n] = ARRAY_SIZE(mux_config_sampler);
1490 n++;
1491
1492 return n;
1493}
1494
/*
 * Boolean (B/C) counter programming for the "tdl_1" metric set.
 * Generated { register, value } pairs — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_tdl_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x00007fff },
	{ _MMIO(0x2778), 0x00000000 },
	{ _MMIO(0x277c), 0x00009fff },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000efff },
	{ _MMIO(0x2788), 0x00000000 },
	{ _MMIO(0x278c), 0x0000f3ff },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000fdff },
	{ _MMIO(0x2798), 0x00000000 },
	{ _MMIO(0x279c), 0x0000fe7f },
};
1515
/*
 * Flexible EU counter programming for the "tdl_1" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1525
/*
 * MUX programming sequence for the "tdl_1" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_tdl_1[] = {
	{ _MMIO(0x9888), 0x12120000 },
	{ _MMIO(0x9888), 0x12320000 },
	{ _MMIO(0x9888), 0x12520000 },
	{ _MMIO(0x9888), 0x002f8000 },
	{ _MMIO(0x9888), 0x022f3000 },
	{ _MMIO(0x9888), 0x0a4c0015 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f03a0 },
	{ _MMIO(0x9888), 0x0c0ff000 },
	{ _MMIO(0x9888), 0x0e0f0095 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2d8000 },
	{ _MMIO(0x9888), 0x0e2d4000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x02108000 },
	{ _MMIO(0x9888), 0x0410c000 },
	{ _MMIO(0x9888), 0x02118000 },
	{ _MMIO(0x9888), 0x0411c000 },
	{ _MMIO(0x9888), 0x02121880 },
	{ _MMIO(0x9888), 0x041219b5 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x02134000 },
	{ _MMIO(0x9888), 0x04135000 },
	{ _MMIO(0x9888), 0x0c308000 },
	{ _MMIO(0x9888), 0x0e304000 },
	{ _MMIO(0x9888), 0x06304000 },
	{ _MMIO(0x9888), 0x0c318000 },
	{ _MMIO(0x9888), 0x0e314000 },
	{ _MMIO(0x9888), 0x06314000 },
	{ _MMIO(0x9888), 0x0c321a80 },
	{ _MMIO(0x9888), 0x0e320033 },
	{ _MMIO(0x9888), 0x06320031 },
	{ _MMIO(0x9888), 0x00320000 },
	{ _MMIO(0x9888), 0x0c334000 },
	{ _MMIO(0x9888), 0x0e331000 },
	{ _MMIO(0x9888), 0x06331000 },
	{ _MMIO(0x9888), 0x0e508000 },
	{ _MMIO(0x9888), 0x00508000 },
	{ _MMIO(0x9888), 0x02504000 },
	{ _MMIO(0x9888), 0x0e518000 },
	{ _MMIO(0x9888), 0x00518000 },
	{ _MMIO(0x9888), 0x02514000 },
	{ _MMIO(0x9888), 0x0e521880 },
	{ _MMIO(0x9888), 0x00521a80 },
	{ _MMIO(0x9888), 0x02520033 },
	{ _MMIO(0x9888), 0x0e534000 },
	{ _MMIO(0x9888), 0x00534000 },
	{ _MMIO(0x9888), 0x02531000 },
	{ _MMIO(0x9888), 0x1190ff80 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900062 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
};
1595
1596static int
1597get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
1598 const struct i915_oa_reg **regs,
1599 int *lens)
1600{
1601 int n = 0;
1602
1603 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1604 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1605
1606 regs[n] = mux_config_tdl_1;
1607 lens[n] = ARRAY_SIZE(mux_config_tdl_1);
1608 n++;
1609
1610 return n;
1611}
1612
/*
 * Boolean (B/C) counter programming for the "tdl_2" metric set.
 * Generated { register, value } pairs — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_tdl_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};
1621
/*
 * Flexible EU counter programming for the "tdl_2" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1631
/*
 * MUX programming sequence for the "tdl_2" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_tdl_2[] = {
	{ _MMIO(0x9888), 0x12124d60 },
	{ _MMIO(0x9888), 0x12322e60 },
	{ _MMIO(0x9888), 0x12524d60 },
	{ _MMIO(0x9888), 0x022f3000 },
	{ _MMIO(0x9888), 0x0a4c0014 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0fe000 },
	{ _MMIO(0x9888), 0x0e0f0097 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x002d8000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x0410c000 },
	{ _MMIO(0x9888), 0x0411c000 },
	{ _MMIO(0x9888), 0x04121fb7 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x04135000 },
	{ _MMIO(0x9888), 0x00308000 },
	{ _MMIO(0x9888), 0x06304000 },
	{ _MMIO(0x9888), 0x00318000 },
	{ _MMIO(0x9888), 0x06314000 },
	{ _MMIO(0x9888), 0x00321b80 },
	{ _MMIO(0x9888), 0x0632003f },
	{ _MMIO(0x9888), 0x00334000 },
	{ _MMIO(0x9888), 0x06331000 },
	{ _MMIO(0x9888), 0x0250c000 },
	{ _MMIO(0x9888), 0x0251c000 },
	{ _MMIO(0x9888), 0x02521fb7 },
	{ _MMIO(0x9888), 0x00520000 },
	{ _MMIO(0x9888), 0x02535000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900800 },
	{ _MMIO(0x9888), 0x43900063 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
	{ _MMIO(0x9888), 0x33900000 },
};
1675
1676static int
1677get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
1678 const struct i915_oa_reg **regs,
1679 int *lens)
1680{
1681 int n = 0;
1682
1683 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1684 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1685
1686 regs[n] = mux_config_tdl_2;
1687 lens[n] = ARRAY_SIZE(mux_config_tdl_2);
1688 n++;
1689
1690 return n;
1691}
1692
/* The "compute_extra" metric set programs no boolean (B/C) counters. */
static const struct i915_oa_reg b_counter_config_compute_extra[] = {
};
1695
/* The "compute_extra" metric set programs no flexible EU counters. */
static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
};
1698
/*
 * MUX programming sequence for the "compute_extra" metric set.
 * Generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_compute_extra[] = {
	{ _MMIO(0x9888), 0x121203e0 },
	{ _MMIO(0x9888), 0x123203e0 },
	{ _MMIO(0x9888), 0x125203e0 },
	{ _MMIO(0x9888), 0x129203e0 },
	{ _MMIO(0x9888), 0x12b203e0 },
	{ _MMIO(0x9888), 0x12d203e0 },
	{ _MMIO(0x9888), 0x024ec000 },
	{ _MMIO(0x9888), 0x044ec000 },
	{ _MMIO(0x9888), 0x064ec000 },
	{ _MMIO(0x9888), 0x022f4000 },
	{ _MMIO(0x9888), 0x084ca000 },
	{ _MMIO(0x9888), 0x0a4c0042 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f5000 },
	{ _MMIO(0x9888), 0x0e0f006d },
	{ _MMIO(0x9888), 0x022c8000 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x042d8000 },
	{ _MMIO(0x9888), 0x06104000 },
	{ _MMIO(0x9888), 0x06114000 },
	{ _MMIO(0x9888), 0x06120033 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x06131000 },
	{ _MMIO(0x9888), 0x04308000 },
	{ _MMIO(0x9888), 0x04318000 },
	{ _MMIO(0x9888), 0x04321980 },
	{ _MMIO(0x9888), 0x00320000 },
	{ _MMIO(0x9888), 0x04334000 },
	{ _MMIO(0x9888), 0x04504000 },
	{ _MMIO(0x9888), 0x04514000 },
	{ _MMIO(0x9888), 0x04520033 },
	{ _MMIO(0x9888), 0x00520000 },
	{ _MMIO(0x9888), 0x04531000 },
	{ _MMIO(0x9888), 0x00af8000 },
	{ _MMIO(0x9888), 0x0acc0001 },
	{ _MMIO(0x9888), 0x008d8000 },
	{ _MMIO(0x9888), 0x028da000 },
	{ _MMIO(0x9888), 0x0c8fb000 },
	{ _MMIO(0x9888), 0x0e8f0001 },
	{ _MMIO(0x9888), 0x06ac8000 },
	{ _MMIO(0x9888), 0x02ad4000 },
	{ _MMIO(0x9888), 0x02908000 },
	{ _MMIO(0x9888), 0x02918000 },
	{ _MMIO(0x9888), 0x02921980 },
	{ _MMIO(0x9888), 0x00920000 },
	{ _MMIO(0x9888), 0x02934000 },
	{ _MMIO(0x9888), 0x02b04000 },
	{ _MMIO(0x9888), 0x02b14000 },
	{ _MMIO(0x9888), 0x02b20033 },
	{ _MMIO(0x9888), 0x00b20000 },
	{ _MMIO(0x9888), 0x02b31000 },
	{ _MMIO(0x9888), 0x00d08000 },
	{ _MMIO(0x9888), 0x00d18000 },
	{ _MMIO(0x9888), 0x00d21980 },
	{ _MMIO(0x9888), 0x00d34000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x43900402 },
	{ _MMIO(0x9888), 0x53901550 },
	{ _MMIO(0x9888), 0x45900080 },
	{ _MMIO(0x9888), 0x33900000 },
};
1769
1770static int
1771get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
1772 const struct i915_oa_reg **regs,
1773 int *lens)
1774{
1775 int n = 0;
1776
1777 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1778 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1779
1780 regs[n] = mux_config_compute_extra;
1781 lens[n] = ARRAY_SIZE(mux_config_compute_extra);
1782 n++;
1783
1784 return n;
1785}
1786
/*
 * Boolean (B) counter register writes for the VME_PIPE metric set.
 * Opaque, tool-generated values — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_vme_pipe[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00100030 },
	{ _MMIO(0x2774), 0x0000fff9 },
	{ _MMIO(0x2778), 0x00000002 },
	{ _MMIO(0x277c), 0x0000fffc },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000fff3 },
	{ _MMIO(0x2788), 0x00100180 },
	{ _MMIO(0x278c), 0x0000ffcf },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00000002 },
	{ _MMIO(0x279c), 0x0000ff3f },
};
1806
/* Flex-EU counter register writes for the VME_PIPE metric set. */
static const struct i915_oa_reg flex_eu_config_vme_pipe[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00008003 },
};
1811
/*
 * NOA MUX programming for the VME_PIPE metric set: every entry writes
 * register 0x9888.  Opaque, tool-generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_vme_pipe[] = {
	{ _MMIO(0x9888), 0x141a5800 },
	{ _MMIO(0x9888), 0x161a00c0 },
	{ _MMIO(0x9888), 0x12180240 },
	{ _MMIO(0x9888), 0x14180002 },
	{ _MMIO(0x9888), 0x149a5800 },
	{ _MMIO(0x9888), 0x169a00c0 },
	{ _MMIO(0x9888), 0x12980240 },
	{ _MMIO(0x9888), 0x14980002 },
	{ _MMIO(0x9888), 0x1a4e3fc0 },
	{ _MMIO(0x9888), 0x002f1000 },
	{ _MMIO(0x9888), 0x022f8000 },
	{ _MMIO(0x9888), 0x042f3000 },
	{ _MMIO(0x9888), 0x004c4000 },
	{ _MMIO(0x9888), 0x0a4c9500 },
	{ _MMIO(0x9888), 0x0c4c002a },
	{ _MMIO(0x9888), 0x000d2000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0c0f0400 },
	{ _MMIO(0x9888), 0x0e0f5500 },
	{ _MMIO(0x9888), 0x100f0015 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2c8000 },
	{ _MMIO(0x9888), 0x162caa00 },
	{ _MMIO(0x9888), 0x182c000a },
	{ _MMIO(0x9888), 0x04193000 },
	{ _MMIO(0x9888), 0x081a28c1 },
	{ _MMIO(0x9888), 0x001a0000 },
	{ _MMIO(0x9888), 0x00133000 },
	{ _MMIO(0x9888), 0x0613c000 },
	{ _MMIO(0x9888), 0x0813f000 },
	{ _MMIO(0x9888), 0x00172000 },
	{ _MMIO(0x9888), 0x06178000 },
	{ _MMIO(0x9888), 0x0817a000 },
	{ _MMIO(0x9888), 0x00180037 },
	{ _MMIO(0x9888), 0x06180940 },
	{ _MMIO(0x9888), 0x08180000 },
	{ _MMIO(0x9888), 0x02180000 },
	{ _MMIO(0x9888), 0x04183000 },
	{ _MMIO(0x9888), 0x04afc000 },
	{ _MMIO(0x9888), 0x06af3000 },
	{ _MMIO(0x9888), 0x0acc4000 },
	{ _MMIO(0x9888), 0x0ccc0015 },
	{ _MMIO(0x9888), 0x0a8da000 },
	{ _MMIO(0x9888), 0x0c8da000 },
	{ _MMIO(0x9888), 0x0e8f4000 },
	{ _MMIO(0x9888), 0x108f0015 },
	{ _MMIO(0x9888), 0x16aca000 },
	{ _MMIO(0x9888), 0x18ac000a },
	{ _MMIO(0x9888), 0x06993000 },
	{ _MMIO(0x9888), 0x0c9a28c1 },
	{ _MMIO(0x9888), 0x009a0000 },
	{ _MMIO(0x9888), 0x0a93f000 },
	{ _MMIO(0x9888), 0x0c93f000 },
	{ _MMIO(0x9888), 0x0a97a000 },
	{ _MMIO(0x9888), 0x0c97a000 },
	{ _MMIO(0x9888), 0x0a980977 },
	{ _MMIO(0x9888), 0x08980000 },
	{ _MMIO(0x9888), 0x04980000 },
	{ _MMIO(0x9888), 0x06983000 },
	{ _MMIO(0x9888), 0x119000ff },
	{ _MMIO(0x9888), 0x51900050 },
	{ _MMIO(0x9888), 0x41900000 },
	{ _MMIO(0x9888), 0x55900115 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x47900884 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900002 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1886
1887static int
1888get_vme_pipe_mux_config(struct drm_i915_private *dev_priv,
1889 const struct i915_oa_reg **regs,
1890 int *lens)
1891{
1892 int n = 0;
1893
1894 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1895 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1896
1897 regs[n] = mux_config_vme_pipe;
1898 lens[n] = ARRAY_SIZE(mux_config_vme_pipe);
1899 n++;
1900
1901 return n;
1902}
1903
/*
 * Boolean (B) counter register writes for the TEST_OA metric set.
 * Opaque, tool-generated values — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_test_oa[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2770), 0x00000004 },
	{ _MMIO(0x2774), 0x00000000 },
	{ _MMIO(0x2778), 0x00000003 },
	{ _MMIO(0x277c), 0x00000000 },
	{ _MMIO(0x2780), 0x00000007 },
	{ _MMIO(0x2784), 0x00000000 },
	{ _MMIO(0x2788), 0x00100002 },
	{ _MMIO(0x278c), 0x0000fff7 },
	{ _MMIO(0x2790), 0x00100002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00100082 },
	{ _MMIO(0x279c), 0x0000ffef },
	{ _MMIO(0x27a0), 0x001000c2 },
	{ _MMIO(0x27a4), 0x0000ffe7 },
	{ _MMIO(0x27a8), 0x00100001 },
	{ _MMIO(0x27ac), 0x0000ffe7 },
};
1928
/* Empty: the TEST_OA metric set programs no flex-EU registers. */
static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
1931
/*
 * NOA MUX programming for the TEST_OA metric set: every entry writes
 * register 0x9888.  Opaque, tool-generated values — do not hand-edit.
 */
static const struct i915_oa_reg mux_config_test_oa[] = {
	{ _MMIO(0x9888), 0x11810000 },
	{ _MMIO(0x9888), 0x07810013 },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x1b930040 },
	{ _MMIO(0x9888), 0x07e54000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1946
1947static int
1948get_test_oa_mux_config(struct drm_i915_private *dev_priv,
1949 const struct i915_oa_reg **regs,
1950 int *lens)
1951{
1952 int n = 0;
1953
1954 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1955 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1956
1957 regs[n] = mux_config_test_oa;
1958 lens[n] = ARRAY_SIZE(mux_config_test_oa);
1959 n++;
1960
1961 return n;
1962}
1963
1964int i915_oa_select_metric_set_sklgt3(struct drm_i915_private *dev_priv)
1965{
1966 dev_priv->perf.oa.n_mux_configs = 0;
1967 dev_priv->perf.oa.b_counter_regs = NULL;
1968 dev_priv->perf.oa.b_counter_regs_len = 0;
1969 dev_priv->perf.oa.flex_regs = NULL;
1970 dev_priv->perf.oa.flex_regs_len = 0;
1971
1972 switch (dev_priv->perf.oa.metrics_set) {
1973 case METRIC_SET_ID_RENDER_BASIC:
1974 dev_priv->perf.oa.n_mux_configs =
1975 get_render_basic_mux_config(dev_priv,
1976 dev_priv->perf.oa.mux_regs,
1977 dev_priv->perf.oa.mux_regs_lens);
1978 if (dev_priv->perf.oa.n_mux_configs == 0) {
1979 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
1980
1981 /* EINVAL because *_register_sysfs already checked this
1982 * and so it wouldn't have been advertised to userspace and
1983 * so shouldn't have been requested
1984 */
1985 return -EINVAL;
1986 }
1987
1988 dev_priv->perf.oa.b_counter_regs =
1989 b_counter_config_render_basic;
1990 dev_priv->perf.oa.b_counter_regs_len =
1991 ARRAY_SIZE(b_counter_config_render_basic);
1992
1993 dev_priv->perf.oa.flex_regs =
1994 flex_eu_config_render_basic;
1995 dev_priv->perf.oa.flex_regs_len =
1996 ARRAY_SIZE(flex_eu_config_render_basic);
1997
1998 return 0;
1999 case METRIC_SET_ID_COMPUTE_BASIC:
2000 dev_priv->perf.oa.n_mux_configs =
2001 get_compute_basic_mux_config(dev_priv,
2002 dev_priv->perf.oa.mux_regs,
2003 dev_priv->perf.oa.mux_regs_lens);
2004 if (dev_priv->perf.oa.n_mux_configs == 0) {
2005 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
2006
2007 /* EINVAL because *_register_sysfs already checked this
2008 * and so it wouldn't have been advertised to userspace and
2009 * so shouldn't have been requested
2010 */
2011 return -EINVAL;
2012 }
2013
2014 dev_priv->perf.oa.b_counter_regs =
2015 b_counter_config_compute_basic;
2016 dev_priv->perf.oa.b_counter_regs_len =
2017 ARRAY_SIZE(b_counter_config_compute_basic);
2018
2019 dev_priv->perf.oa.flex_regs =
2020 flex_eu_config_compute_basic;
2021 dev_priv->perf.oa.flex_regs_len =
2022 ARRAY_SIZE(flex_eu_config_compute_basic);
2023
2024 return 0;
2025 case METRIC_SET_ID_RENDER_PIPE_PROFILE:
2026 dev_priv->perf.oa.n_mux_configs =
2027 get_render_pipe_profile_mux_config(dev_priv,
2028 dev_priv->perf.oa.mux_regs,
2029 dev_priv->perf.oa.mux_regs_lens);
2030 if (dev_priv->perf.oa.n_mux_configs == 0) {
2031 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
2032
2033 /* EINVAL because *_register_sysfs already checked this
2034 * and so it wouldn't have been advertised to userspace and
2035 * so shouldn't have been requested
2036 */
2037 return -EINVAL;
2038 }
2039
2040 dev_priv->perf.oa.b_counter_regs =
2041 b_counter_config_render_pipe_profile;
2042 dev_priv->perf.oa.b_counter_regs_len =
2043 ARRAY_SIZE(b_counter_config_render_pipe_profile);
2044
2045 dev_priv->perf.oa.flex_regs =
2046 flex_eu_config_render_pipe_profile;
2047 dev_priv->perf.oa.flex_regs_len =
2048 ARRAY_SIZE(flex_eu_config_render_pipe_profile);
2049
2050 return 0;
2051 case METRIC_SET_ID_MEMORY_READS:
2052 dev_priv->perf.oa.n_mux_configs =
2053 get_memory_reads_mux_config(dev_priv,
2054 dev_priv->perf.oa.mux_regs,
2055 dev_priv->perf.oa.mux_regs_lens);
2056 if (dev_priv->perf.oa.n_mux_configs == 0) {
2057 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
2058
2059 /* EINVAL because *_register_sysfs already checked this
2060 * and so it wouldn't have been advertised to userspace and
2061 * so shouldn't have been requested
2062 */
2063 return -EINVAL;
2064 }
2065
2066 dev_priv->perf.oa.b_counter_regs =
2067 b_counter_config_memory_reads;
2068 dev_priv->perf.oa.b_counter_regs_len =
2069 ARRAY_SIZE(b_counter_config_memory_reads);
2070
2071 dev_priv->perf.oa.flex_regs =
2072 flex_eu_config_memory_reads;
2073 dev_priv->perf.oa.flex_regs_len =
2074 ARRAY_SIZE(flex_eu_config_memory_reads);
2075
2076 return 0;
2077 case METRIC_SET_ID_MEMORY_WRITES:
2078 dev_priv->perf.oa.n_mux_configs =
2079 get_memory_writes_mux_config(dev_priv,
2080 dev_priv->perf.oa.mux_regs,
2081 dev_priv->perf.oa.mux_regs_lens);
2082 if (dev_priv->perf.oa.n_mux_configs == 0) {
2083 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
2084
2085 /* EINVAL because *_register_sysfs already checked this
2086 * and so it wouldn't have been advertised to userspace and
2087 * so shouldn't have been requested
2088 */
2089 return -EINVAL;
2090 }
2091
2092 dev_priv->perf.oa.b_counter_regs =
2093 b_counter_config_memory_writes;
2094 dev_priv->perf.oa.b_counter_regs_len =
2095 ARRAY_SIZE(b_counter_config_memory_writes);
2096
2097 dev_priv->perf.oa.flex_regs =
2098 flex_eu_config_memory_writes;
2099 dev_priv->perf.oa.flex_regs_len =
2100 ARRAY_SIZE(flex_eu_config_memory_writes);
2101
2102 return 0;
2103 case METRIC_SET_ID_COMPUTE_EXTENDED:
2104 dev_priv->perf.oa.n_mux_configs =
2105 get_compute_extended_mux_config(dev_priv,
2106 dev_priv->perf.oa.mux_regs,
2107 dev_priv->perf.oa.mux_regs_lens);
2108 if (dev_priv->perf.oa.n_mux_configs == 0) {
2109 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
2110
2111 /* EINVAL because *_register_sysfs already checked this
2112 * and so it wouldn't have been advertised to userspace and
2113 * so shouldn't have been requested
2114 */
2115 return -EINVAL;
2116 }
2117
2118 dev_priv->perf.oa.b_counter_regs =
2119 b_counter_config_compute_extended;
2120 dev_priv->perf.oa.b_counter_regs_len =
2121 ARRAY_SIZE(b_counter_config_compute_extended);
2122
2123 dev_priv->perf.oa.flex_regs =
2124 flex_eu_config_compute_extended;
2125 dev_priv->perf.oa.flex_regs_len =
2126 ARRAY_SIZE(flex_eu_config_compute_extended);
2127
2128 return 0;
2129 case METRIC_SET_ID_COMPUTE_L3_CACHE:
2130 dev_priv->perf.oa.n_mux_configs =
2131 get_compute_l3_cache_mux_config(dev_priv,
2132 dev_priv->perf.oa.mux_regs,
2133 dev_priv->perf.oa.mux_regs_lens);
2134 if (dev_priv->perf.oa.n_mux_configs == 0) {
2135 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
2136
2137 /* EINVAL because *_register_sysfs already checked this
2138 * and so it wouldn't have been advertised to userspace and
2139 * so shouldn't have been requested
2140 */
2141 return -EINVAL;
2142 }
2143
2144 dev_priv->perf.oa.b_counter_regs =
2145 b_counter_config_compute_l3_cache;
2146 dev_priv->perf.oa.b_counter_regs_len =
2147 ARRAY_SIZE(b_counter_config_compute_l3_cache);
2148
2149 dev_priv->perf.oa.flex_regs =
2150 flex_eu_config_compute_l3_cache;
2151 dev_priv->perf.oa.flex_regs_len =
2152 ARRAY_SIZE(flex_eu_config_compute_l3_cache);
2153
2154 return 0;
2155 case METRIC_SET_ID_HDC_AND_SF:
2156 dev_priv->perf.oa.n_mux_configs =
2157 get_hdc_and_sf_mux_config(dev_priv,
2158 dev_priv->perf.oa.mux_regs,
2159 dev_priv->perf.oa.mux_regs_lens);
2160 if (dev_priv->perf.oa.n_mux_configs == 0) {
2161 DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
2162
2163 /* EINVAL because *_register_sysfs already checked this
2164 * and so it wouldn't have been advertised to userspace and
2165 * so shouldn't have been requested
2166 */
2167 return -EINVAL;
2168 }
2169
2170 dev_priv->perf.oa.b_counter_regs =
2171 b_counter_config_hdc_and_sf;
2172 dev_priv->perf.oa.b_counter_regs_len =
2173 ARRAY_SIZE(b_counter_config_hdc_and_sf);
2174
2175 dev_priv->perf.oa.flex_regs =
2176 flex_eu_config_hdc_and_sf;
2177 dev_priv->perf.oa.flex_regs_len =
2178 ARRAY_SIZE(flex_eu_config_hdc_and_sf);
2179
2180 return 0;
2181 case METRIC_SET_ID_L3_1:
2182 dev_priv->perf.oa.n_mux_configs =
2183 get_l3_1_mux_config(dev_priv,
2184 dev_priv->perf.oa.mux_regs,
2185 dev_priv->perf.oa.mux_regs_lens);
2186 if (dev_priv->perf.oa.n_mux_configs == 0) {
2187 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
2188
2189 /* EINVAL because *_register_sysfs already checked this
2190 * and so it wouldn't have been advertised to userspace and
2191 * so shouldn't have been requested
2192 */
2193 return -EINVAL;
2194 }
2195
2196 dev_priv->perf.oa.b_counter_regs =
2197 b_counter_config_l3_1;
2198 dev_priv->perf.oa.b_counter_regs_len =
2199 ARRAY_SIZE(b_counter_config_l3_1);
2200
2201 dev_priv->perf.oa.flex_regs =
2202 flex_eu_config_l3_1;
2203 dev_priv->perf.oa.flex_regs_len =
2204 ARRAY_SIZE(flex_eu_config_l3_1);
2205
2206 return 0;
2207 case METRIC_SET_ID_L3_2:
2208 dev_priv->perf.oa.n_mux_configs =
2209 get_l3_2_mux_config(dev_priv,
2210 dev_priv->perf.oa.mux_regs,
2211 dev_priv->perf.oa.mux_regs_lens);
2212 if (dev_priv->perf.oa.n_mux_configs == 0) {
2213 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
2214
2215 /* EINVAL because *_register_sysfs already checked this
2216 * and so it wouldn't have been advertised to userspace and
2217 * so shouldn't have been requested
2218 */
2219 return -EINVAL;
2220 }
2221
2222 dev_priv->perf.oa.b_counter_regs =
2223 b_counter_config_l3_2;
2224 dev_priv->perf.oa.b_counter_regs_len =
2225 ARRAY_SIZE(b_counter_config_l3_2);
2226
2227 dev_priv->perf.oa.flex_regs =
2228 flex_eu_config_l3_2;
2229 dev_priv->perf.oa.flex_regs_len =
2230 ARRAY_SIZE(flex_eu_config_l3_2);
2231
2232 return 0;
2233 case METRIC_SET_ID_L3_3:
2234 dev_priv->perf.oa.n_mux_configs =
2235 get_l3_3_mux_config(dev_priv,
2236 dev_priv->perf.oa.mux_regs,
2237 dev_priv->perf.oa.mux_regs_lens);
2238 if (dev_priv->perf.oa.n_mux_configs == 0) {
2239 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
2240
2241 /* EINVAL because *_register_sysfs already checked this
2242 * and so it wouldn't have been advertised to userspace and
2243 * so shouldn't have been requested
2244 */
2245 return -EINVAL;
2246 }
2247
2248 dev_priv->perf.oa.b_counter_regs =
2249 b_counter_config_l3_3;
2250 dev_priv->perf.oa.b_counter_regs_len =
2251 ARRAY_SIZE(b_counter_config_l3_3);
2252
2253 dev_priv->perf.oa.flex_regs =
2254 flex_eu_config_l3_3;
2255 dev_priv->perf.oa.flex_regs_len =
2256 ARRAY_SIZE(flex_eu_config_l3_3);
2257
2258 return 0;
2259 case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
2260 dev_priv->perf.oa.n_mux_configs =
2261 get_rasterizer_and_pixel_backend_mux_config(dev_priv,
2262 dev_priv->perf.oa.mux_regs,
2263 dev_priv->perf.oa.mux_regs_lens);
2264 if (dev_priv->perf.oa.n_mux_configs == 0) {
2265 DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
2266
2267 /* EINVAL because *_register_sysfs already checked this
2268 * and so it wouldn't have been advertised to userspace and
2269 * so shouldn't have been requested
2270 */
2271 return -EINVAL;
2272 }
2273
2274 dev_priv->perf.oa.b_counter_regs =
2275 b_counter_config_rasterizer_and_pixel_backend;
2276 dev_priv->perf.oa.b_counter_regs_len =
2277 ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
2278
2279 dev_priv->perf.oa.flex_regs =
2280 flex_eu_config_rasterizer_and_pixel_backend;
2281 dev_priv->perf.oa.flex_regs_len =
2282 ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
2283
2284 return 0;
2285 case METRIC_SET_ID_SAMPLER:
2286 dev_priv->perf.oa.n_mux_configs =
2287 get_sampler_mux_config(dev_priv,
2288 dev_priv->perf.oa.mux_regs,
2289 dev_priv->perf.oa.mux_regs_lens);
2290 if (dev_priv->perf.oa.n_mux_configs == 0) {
2291 DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
2292
2293 /* EINVAL because *_register_sysfs already checked this
2294 * and so it wouldn't have been advertised to userspace and
2295 * so shouldn't have been requested
2296 */
2297 return -EINVAL;
2298 }
2299
2300 dev_priv->perf.oa.b_counter_regs =
2301 b_counter_config_sampler;
2302 dev_priv->perf.oa.b_counter_regs_len =
2303 ARRAY_SIZE(b_counter_config_sampler);
2304
2305 dev_priv->perf.oa.flex_regs =
2306 flex_eu_config_sampler;
2307 dev_priv->perf.oa.flex_regs_len =
2308 ARRAY_SIZE(flex_eu_config_sampler);
2309
2310 return 0;
2311 case METRIC_SET_ID_TDL_1:
2312 dev_priv->perf.oa.n_mux_configs =
2313 get_tdl_1_mux_config(dev_priv,
2314 dev_priv->perf.oa.mux_regs,
2315 dev_priv->perf.oa.mux_regs_lens);
2316 if (dev_priv->perf.oa.n_mux_configs == 0) {
2317 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
2318
2319 /* EINVAL because *_register_sysfs already checked this
2320 * and so it wouldn't have been advertised to userspace and
2321 * so shouldn't have been requested
2322 */
2323 return -EINVAL;
2324 }
2325
2326 dev_priv->perf.oa.b_counter_regs =
2327 b_counter_config_tdl_1;
2328 dev_priv->perf.oa.b_counter_regs_len =
2329 ARRAY_SIZE(b_counter_config_tdl_1);
2330
2331 dev_priv->perf.oa.flex_regs =
2332 flex_eu_config_tdl_1;
2333 dev_priv->perf.oa.flex_regs_len =
2334 ARRAY_SIZE(flex_eu_config_tdl_1);
2335
2336 return 0;
2337 case METRIC_SET_ID_TDL_2:
2338 dev_priv->perf.oa.n_mux_configs =
2339 get_tdl_2_mux_config(dev_priv,
2340 dev_priv->perf.oa.mux_regs,
2341 dev_priv->perf.oa.mux_regs_lens);
2342 if (dev_priv->perf.oa.n_mux_configs == 0) {
2343 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
2344
2345 /* EINVAL because *_register_sysfs already checked this
2346 * and so it wouldn't have been advertised to userspace and
2347 * so shouldn't have been requested
2348 */
2349 return -EINVAL;
2350 }
2351
2352 dev_priv->perf.oa.b_counter_regs =
2353 b_counter_config_tdl_2;
2354 dev_priv->perf.oa.b_counter_regs_len =
2355 ARRAY_SIZE(b_counter_config_tdl_2);
2356
2357 dev_priv->perf.oa.flex_regs =
2358 flex_eu_config_tdl_2;
2359 dev_priv->perf.oa.flex_regs_len =
2360 ARRAY_SIZE(flex_eu_config_tdl_2);
2361
2362 return 0;
2363 case METRIC_SET_ID_COMPUTE_EXTRA:
2364 dev_priv->perf.oa.n_mux_configs =
2365 get_compute_extra_mux_config(dev_priv,
2366 dev_priv->perf.oa.mux_regs,
2367 dev_priv->perf.oa.mux_regs_lens);
2368 if (dev_priv->perf.oa.n_mux_configs == 0) {
2369 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
2370
2371 /* EINVAL because *_register_sysfs already checked this
2372 * and so it wouldn't have been advertised to userspace and
2373 * so shouldn't have been requested
2374 */
2375 return -EINVAL;
2376 }
2377
2378 dev_priv->perf.oa.b_counter_regs =
2379 b_counter_config_compute_extra;
2380 dev_priv->perf.oa.b_counter_regs_len =
2381 ARRAY_SIZE(b_counter_config_compute_extra);
2382
2383 dev_priv->perf.oa.flex_regs =
2384 flex_eu_config_compute_extra;
2385 dev_priv->perf.oa.flex_regs_len =
2386 ARRAY_SIZE(flex_eu_config_compute_extra);
2387
2388 return 0;
2389 case METRIC_SET_ID_VME_PIPE:
2390 dev_priv->perf.oa.n_mux_configs =
2391 get_vme_pipe_mux_config(dev_priv,
2392 dev_priv->perf.oa.mux_regs,
2393 dev_priv->perf.oa.mux_regs_lens);
2394 if (dev_priv->perf.oa.n_mux_configs == 0) {
2395 DRM_DEBUG_DRIVER("No suitable MUX config for \"VME_PIPE\" metric set\n");
2396
2397 /* EINVAL because *_register_sysfs already checked this
2398 * and so it wouldn't have been advertised to userspace and
2399 * so shouldn't have been requested
2400 */
2401 return -EINVAL;
2402 }
2403
2404 dev_priv->perf.oa.b_counter_regs =
2405 b_counter_config_vme_pipe;
2406 dev_priv->perf.oa.b_counter_regs_len =
2407 ARRAY_SIZE(b_counter_config_vme_pipe);
2408
2409 dev_priv->perf.oa.flex_regs =
2410 flex_eu_config_vme_pipe;
2411 dev_priv->perf.oa.flex_regs_len =
2412 ARRAY_SIZE(flex_eu_config_vme_pipe);
2413
2414 return 0;
2415 case METRIC_SET_ID_TEST_OA:
2416 dev_priv->perf.oa.n_mux_configs =
2417 get_test_oa_mux_config(dev_priv,
2418 dev_priv->perf.oa.mux_regs,
2419 dev_priv->perf.oa.mux_regs_lens);
2420 if (dev_priv->perf.oa.n_mux_configs == 0) {
2421 DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
2422
2423 /* EINVAL because *_register_sysfs already checked this
2424 * and so it wouldn't have been advertised to userspace and
2425 * so shouldn't have been requested
2426 */
2427 return -EINVAL;
2428 }
2429
2430 dev_priv->perf.oa.b_counter_regs =
2431 b_counter_config_test_oa;
2432 dev_priv->perf.oa.b_counter_regs_len =
2433 ARRAY_SIZE(b_counter_config_test_oa);
2434
2435 dev_priv->perf.oa.flex_regs =
2436 flex_eu_config_test_oa;
2437 dev_priv->perf.oa.flex_regs_len =
2438 ARRAY_SIZE(flex_eu_config_test_oa);
2439
2440 return 0;
2441 default:
2442 return -ENODEV;
2443 }
2444}
2445
/* sysfs machinery for RENDER_BASIC: a read-only "id" file inside a group named by the set's GUID. */
static ssize_t
show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
}

static struct device_attribute dev_attr_render_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_render_basic_id,
	.store = NULL,
};

static struct attribute *attrs_render_basic[] = {
	&dev_attr_render_basic_id.attr,
	NULL,
};

static struct attribute_group group_render_basic = {
	.name = "4616d450-2393-4836-8146-53c5ed84d359",
	.attrs = attrs_render_basic,
};
2467
/* sysfs machinery for COMPUTE_BASIC: a read-only "id" file inside a group named by the set's GUID. */
static ssize_t
show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
}

static struct device_attribute dev_attr_compute_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_basic_id,
	.store = NULL,
};

static struct attribute *attrs_compute_basic[] = {
	&dev_attr_compute_basic_id.attr,
	NULL,
};

static struct attribute_group group_compute_basic = {
	.name = "4320492b-fd03-42ac-922f-dbe1ef3b7b58",
	.attrs = attrs_compute_basic,
};
2489
/* sysfs machinery for RENDER_PIPE_PROFILE: a read-only "id" file inside a group named by the set's GUID. */
static ssize_t
show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
}

static struct device_attribute dev_attr_render_pipe_profile_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_render_pipe_profile_id,
	.store = NULL,
};

static struct attribute *attrs_render_pipe_profile[] = {
	&dev_attr_render_pipe_profile_id.attr,
	NULL,
};

static struct attribute_group group_render_pipe_profile = {
	.name = "bd2d9cae-b9ec-4f5b-9d2f-934bed398a2d",
	.attrs = attrs_render_pipe_profile,
};
2511
/* sysfs machinery for MEMORY_READS: a read-only "id" file inside a group named by the set's GUID. */
static ssize_t
show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
}

static struct device_attribute dev_attr_memory_reads_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_memory_reads_id,
	.store = NULL,
};

static struct attribute *attrs_memory_reads[] = {
	&dev_attr_memory_reads_id.attr,
	NULL,
};

static struct attribute_group group_memory_reads = {
	.name = "4ca0f3fe-7fd3-4924-98cb-1807d9879767",
	.attrs = attrs_memory_reads,
};
2533
/* sysfs machinery for MEMORY_WRITES: a read-only "id" file inside a group named by the set's GUID. */
static ssize_t
show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
}

static struct device_attribute dev_attr_memory_writes_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_memory_writes_id,
	.store = NULL,
};

static struct attribute *attrs_memory_writes[] = {
	&dev_attr_memory_writes_id.attr,
	NULL,
};

static struct attribute_group group_memory_writes = {
	.name = "a0c0172c-ee13-403d-99ff-2bdf6936cf14",
	.attrs = attrs_memory_writes,
};
2555
/* sysfs machinery for COMPUTE_EXTENDED: a read-only "id" file inside a group named by the set's GUID. */
static ssize_t
show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
}

static struct device_attribute dev_attr_compute_extended_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_extended_id,
	.store = NULL,
};

static struct attribute *attrs_compute_extended[] = {
	&dev_attr_compute_extended_id.attr,
	NULL,
};

static struct attribute_group group_compute_extended = {
	.name = "52435e0b-f188-42ea-8680-21a56ee20dee",
	.attrs = attrs_compute_extended,
};
2577
/* sysfs machinery for COMPUTE_L3_CACHE: a read-only "id" file inside a group named by the set's GUID. */
static ssize_t
show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
}

static struct device_attribute dev_attr_compute_l3_cache_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_l3_cache_id,
	.store = NULL,
};

static struct attribute *attrs_compute_l3_cache[] = {
	&dev_attr_compute_l3_cache_id.attr,
	NULL,
};

static struct attribute_group group_compute_l3_cache = {
	.name = "27076eeb-49f3-4fed-8423-c66506005c63",
	.attrs = attrs_compute_l3_cache,
};
2599
/* sysfs machinery for HDC_AND_SF: a read-only "id" file inside a group named by the set's GUID. */
static ssize_t
show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
}

static struct device_attribute dev_attr_hdc_and_sf_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_hdc_and_sf_id,
	.store = NULL,
};

static struct attribute *attrs_hdc_and_sf[] = {
	&dev_attr_hdc_and_sf_id.attr,
	NULL,
};

static struct attribute_group group_hdc_and_sf = {
	.name = "8071b409-c39a-4674-94d7-32962ecfb512",
	.attrs = attrs_hdc_and_sf,
};
2621
/* sysfs machinery for L3_1: a read-only "id" file inside a group named by the set's GUID. */
static ssize_t
show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
}

static struct device_attribute dev_attr_l3_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_1_id,
	.store = NULL,
};

static struct attribute *attrs_l3_1[] = {
	&dev_attr_l3_1_id.attr,
	NULL,
};

static struct attribute_group group_l3_1 = {
	.name = "5e0b391e-9ea8-4901-b2ff-b64ff616c7ed",
	.attrs = attrs_l3_1,
};
2643
/* sysfs machinery for L3_2: a read-only "id" file inside a group named by the set's GUID. */
static ssize_t
show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
}

static struct device_attribute dev_attr_l3_2_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_2_id,
	.store = NULL,
};

static struct attribute *attrs_l3_2[] = {
	&dev_attr_l3_2_id.attr,
	NULL,
};

static struct attribute_group group_l3_2 = {
	.name = "25dc828e-1d2d-426e-9546-a1d4233cdf16",
	.attrs = attrs_l3_2,
};
2665
/* sysfs machinery for L3_3: a read-only "id" file inside a group named by the set's GUID. */
static ssize_t
show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
}

static struct device_attribute dev_attr_l3_3_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_3_id,
	.store = NULL,
};

static struct attribute *attrs_l3_3[] = {
	&dev_attr_l3_3_id.attr,
	NULL,
};

static struct attribute_group group_l3_3 = {
	.name = "3dba9405-2d7e-4d70-8199-e734e82fd6bf",
	.attrs = attrs_l3_3,
};
2687
/* sysfs machinery for RASTERIZER_AND_PIXEL_BACKEND: a read-only "id" file inside a group named by the set's GUID. */
static ssize_t
show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
}

static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_rasterizer_and_pixel_backend_id,
	.store = NULL,
};

static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
	&dev_attr_rasterizer_and_pixel_backend_id.attr,
	NULL,
};

static struct attribute_group group_rasterizer_and_pixel_backend = {
	.name = "76935d7b-09c9-46bf-87f1-c18b4a86ebe5",
	.attrs = attrs_rasterizer_and_pixel_backend,
};
2709
/* sysfs machinery for SAMPLER: a read-only "id" file inside a group named by the set's GUID. */
static ssize_t
show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
}

static struct device_attribute dev_attr_sampler_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_sampler_id,
	.store = NULL,
};

static struct attribute *attrs_sampler[] = {
	&dev_attr_sampler_id.attr,
	NULL,
};

static struct attribute_group group_sampler = {
	.name = "1b34c0d6-4f4c-4d7b-833f-4aaf236d87a6",
	.attrs = attrs_sampler,
};
2731
/* sysfs machinery for TDL_1: a read-only "id" file inside a group named by the set's GUID. */
static ssize_t
show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
}

static struct device_attribute dev_attr_tdl_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_tdl_1_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_1[] = {
	&dev_attr_tdl_1_id.attr,
	NULL,
};

static struct attribute_group group_tdl_1 = {
	.name = "b375c985-9953-455b-bda2-b03f7594e9db",
	.attrs = attrs_tdl_1,
};
2753
2754static ssize_t
2755show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
2756{
2757 return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
2758}
2759
/* Read-only sysfs "id" attribute for the tdl_2 metric set. */
static struct device_attribute dev_attr_tdl_2_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_tdl_2_id,
	.store = NULL,
};
2765
/* NULL-terminated attribute list for the tdl_2 group. */
static struct attribute *attrs_tdl_2[] = {
	&dev_attr_tdl_2_id.attr,
	NULL,
};
2770
/* sysfs group for the tdl_2 metric set; named by its config GUID. */
static struct attribute_group group_tdl_2 = {
	.name = "3e2be2bb-884a-49bb-82c5-2358e6bd5f2d",
	.attrs = attrs_tdl_2,
};
2775
2776static ssize_t
2777show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
2778{
2779 return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
2780}
2781
/* Read-only sysfs "id" attribute for the compute_extra metric set. */
static struct device_attribute dev_attr_compute_extra_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_extra_id,
	.store = NULL,
};
2787
/* NULL-terminated attribute list for the compute_extra group. */
static struct attribute *attrs_compute_extra[] = {
	&dev_attr_compute_extra_id.attr,
	NULL,
};
2792
/* sysfs group for the compute_extra metric set; named by its config GUID. */
static struct attribute_group group_compute_extra = {
	.name = "2d80a648-7b5a-4e92-bbe7-3b5c76f2e221",
	.attrs = attrs_compute_extra,
};
2797
2798static ssize_t
2799show_vme_pipe_id(struct device *kdev, struct device_attribute *attr, char *buf)
2800{
2801 return sprintf(buf, "%d\n", METRIC_SET_ID_VME_PIPE);
2802}
2803
/* Read-only sysfs "id" attribute for the vme_pipe metric set. */
static struct device_attribute dev_attr_vme_pipe_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_vme_pipe_id,
	.store = NULL,
};
2809
/* NULL-terminated attribute list for the vme_pipe group. */
static struct attribute *attrs_vme_pipe[] = {
	&dev_attr_vme_pipe_id.attr,
	NULL,
};
2814
/* sysfs group for the vme_pipe metric set; named by its config GUID. */
static struct attribute_group group_vme_pipe = {
	.name = "cfae9232-6ffc-42cc-a703-9790016925f0",
	.attrs = attrs_vme_pipe,
};
2819
2820static ssize_t
2821show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
2822{
2823 return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
2824}
2825
/* Read-only sysfs "id" attribute for the test_oa metric set. */
static struct device_attribute dev_attr_test_oa_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_test_oa_id,
	.store = NULL,
};
2831
/* NULL-terminated attribute list for the test_oa group. */
static struct attribute *attrs_test_oa[] = {
	&dev_attr_test_oa_id.attr,
	NULL,
};
2836
/* sysfs group for the test_oa metric set; named by its config GUID. */
static struct attribute_group group_test_oa = {
	.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8",
	.attrs = attrs_test_oa,
};
2841
/*
 * Publish a sysfs group (under perf.metrics_kobj) for every metric set
 * whose get_*_mux_config() helper reports a usable MUX config on this
 * device.  Returns 0 on success or the sysfs_create_group() error code,
 * after unwinding every group created before the failure.
 */
int
i915_perf_register_sysfs_sklgt3(struct drm_i915_private *dev_priv)
{
	/* Scratch space filled by the get_*_mux_config() query helpers. */
	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
	int ret = 0;

	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
		if (ret)
			goto error_render_basic;
	}
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
		if (ret)
			goto error_compute_basic;
	}
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
		if (ret)
			goto error_render_pipe_profile;
	}
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
		if (ret)
			goto error_memory_reads;
	}
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
		if (ret)
			goto error_memory_writes;
	}
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
		if (ret)
			goto error_compute_extended;
	}
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
		if (ret)
			goto error_compute_l3_cache;
	}
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
		if (ret)
			goto error_hdc_and_sf;
	}
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
		if (ret)
			goto error_l3_1;
	}
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
		if (ret)
			goto error_l3_2;
	}
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
		if (ret)
			goto error_l3_3;
	}
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
		if (ret)
			goto error_rasterizer_and_pixel_backend;
	}
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
		if (ret)
			goto error_sampler;
	}
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
		if (ret)
			goto error_tdl_1;
	}
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
		if (ret)
			goto error_tdl_2;
	}
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
		if (ret)
			goto error_compute_extra;
	}
	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
		if (ret)
			goto error_vme_pipe;
	}
	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
		if (ret)
			goto error_test_oa;
	}

	return 0;

	/*
	 * Error unwind: remove, in reverse registration order, each group
	 * that would have been created before the failing one.  The same
	 * get_*_mux_config() predicate gates removal as gated creation.
	 */
error_test_oa:
	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
error_vme_pipe:
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
error_compute_extra:
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
error_tdl_2:
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
error_tdl_1:
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
error_sampler:
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
error_rasterizer_and_pixel_backend:
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
error_l3_3:
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
error_l3_2:
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
error_l3_1:
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
error_hdc_and_sf:
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
error_compute_l3_cache:
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
error_compute_extended:
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
error_memory_writes:
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
error_memory_reads:
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
error_render_pipe_profile:
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
error_compute_basic:
	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
error_render_basic:
	return ret;
}
2996
/*
 * Remove the sysfs group of every metric set that register_sysfs would
 * have created; the same get_*_mux_config() predicate gates removal.
 */
void
i915_perf_unregister_sysfs_sklgt3(struct drm_i915_private *dev_priv)
{
	/* Scratch space filled by the get_*_mux_config() query helpers. */
	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];

	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
}
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
new file mode 100644
index 000000000000..c0accb1f9b74
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
@@ -0,0 +1,40 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#ifndef __I915_OA_SKLGT3_H__
30#define __I915_OA_SKLGT3_H__
31
32extern int i915_oa_n_builtin_metric_sets_sklgt3;
33
34extern int i915_oa_select_metric_set_sklgt3(struct drm_i915_private *dev_priv);
35
36extern int i915_perf_register_sysfs_sklgt3(struct drm_i915_private *dev_priv);
37
38extern void i915_perf_unregister_sysfs_sklgt3(struct drm_i915_private *dev_priv);
39
40#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
new file mode 100644
index 000000000000..9ddab43a2176
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
@@ -0,0 +1,3093 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#include <linux/sysfs.h>
30
31#include "i915_drv.h"
32#include "i915_oa_sklgt4.h"
33
/*
 * IDs of the metric sets this file defines; each set's sysfs "id" file
 * reports its value.  Numbering starts at 1 (0 left unused).
 */
enum metric_set_id {
	METRIC_SET_ID_RENDER_BASIC = 1,
	METRIC_SET_ID_COMPUTE_BASIC,
	METRIC_SET_ID_RENDER_PIPE_PROFILE,
	METRIC_SET_ID_MEMORY_READS,
	METRIC_SET_ID_MEMORY_WRITES,
	METRIC_SET_ID_COMPUTE_EXTENDED,
	METRIC_SET_ID_COMPUTE_L3_CACHE,
	METRIC_SET_ID_HDC_AND_SF,
	METRIC_SET_ID_L3_1,
	METRIC_SET_ID_L3_2,
	METRIC_SET_ID_L3_3,
	METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
	METRIC_SET_ID_SAMPLER,
	METRIC_SET_ID_TDL_1,
	METRIC_SET_ID_TDL_2,
	METRIC_SET_ID_COMPUTE_EXTRA,
	METRIC_SET_ID_VME_PIPE,
	METRIC_SET_ID_TEST_OA,
};
54
55int i915_oa_n_builtin_metric_sets_sklgt4 = 18;
56
/* Autogenerated B-counter register/value pairs for the render_basic set. */
static const struct i915_oa_reg b_counter_config_render_basic[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2740), 0x00000000 },
};
64
/* Autogenerated flex-EU register/value pairs for the render_basic set. */
static const struct i915_oa_reg flex_eu_config_render_basic[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
74
/*
 * Autogenerated MUX config for the render_basic set: a sequence of writes,
 * all to register 0x9888, presumably routing counter signals — values are
 * hardware-specific and must not be edited by hand.
 */
static const struct i915_oa_reg mux_config_render_basic[] = {
	{ _MMIO(0x9888), 0x166c01e0 },
	{ _MMIO(0x9888), 0x12170280 },
	{ _MMIO(0x9888), 0x12370280 },
	{ _MMIO(0x9888), 0x16ec01e0 },
	{ _MMIO(0x9888), 0x176c01e0 },
	{ _MMIO(0x9888), 0x11930317 },
	{ _MMIO(0x9888), 0x159303df },
	{ _MMIO(0x9888), 0x3f900003 },
	{ _MMIO(0x9888), 0x1a4e03b0 },
	{ _MMIO(0x9888), 0x0a6c0053 },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x0a1b4000 },
	{ _MMIO(0x9888), 0x1c1c0001 },
	{ _MMIO(0x9888), 0x002f1000 },
	{ _MMIO(0x9888), 0x042f1000 },
	{ _MMIO(0x9888), 0x004c4000 },
	{ _MMIO(0x9888), 0x0a4ca400 },
	{ _MMIO(0x9888), 0x0c4c0002 },
	{ _MMIO(0x9888), 0x000d2000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0f0400 },
	{ _MMIO(0x9888), 0x0e0f5600 },
	{ _MMIO(0x9888), 0x100f0001 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x162caa00 },
	{ _MMIO(0x9888), 0x062d8000 },
	{ _MMIO(0x9888), 0x00133000 },
	{ _MMIO(0x9888), 0x08133000 },
	{ _MMIO(0x9888), 0x00170020 },
	{ _MMIO(0x9888), 0x08170021 },
	{ _MMIO(0x9888), 0x10170000 },
	{ _MMIO(0x9888), 0x0633c000 },
	{ _MMIO(0x9888), 0x06370800 },
	{ _MMIO(0x9888), 0x10370000 },
	{ _MMIO(0x9888), 0x1ace0230 },
	{ _MMIO(0x9888), 0x0aec5300 },
	{ _MMIO(0x9888), 0x10ec0000 },
	{ _MMIO(0x9888), 0x1cec0000 },
	{ _MMIO(0x9888), 0x0a9b8000 },
	{ _MMIO(0x9888), 0x1c9c0002 },
	{ _MMIO(0x9888), 0x0acc2000 },
	{ _MMIO(0x9888), 0x0ccc0002 },
	{ _MMIO(0x9888), 0x088d8000 },
	{ _MMIO(0x9888), 0x0a8d8000 },
	{ _MMIO(0x9888), 0x0e8f1000 },
	{ _MMIO(0x9888), 0x108f0001 },
	{ _MMIO(0x9888), 0x16ac8800 },
	{ _MMIO(0x9888), 0x1b4e0020 },
	{ _MMIO(0x9888), 0x096c5300 },
	{ _MMIO(0x9888), 0x116c0000 },
	{ _MMIO(0x9888), 0x1d6c0000 },
	{ _MMIO(0x9888), 0x091b8000 },
	{ _MMIO(0x9888), 0x1b1c8000 },
	{ _MMIO(0x9888), 0x0b4c2000 },
	{ _MMIO(0x9888), 0x090d8000 },
	{ _MMIO(0x9888), 0x0f0f1000 },
	{ _MMIO(0x9888), 0x172c0800 },
	{ _MMIO(0x9888), 0x0d933031 },
	{ _MMIO(0x9888), 0x0f933e3f },
	{ _MMIO(0x9888), 0x01933d00 },
	{ _MMIO(0x9888), 0x0393073c },
	{ _MMIO(0x9888), 0x0593000e },
	{ _MMIO(0x9888), 0x1d930000 },
	{ _MMIO(0x9888), 0x19930000 },
	{ _MMIO(0x9888), 0x1b930000 },
	{ _MMIO(0x9888), 0x1d900157 },
	{ _MMIO(0x9888), 0x1f900158 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x2b908000 },
	{ _MMIO(0x9888), 0x2d908000 },
	{ _MMIO(0x9888), 0x2f908000 },
	{ _MMIO(0x9888), 0x31908000 },
	{ _MMIO(0x9888), 0x15908000 },
	{ _MMIO(0x9888), 0x17908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1190003f },
	{ _MMIO(0x9888), 0x5190ff30 },
	{ _MMIO(0x9888), 0x41900060 },
	{ _MMIO(0x9888), 0x55903033 },
	{ _MMIO(0x9888), 0x45901421 },
	{ _MMIO(0x9888), 0x47900803 },
	{ _MMIO(0x9888), 0x5790fff1 },
	{ _MMIO(0x9888), 0x49900001 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x5990000f },
	{ _MMIO(0x9888), 0x43900000 },
	{ _MMIO(0x9888), 0x5390ffff },
};
170
171static int
172get_render_basic_mux_config(struct drm_i915_private *dev_priv,
173 const struct i915_oa_reg **regs,
174 int *lens)
175{
176 int n = 0;
177
178 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
179 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
180
181 regs[n] = mux_config_render_basic;
182 lens[n] = ARRAY_SIZE(mux_config_render_basic);
183 n++;
184
185 return n;
186}
187
/* Autogenerated B-counter register/value pairs for the compute_basic set. */
static const struct i915_oa_reg b_counter_config_compute_basic[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2740), 0x00000000 },
};
195
/* Autogenerated flex-EU register/value pairs for the compute_basic set. */
static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};
205
/*
 * Autogenerated MUX config for the compute_basic set; register 0x9888
 * write sequence — hardware-specific, do not edit by hand.
 */
static const struct i915_oa_reg mux_config_compute_basic[] = {
	{ _MMIO(0x9888), 0x104f00e0 },
	{ _MMIO(0x9888), 0x124f1c00 },
	{ _MMIO(0x9888), 0x106c00e0 },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f900003 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x1a4e0820 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x064f0900 },
	{ _MMIO(0x9888), 0x084f0032 },
	{ _MMIO(0x9888), 0x0a4f1891 },
	{ _MMIO(0x9888), 0x0c4f0e00 },
	{ _MMIO(0x9888), 0x0e4f003c },
	{ _MMIO(0x9888), 0x004f0d80 },
	{ _MMIO(0x9888), 0x024f003b },
	{ _MMIO(0x9888), 0x006c0002 },
	{ _MMIO(0x9888), 0x086c0100 },
	{ _MMIO(0x9888), 0x0c6c000c },
	{ _MMIO(0x9888), 0x0e6c0b00 },
	{ _MMIO(0x9888), 0x186c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x001b4000 },
	{ _MMIO(0x9888), 0x081b8000 },
	{ _MMIO(0x9888), 0x0c1b4000 },
	{ _MMIO(0x9888), 0x0e1b8000 },
	{ _MMIO(0x9888), 0x101c8000 },
	{ _MMIO(0x9888), 0x1a1c8000 },
	{ _MMIO(0x9888), 0x1c1c0024 },
	{ _MMIO(0x9888), 0x065b8000 },
	{ _MMIO(0x9888), 0x085b4000 },
	{ _MMIO(0x9888), 0x0a5bc000 },
	{ _MMIO(0x9888), 0x0c5b8000 },
	{ _MMIO(0x9888), 0x0e5b4000 },
	{ _MMIO(0x9888), 0x005b8000 },
	{ _MMIO(0x9888), 0x025b4000 },
	{ _MMIO(0x9888), 0x1a5c6000 },
	{ _MMIO(0x9888), 0x1c5c001b },
	{ _MMIO(0x9888), 0x125c8000 },
	{ _MMIO(0x9888), 0x145c8000 },
	{ _MMIO(0x9888), 0x004c8000 },
	{ _MMIO(0x9888), 0x0a4c2000 },
	{ _MMIO(0x9888), 0x0c4c0208 },
	{ _MMIO(0x9888), 0x000da000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x020d2000 },
	{ _MMIO(0x9888), 0x0c0f5400 },
	{ _MMIO(0x9888), 0x0e0f5500 },
	{ _MMIO(0x9888), 0x100f0155 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2cc000 },
	{ _MMIO(0x9888), 0x162cfb00 },
	{ _MMIO(0x9888), 0x182c00be },
	{ _MMIO(0x9888), 0x022cc000 },
	{ _MMIO(0x9888), 0x042cc000 },
	{ _MMIO(0x9888), 0x19900157 },
	{ _MMIO(0x9888), 0x1b900158 },
	{ _MMIO(0x9888), 0x1d900105 },
	{ _MMIO(0x9888), 0x1f900103 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x11900fff },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900800 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900821 },
	{ _MMIO(0x9888), 0x47900802 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900802 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900002 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900422 },
	{ _MMIO(0x9888), 0x53905555 },
};
285
286static int
287get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
288 const struct i915_oa_reg **regs,
289 int *lens)
290{
291 int n = 0;
292
293 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
294 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
295
296 regs[n] = mux_config_compute_basic;
297 lens[n] = ARRAY_SIZE(mux_config_compute_basic);
298 n++;
299
300 return n;
301}
302
/* Autogenerated B-counter register/value pairs for render_pipe_profile. */
static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007ffea },
	{ _MMIO(0x2774), 0x00007ffc },
	{ _MMIO(0x2778), 0x0007affa },
	{ _MMIO(0x277c), 0x0000f5fd },
	{ _MMIO(0x2780), 0x00079ffa },
	{ _MMIO(0x2784), 0x0000f3fb },
	{ _MMIO(0x2788), 0x0007bf7a },
	{ _MMIO(0x278c), 0x0000f7e7 },
	{ _MMIO(0x2790), 0x0007fefa },
	{ _MMIO(0x2794), 0x0000f7cf },
	{ _MMIO(0x2798), 0x00077ffa },
	{ _MMIO(0x279c), 0x0000efdf },
	{ _MMIO(0x27a0), 0x0006fffa },
	{ _MMIO(0x27a4), 0x0000cfbf },
	{ _MMIO(0x27a8), 0x0003fffa },
	{ _MMIO(0x27ac), 0x00005f7f },
};
326
/* Autogenerated flex-EU register/value pairs for render_pipe_profile. */
static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
336
/*
 * Autogenerated MUX config for the render_pipe_profile set; register
 * 0x9888 write sequence — hardware-specific, do not edit by hand.
 */
static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
	{ _MMIO(0x9888), 0x0c0e001f },
	{ _MMIO(0x9888), 0x0a0f0000 },
	{ _MMIO(0x9888), 0x10116800 },
	{ _MMIO(0x9888), 0x178a03e0 },
	{ _MMIO(0x9888), 0x11824c00 },
	{ _MMIO(0x9888), 0x11830020 },
	{ _MMIO(0x9888), 0x13840020 },
	{ _MMIO(0x9888), 0x11850019 },
	{ _MMIO(0x9888), 0x11860007 },
	{ _MMIO(0x9888), 0x01870c40 },
	{ _MMIO(0x9888), 0x17880000 },
	{ _MMIO(0x9888), 0x022f4000 },
	{ _MMIO(0x9888), 0x0a4c0040 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x040d4000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x020e5400 },
	{ _MMIO(0x9888), 0x000e0000 },
	{ _MMIO(0x9888), 0x080f0040 },
	{ _MMIO(0x9888), 0x000f0000 },
	{ _MMIO(0x9888), 0x100f0000 },
	{ _MMIO(0x9888), 0x0e0f0040 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x06104000 },
	{ _MMIO(0x9888), 0x06110012 },
	{ _MMIO(0x9888), 0x06131000 },
	{ _MMIO(0x9888), 0x01898000 },
	{ _MMIO(0x9888), 0x0d890100 },
	{ _MMIO(0x9888), 0x03898000 },
	{ _MMIO(0x9888), 0x09808000 },
	{ _MMIO(0x9888), 0x0b808000 },
	{ _MMIO(0x9888), 0x0380c000 },
	{ _MMIO(0x9888), 0x0f8a0075 },
	{ _MMIO(0x9888), 0x1d8a0000 },
	{ _MMIO(0x9888), 0x118a8000 },
	{ _MMIO(0x9888), 0x1b8a4000 },
	{ _MMIO(0x9888), 0x138a8000 },
	{ _MMIO(0x9888), 0x1d81a000 },
	{ _MMIO(0x9888), 0x15818000 },
	{ _MMIO(0x9888), 0x17818000 },
	{ _MMIO(0x9888), 0x0b820030 },
	{ _MMIO(0x9888), 0x07828000 },
	{ _MMIO(0x9888), 0x0d824000 },
	{ _MMIO(0x9888), 0x0f828000 },
	{ _MMIO(0x9888), 0x05824000 },
	{ _MMIO(0x9888), 0x0d830003 },
	{ _MMIO(0x9888), 0x0583000c },
	{ _MMIO(0x9888), 0x09830000 },
	{ _MMIO(0x9888), 0x03838000 },
	{ _MMIO(0x9888), 0x07838000 },
	{ _MMIO(0x9888), 0x0b840980 },
	{ _MMIO(0x9888), 0x03844d80 },
	{ _MMIO(0x9888), 0x11840000 },
	{ _MMIO(0x9888), 0x09848000 },
	{ _MMIO(0x9888), 0x09850080 },
	{ _MMIO(0x9888), 0x03850003 },
	{ _MMIO(0x9888), 0x01850000 },
	{ _MMIO(0x9888), 0x07860000 },
	{ _MMIO(0x9888), 0x0f860400 },
	{ _MMIO(0x9888), 0x09870032 },
	{ _MMIO(0x9888), 0x01888052 },
	{ _MMIO(0x9888), 0x11880000 },
	{ _MMIO(0x9888), 0x09884000 },
	{ _MMIO(0x9888), 0x1b931001 },
	{ _MMIO(0x9888), 0x1d930001 },
	{ _MMIO(0x9888), 0x19934000 },
	{ _MMIO(0x9888), 0x1b958000 },
	{ _MMIO(0x9888), 0x1d950094 },
	{ _MMIO(0x9888), 0x19958000 },
	{ _MMIO(0x9888), 0x09e58000 },
	{ _MMIO(0x9888), 0x0be58000 },
	{ _MMIO(0x9888), 0x03e5c000 },
	{ _MMIO(0x9888), 0x0592c000 },
	{ _MMIO(0x9888), 0x0b928000 },
	{ _MMIO(0x9888), 0x0d924000 },
	{ _MMIO(0x9888), 0x0f924000 },
	{ _MMIO(0x9888), 0x11928000 },
	{ _MMIO(0x9888), 0x1392c000 },
	{ _MMIO(0x9888), 0x09924000 },
	{ _MMIO(0x9888), 0x01985000 },
	{ _MMIO(0x9888), 0x07988000 },
	{ _MMIO(0x9888), 0x09981000 },
	{ _MMIO(0x9888), 0x0b982000 },
	{ _MMIO(0x9888), 0x0d982000 },
	{ _MMIO(0x9888), 0x0f989000 },
	{ _MMIO(0x9888), 0x05982000 },
	{ _MMIO(0x9888), 0x13904000 },
	{ _MMIO(0x9888), 0x21904000 },
	{ _MMIO(0x9888), 0x23904000 },
	{ _MMIO(0x9888), 0x25908000 },
	{ _MMIO(0x9888), 0x27904000 },
	{ _MMIO(0x9888), 0x29908000 },
	{ _MMIO(0x9888), 0x2b904000 },
	{ _MMIO(0x9888), 0x2f904000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x15904000 },
	{ _MMIO(0x9888), 0x17908000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b904000 },
	{ _MMIO(0x9888), 0x1190c080 },
	{ _MMIO(0x9888), 0x51901110 },
	{ _MMIO(0x9888), 0x41900440 },
	{ _MMIO(0x9888), 0x55901111 },
	{ _MMIO(0x9888), 0x45900400 },
	{ _MMIO(0x9888), 0x47900c21 },
	{ _MMIO(0x9888), 0x57901411 },
	{ _MMIO(0x9888), 0x49900042 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900024 },
	{ _MMIO(0x9888), 0x59900001 },
	{ _MMIO(0x9888), 0x43900841 },
	{ _MMIO(0x9888), 0x53900411 },
};
452
453static int
454get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
455 const struct i915_oa_reg **regs,
456 int *lens)
457{
458 int n = 0;
459
460 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
461 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
462
463 regs[n] = mux_config_render_pipe_profile;
464 lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
465 n++;
466
467 return n;
468}
469
/* Autogenerated B-counter register/value pairs for the memory_reads set. */
static const struct i915_oa_reg b_counter_config_memory_reads[] = {
	{ _MMIO(0x272c), 0xffffffff },
	{ _MMIO(0x2728), 0xffffffff },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x271c), 0xffffffff },
	{ _MMIO(0x2718), 0xffffffff },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x86543210 },
	{ _MMIO(0x2748), 0x86543210 },
	{ _MMIO(0x2744), 0x00006667 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x86543210 },
	{ _MMIO(0x2758), 0x86543210 },
	{ _MMIO(0x2754), 0x00006465 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fe00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fe00 },
	{ _MMIO(0x2780), 0x0007f872 },
	{ _MMIO(0x2784), 0x0000fe00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fe00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fe00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fe00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fe00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fe00 },
};
504
/* Autogenerated flex-EU register/value pairs for the memory_reads set. */
static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};
514
/*
 * Autogenerated MUX config for the memory_reads set; register 0x9888
 * write sequence — hardware-specific, do not edit by hand.
 */
static const struct i915_oa_reg mux_config_memory_reads[] = {
	{ _MMIO(0x9888), 0x11810c00 },
	{ _MMIO(0x9888), 0x1381001a },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f900064 },
	{ _MMIO(0x9888), 0x03811300 },
	{ _MMIO(0x9888), 0x05811b12 },
	{ _MMIO(0x9888), 0x0781001a },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x17810000 },
	{ _MMIO(0x9888), 0x19810000 },
	{ _MMIO(0x9888), 0x1b810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x1b930055 },
	{ _MMIO(0x9888), 0x03e58000 },
	{ _MMIO(0x9888), 0x05e5c000 },
	{ _MMIO(0x9888), 0x07e54000 },
	{ _MMIO(0x9888), 0x13900150 },
	{ _MMIO(0x9888), 0x21900151 },
	{ _MMIO(0x9888), 0x23900152 },
	{ _MMIO(0x9888), 0x25900153 },
	{ _MMIO(0x9888), 0x27900154 },
	{ _MMIO(0x9888), 0x29900155 },
	{ _MMIO(0x9888), 0x2b900156 },
	{ _MMIO(0x9888), 0x2d900157 },
	{ _MMIO(0x9888), 0x2f90015f },
	{ _MMIO(0x9888), 0x31900105 },
	{ _MMIO(0x9888), 0x15900103 },
	{ _MMIO(0x9888), 0x17900101 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c60 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900c00 },
	{ _MMIO(0x9888), 0x47900c63 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900c63 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900063 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
};
563
564static int
565get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
566 const struct i915_oa_reg **regs,
567 int *lens)
568{
569 int n = 0;
570
571 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
572 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
573
574 regs[n] = mux_config_memory_reads;
575 lens[n] = ARRAY_SIZE(mux_config_memory_reads);
576 n++;
577
578 return n;
579}
580
/*
 * Boolean ("B") counter configuration for the "memory writes" metric
 * set: (register, value) pairs applied verbatim.
 *
 * NOTE(review): table appears machine-generated — do not hand-edit
 * individual values without the metrics definition to check against.
 */
static const struct i915_oa_reg b_counter_config_memory_writes[] = {
	{ _MMIO(0x272c), 0xffffffff },
	{ _MMIO(0x2728), 0xffffffff },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x271c), 0xffffffff },
	{ _MMIO(0x2718), 0xffffffff },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x86543210 },
	{ _MMIO(0x2748), 0x86543210 },
	{ _MMIO(0x2744), 0x00006667 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x86543210 },
	{ _MMIO(0x2758), 0x86543210 },
	{ _MMIO(0x2754), 0x00006465 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fe00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fe00 },
	{ _MMIO(0x2780), 0x0007f822 },
	{ _MMIO(0x2784), 0x0000fe00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fe00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fe00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fe00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fe00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fe00 },
};

/* Flexible-EU counter select programming for the "memory writes" set. */
static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00015014 },
	{ _MMIO(0xe658), 0x00025024 },
	{ _MMIO(0xe758), 0x00035034 },
	{ _MMIO(0xe45c), 0x00045044 },
	{ _MMIO(0xe55c), 0x00055054 },
	{ _MMIO(0xe65c), 0x00065064 },
};

/*
 * MUX programming for the "memory writes" metric set; every entry
 * writes a payload to the 0x9888 selector register.
 */
static const struct i915_oa_reg mux_config_memory_writes[] = {
	{ _MMIO(0x9888), 0x11810c00 },
	{ _MMIO(0x9888), 0x1381001a },
	{ _MMIO(0x9888), 0x37906800 },
	{ _MMIO(0x9888), 0x3f901000 },
	{ _MMIO(0x9888), 0x03811300 },
	{ _MMIO(0x9888), 0x05811b12 },
	{ _MMIO(0x9888), 0x0781001a },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x17810000 },
	{ _MMIO(0x9888), 0x19810000 },
	{ _MMIO(0x9888), 0x1b810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x1b930055 },
	{ _MMIO(0x9888), 0x03e58000 },
	{ _MMIO(0x9888), 0x05e5c000 },
	{ _MMIO(0x9888), 0x07e54000 },
	{ _MMIO(0x9888), 0x13900160 },
	{ _MMIO(0x9888), 0x21900161 },
	{ _MMIO(0x9888), 0x23900162 },
	{ _MMIO(0x9888), 0x25900163 },
	{ _MMIO(0x9888), 0x27900164 },
	{ _MMIO(0x9888), 0x29900165 },
	{ _MMIO(0x9888), 0x2b900166 },
	{ _MMIO(0x9888), 0x2d900167 },
	{ _MMIO(0x9888), 0x2f900150 },
	{ _MMIO(0x9888), 0x31900105 },
	{ _MMIO(0x9888), 0x15900103 },
	{ _MMIO(0x9888), 0x17900101 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1d908000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c60 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900c00 },
	{ _MMIO(0x9888), 0x47900c63 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900c63 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900063 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
};
674
675static int
676get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
677 const struct i915_oa_reg **regs,
678 int *lens)
679{
680 int n = 0;
681
682 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
683 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
684
685 regs[n] = mux_config_memory_writes;
686 lens[n] = ARRAY_SIZE(mux_config_memory_writes);
687 n++;
688
689 return n;
690}
691
/*
 * Boolean ("B") counter configuration for the "compute extended"
 * metric set: (register, value) pairs applied verbatim.
 *
 * NOTE(review): table appears machine-generated — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_compute_extended[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fc2a },
	{ _MMIO(0x2774), 0x0000bf00 },
	{ _MMIO(0x2778), 0x0007fc6a },
	{ _MMIO(0x277c), 0x0000bf00 },
	{ _MMIO(0x2780), 0x0007fc92 },
	{ _MMIO(0x2784), 0x0000bf00 },
	{ _MMIO(0x2788), 0x0007fca2 },
	{ _MMIO(0x278c), 0x0000bf00 },
	{ _MMIO(0x2790), 0x0007fc32 },
	{ _MMIO(0x2794), 0x0000bf00 },
	{ _MMIO(0x2798), 0x0007fc9a },
	{ _MMIO(0x279c), 0x0000bf00 },
	{ _MMIO(0x27a0), 0x0007fe6a },
	{ _MMIO(0x27a4), 0x0000bf00 },
	{ _MMIO(0x27a8), 0x0007fe7a },
	{ _MMIO(0x27ac), 0x0000bf00 },
};

/* Flexible-EU counter select programming for the "compute extended" set. */
static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00778008 },
	{ _MMIO(0xe45c), 0x00088078 },
	{ _MMIO(0xe55c), 0x00808708 },
	{ _MMIO(0xe65c), 0x00a08908 },
};

/*
 * MUX programming for the "compute extended" metric set; every entry
 * writes a payload to the 0x9888 selector register.
 */
static const struct i915_oa_reg mux_config_compute_extended[] = {
	{ _MMIO(0x9888), 0x106c00e0 },
	{ _MMIO(0x9888), 0x141c8160 },
	{ _MMIO(0x9888), 0x161c8015 },
	{ _MMIO(0x9888), 0x181c0120 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x0e4e8000 },
	{ _MMIO(0x9888), 0x184e8000 },
	{ _MMIO(0x9888), 0x1a4eaaa0 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x024e8000 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x0e6c0b01 },
	{ _MMIO(0x9888), 0x006c0200 },
	{ _MMIO(0x9888), 0x026c000c },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x0e1bc000 },
	{ _MMIO(0x9888), 0x001b8000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x001c0041 },
	{ _MMIO(0x9888), 0x061c4200 },
	{ _MMIO(0x9888), 0x081c4443 },
	{ _MMIO(0x9888), 0x0a1c4645 },
	{ _MMIO(0x9888), 0x0c1c7647 },
	{ _MMIO(0x9888), 0x041c7357 },
	{ _MMIO(0x9888), 0x1c1c0030 },
	{ _MMIO(0x9888), 0x101c0000 },
	{ _MMIO(0x9888), 0x1a1c0000 },
	{ _MMIO(0x9888), 0x121c8000 },
	{ _MMIO(0x9888), 0x004c8000 },
	{ _MMIO(0x9888), 0x0a4caa2a },
	{ _MMIO(0x9888), 0x0c4c02aa },
	{ _MMIO(0x9888), 0x084ca000 },
	{ _MMIO(0x9888), 0x000da000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x0c0f5400 },
	{ _MMIO(0x9888), 0x0e0f5515 },
	{ _MMIO(0x9888), 0x100f0155 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2c8000 },
	{ _MMIO(0x9888), 0x162caa00 },
	{ _MMIO(0x9888), 0x182c00aa },
	{ _MMIO(0x9888), 0x022c8000 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x11907fff },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900040 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900802 },
	{ _MMIO(0x9888), 0x47900842 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900842 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x43900800 },
	{ _MMIO(0x9888), 0x53900000 },
};
799
800static int
801get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
802 const struct i915_oa_reg **regs,
803 int *lens)
804{
805 int n = 0;
806
807 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
808 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
809
810 regs[n] = mux_config_compute_extended;
811 lens[n] = ARRAY_SIZE(mux_config_compute_extended);
812 n++;
813
814 return n;
815}
816
/*
 * Boolean ("B") counter configuration for the "compute L3 cache"
 * metric set: (register, value) pairs applied verbatim.
 *
 * NOTE(review): table appears machine-generated — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x30800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fffa },
	{ _MMIO(0x2774), 0x0000fefe },
	{ _MMIO(0x2778), 0x0007fffa },
	{ _MMIO(0x277c), 0x0000fefd },
	{ _MMIO(0x2790), 0x0007fffa },
	{ _MMIO(0x2794), 0x0000fbef },
	{ _MMIO(0x2798), 0x0007fffa },
	{ _MMIO(0x279c), 0x0000fbdf },
};

/* Flexible-EU counter select programming for the "compute L3 cache" set. */
static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00000003 },
	{ _MMIO(0xe658), 0x00002001 },
	{ _MMIO(0xe758), 0x00101100 },
	{ _MMIO(0xe45c), 0x00201200 },
	{ _MMIO(0xe55c), 0x00301300 },
	{ _MMIO(0xe65c), 0x00401400 },
};

/*
 * MUX programming for the "compute L3 cache" metric set; every entry
 * writes a payload to the 0x9888 selector register.
 */
static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
	{ _MMIO(0x9888), 0x166c0760 },
	{ _MMIO(0x9888), 0x1593001e },
	{ _MMIO(0x9888), 0x3f900003 },
	{ _MMIO(0x9888), 0x004e8000 },
	{ _MMIO(0x9888), 0x0e4e8000 },
	{ _MMIO(0x9888), 0x184e8000 },
	{ _MMIO(0x9888), 0x1a4e8020 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x006c0051 },
	{ _MMIO(0x9888), 0x066c5000 },
	{ _MMIO(0x9888), 0x086c5c5d },
	{ _MMIO(0x9888), 0x0e6c5e5f },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x186c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x001b4000 },
	{ _MMIO(0x9888), 0x061b8000 },
	{ _MMIO(0x9888), 0x081bc000 },
	{ _MMIO(0x9888), 0x0e1bc000 },
	{ _MMIO(0x9888), 0x101c8000 },
	{ _MMIO(0x9888), 0x1a1ce000 },
	{ _MMIO(0x9888), 0x1c1c0030 },
	{ _MMIO(0x9888), 0x004c8000 },
	{ _MMIO(0x9888), 0x0a4c2a00 },
	{ _MMIO(0x9888), 0x0c4c0280 },
	{ _MMIO(0x9888), 0x000d2000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x0c0f0400 },
	{ _MMIO(0x9888), 0x0e0f1500 },
	{ _MMIO(0x9888), 0x100f0140 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2c8000 },
	{ _MMIO(0x9888), 0x162c0a00 },
	{ _MMIO(0x9888), 0x182c00a0 },
	{ _MMIO(0x9888), 0x03933300 },
	{ _MMIO(0x9888), 0x05930032 },
	{ _MMIO(0x9888), 0x11930000 },
	{ _MMIO(0x9888), 0x1b930000 },
	{ _MMIO(0x9888), 0x1d900157 },
	{ _MMIO(0x9888), 0x1f900158 },
	{ _MMIO(0x9888), 0x35900000 },
	{ _MMIO(0x9888), 0x19908000 },
	{ _MMIO(0x9888), 0x1b908000 },
	{ _MMIO(0x9888), 0x1190030f },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900000 },
	{ _MMIO(0x9888), 0x55900000 },
	{ _MMIO(0x9888), 0x45900021 },
	{ _MMIO(0x9888), 0x47900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x4b900000 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x53905555 },
	{ _MMIO(0x9888), 0x43900000 },
};
904
905static int
906get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
907 const struct i915_oa_reg **regs,
908 int *lens)
909{
910 int n = 0;
911
912 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
913 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
914
915 regs[n] = mux_config_compute_l3_cache;
916 lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
917 n++;
918
919 return n;
920}
921
/*
 * Boolean ("B") counter configuration for the "HDC and SF" metric
 * set: (register, value) pairs applied verbatim.
 *
 * NOTE(review): table appears machine-generated — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x10800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000fdff },
};

/* Flexible-EU counter select programming for the "HDC and SF" set. */
static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/*
 * MUX programming for the "HDC and SF" metric set; every entry writes
 * a payload to the 0x9888 selector register.
 */
static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
	{ _MMIO(0x9888), 0x104f0232 },
	{ _MMIO(0x9888), 0x124f4640 },
	{ _MMIO(0x9888), 0x106c0232 },
	{ _MMIO(0x9888), 0x11834400 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x0c4e8000 },
	{ _MMIO(0x9888), 0x004f1880 },
	{ _MMIO(0x9888), 0x024f08bb },
	{ _MMIO(0x9888), 0x044f001b },
	{ _MMIO(0x9888), 0x046c0100 },
	{ _MMIO(0x9888), 0x066c000b },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x041b8000 },
	{ _MMIO(0x9888), 0x061b4000 },
	{ _MMIO(0x9888), 0x1a1c1800 },
	{ _MMIO(0x9888), 0x005b8000 },
	{ _MMIO(0x9888), 0x025bc000 },
	{ _MMIO(0x9888), 0x045b4000 },
	{ _MMIO(0x9888), 0x125c8000 },
	{ _MMIO(0x9888), 0x145c8000 },
	{ _MMIO(0x9888), 0x165c8000 },
	{ _MMIO(0x9888), 0x185c8000 },
	{ _MMIO(0x9888), 0x0a4c00a0 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f5000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x022cc000 },
	{ _MMIO(0x9888), 0x042cc000 },
	{ _MMIO(0x9888), 0x062cc000 },
	{ _MMIO(0x9888), 0x082cc000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x0f828000 },
	{ _MMIO(0x9888), 0x0f8305c0 },
	{ _MMIO(0x9888), 0x09830000 },
	{ _MMIO(0x9888), 0x07830000 },
	{ _MMIO(0x9888), 0x1d950080 },
	{ _MMIO(0x9888), 0x13928000 },
	{ _MMIO(0x9888), 0x0f988000 },
	{ _MMIO(0x9888), 0x31904000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x59900001 },
	{ _MMIO(0x9888), 0x4b900040 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900800 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
998
999static int
1000get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
1001 const struct i915_oa_reg **regs,
1002 int *lens)
1003{
1004 int n = 0;
1005
1006 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1007 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1008
1009 regs[n] = mux_config_hdc_and_sf;
1010 lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
1011 n++;
1012
1013 return n;
1014}
1015
/*
 * Boolean ("B") counter configuration for the "L3 / 1" metric set:
 * (register, value) pairs applied verbatim.
 *
 * NOTE(review): table appears machine-generated — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_l3_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00014002 },
	{ _MMIO(0x277c), 0x0000c3ff },
	{ _MMIO(0x2780), 0x00010002 },
	{ _MMIO(0x2784), 0x0000c7ff },
	{ _MMIO(0x2788), 0x00004002 },
	{ _MMIO(0x278c), 0x0000d3ff },
	{ _MMIO(0x2790), 0x00100700 },
	{ _MMIO(0x2794), 0x0000ff1f },
	{ _MMIO(0x2798), 0x00001402 },
	{ _MMIO(0x279c), 0x0000fc3f },
	{ _MMIO(0x27a0), 0x00001002 },
	{ _MMIO(0x27a4), 0x0000fc7f },
	{ _MMIO(0x27a8), 0x00000402 },
	{ _MMIO(0x27ac), 0x0000fd3f },
};

/* Flexible-EU counter select programming for the "L3 / 1" set. */
static const struct i915_oa_reg flex_eu_config_l3_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/*
 * MUX programming for the "L3 / 1" metric set; every entry writes a
 * payload to the 0x9888 selector register.
 */
static const struct i915_oa_reg mux_config_l3_1[] = {
	{ _MMIO(0x9888), 0x126c7b40 },
	{ _MMIO(0x9888), 0x166c0020 },
	{ _MMIO(0x9888), 0x0a603444 },
	{ _MMIO(0x9888), 0x0a613400 },
	{ _MMIO(0x9888), 0x1a4ea800 },
	{ _MMIO(0x9888), 0x1c4e0002 },
	{ _MMIO(0x9888), 0x024e8000 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x064f4000 },
	{ _MMIO(0x9888), 0x0c6c5327 },
	{ _MMIO(0x9888), 0x0e6c5425 },
	{ _MMIO(0x9888), 0x006c2a00 },
	{ _MMIO(0x9888), 0x026c285b },
	{ _MMIO(0x9888), 0x046c005c },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1c6c0000 },
	{ _MMIO(0x9888), 0x1e6c0000 },
	{ _MMIO(0x9888), 0x1a6c0800 },
	{ _MMIO(0x9888), 0x0c1bc000 },
	{ _MMIO(0x9888), 0x0e1bc000 },
	{ _MMIO(0x9888), 0x001b8000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x1c1c003c },
	{ _MMIO(0x9888), 0x121c8000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c0800 },
	{ _MMIO(0x9888), 0x065b4000 },
	{ _MMIO(0x9888), 0x1a5c1000 },
	{ _MMIO(0x9888), 0x10600000 },
	{ _MMIO(0x9888), 0x04600000 },
	{ _MMIO(0x9888), 0x0c610044 },
	{ _MMIO(0x9888), 0x10610000 },
	{ _MMIO(0x9888), 0x06610000 },
	{ _MMIO(0x9888), 0x0c4c02a8 },
	{ _MMIO(0x9888), 0x084ca000 },
	{ _MMIO(0x9888), 0x0a4c002a },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f0154 },
	{ _MMIO(0x9888), 0x0c0f5000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x182c00aa },
	{ _MMIO(0x9888), 0x022c8000 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2cc000 },
	{ _MMIO(0x9888), 0x1190ffc0 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900420 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900021 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900400 },
	{ _MMIO(0x9888), 0x43900421 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
};
1123
1124static int
1125get_l3_1_mux_config(struct drm_i915_private *dev_priv,
1126 const struct i915_oa_reg **regs,
1127 int *lens)
1128{
1129 int n = 0;
1130
1131 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1132 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1133
1134 regs[n] = mux_config_l3_1;
1135 lens[n] = ARRAY_SIZE(mux_config_l3_1);
1136 n++;
1137
1138 return n;
1139}
1140
/*
 * Boolean ("B") counter configuration for the "L3 / 2" metric set:
 * (register, value) pairs applied verbatim.
 *
 * NOTE(review): table appears machine-generated — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_l3_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00028002 },
	{ _MMIO(0x277c), 0x000087ff },
	{ _MMIO(0x2780), 0x00020002 },
	{ _MMIO(0x2784), 0x00008fff },
	{ _MMIO(0x2788), 0x00008002 },
	{ _MMIO(0x278c), 0x0000a7ff },
};

/* Flexible-EU counter select programming for the "L3 / 2" set. */
static const struct i915_oa_reg flex_eu_config_l3_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/*
 * MUX programming for the "L3 / 2" metric set; every entry writes a
 * payload to the 0x9888 selector register.
 */
static const struct i915_oa_reg mux_config_l3_2[] = {
	{ _MMIO(0x9888), 0x126c02e0 },
	{ _MMIO(0x9888), 0x146c0001 },
	{ _MMIO(0x9888), 0x0a623400 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x064f4000 },
	{ _MMIO(0x9888), 0x026c3324 },
	{ _MMIO(0x9888), 0x046c3422 },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1a6c0000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c0800 },
	{ _MMIO(0x9888), 0x065b4000 },
	{ _MMIO(0x9888), 0x1a5c1000 },
	{ _MMIO(0x9888), 0x06614000 },
	{ _MMIO(0x9888), 0x0c620044 },
	{ _MMIO(0x9888), 0x10620000 },
	{ _MMIO(0x9888), 0x06620000 },
	{ _MMIO(0x9888), 0x084c8000 },
	{ _MMIO(0x9888), 0x0a4c002a },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f4000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2cc000 },
	{ _MMIO(0x9888), 0x1190f800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x43900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1213
1214static int
1215get_l3_2_mux_config(struct drm_i915_private *dev_priv,
1216 const struct i915_oa_reg **regs,
1217 int *lens)
1218{
1219 int n = 0;
1220
1221 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1222 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1223
1224 regs[n] = mux_config_l3_2;
1225 lens[n] = ARRAY_SIZE(mux_config_l3_2);
1226 n++;
1227
1228 return n;
1229}
1230
/*
 * Boolean ("B") counter configuration for the "L3 / 3" metric set:
 * (register, value) pairs applied verbatim.
 *
 * NOTE(review): table appears machine-generated — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_l3_3[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00100070 },
	{ _MMIO(0x2774), 0x0000fff1 },
	{ _MMIO(0x2778), 0x00028002 },
	{ _MMIO(0x277c), 0x000087ff },
	{ _MMIO(0x2780), 0x00020002 },
	{ _MMIO(0x2784), 0x00008fff },
	{ _MMIO(0x2788), 0x00008002 },
	{ _MMIO(0x278c), 0x0000a7ff },
};

/* Flexible-EU counter select programming for the "L3 / 3" set. */
static const struct i915_oa_reg flex_eu_config_l3_3[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/*
 * MUX programming for the "L3 / 3" metric set; every entry writes a
 * payload to the 0x9888 selector register.
 */
static const struct i915_oa_reg mux_config_l3_3[] = {
	{ _MMIO(0x9888), 0x126c4e80 },
	{ _MMIO(0x9888), 0x146c0000 },
	{ _MMIO(0x9888), 0x0a633400 },
	{ _MMIO(0x9888), 0x044e8000 },
	{ _MMIO(0x9888), 0x064e8000 },
	{ _MMIO(0x9888), 0x084e8000 },
	{ _MMIO(0x9888), 0x0a4e8000 },
	{ _MMIO(0x9888), 0x0c4e8000 },
	{ _MMIO(0x9888), 0x026c3321 },
	{ _MMIO(0x9888), 0x046c342f },
	{ _MMIO(0x9888), 0x106c0000 },
	{ _MMIO(0x9888), 0x1a6c2000 },
	{ _MMIO(0x9888), 0x021bc000 },
	{ _MMIO(0x9888), 0x041bc000 },
	{ _MMIO(0x9888), 0x061b4000 },
	{ _MMIO(0x9888), 0x141c8000 },
	{ _MMIO(0x9888), 0x161c8000 },
	{ _MMIO(0x9888), 0x181c8000 },
	{ _MMIO(0x9888), 0x1a1c1800 },
	{ _MMIO(0x9888), 0x06604000 },
	{ _MMIO(0x9888), 0x0c630044 },
	{ _MMIO(0x9888), 0x10630000 },
	{ _MMIO(0x9888), 0x06630000 },
	{ _MMIO(0x9888), 0x084c8000 },
	{ _MMIO(0x9888), 0x0a4c00aa },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0f4000 },
	{ _MMIO(0x9888), 0x0e0f0055 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x1190f800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900002 },
	{ _MMIO(0x9888), 0x33900000 },
};
1302
1303static int
1304get_l3_3_mux_config(struct drm_i915_private *dev_priv,
1305 const struct i915_oa_reg **regs,
1306 int *lens)
1307{
1308 int n = 0;
1309
1310 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1311 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1312
1313 regs[n] = mux_config_l3_3;
1314 lens[n] = ARRAY_SIZE(mux_config_l3_3);
1315 n++;
1316
1317 return n;
1318}
1319
/*
 * Boolean ("B") counter configuration for the "rasterizer and pixel
 * backend" metric set: (register, value) pairs applied verbatim.
 *
 * NOTE(review): table appears machine-generated — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x30800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x0000efff },
	{ _MMIO(0x2778), 0x00006000 },
	{ _MMIO(0x277c), 0x0000f3ff },
};

/*
 * Flexible-EU counter select programming for the "rasterizer and
 * pixel backend" set.
 */
static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/*
 * MUX programming for the "rasterizer and pixel backend" metric set;
 * every entry writes a payload to the 0x9888 selector register.
 */
static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
	{ _MMIO(0x9888), 0x102f3800 },
	{ _MMIO(0x9888), 0x144d0500 },
	{ _MMIO(0x9888), 0x120d03c0 },
	{ _MMIO(0x9888), 0x140d03cf },
	{ _MMIO(0x9888), 0x0c0f0004 },
	{ _MMIO(0x9888), 0x0c4e4000 },
	{ _MMIO(0x9888), 0x042f0480 },
	{ _MMIO(0x9888), 0x082f0000 },
	{ _MMIO(0x9888), 0x022f0000 },
	{ _MMIO(0x9888), 0x0a4c0090 },
	{ _MMIO(0x9888), 0x064d0027 },
	{ _MMIO(0x9888), 0x004d0000 },
	{ _MMIO(0x9888), 0x000d0d40 },
	{ _MMIO(0x9888), 0x020d803f },
	{ _MMIO(0x9888), 0x040d8023 },
	{ _MMIO(0x9888), 0x100d0000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x020f0010 },
	{ _MMIO(0x9888), 0x000f0000 },
	{ _MMIO(0x9888), 0x0e0f0050 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41901400 },
	{ _MMIO(0x9888), 0x43901485 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900001 },
	{ _MMIO(0x9888), 0x33900000 },
};
1375
1376static int
1377get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
1378 const struct i915_oa_reg **regs,
1379 int *lens)
1380{
1381 int n = 0;
1382
1383 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1384 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1385
1386 regs[n] = mux_config_rasterizer_and_pixel_backend;
1387 lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
1388 n++;
1389
1390 return n;
1391}
1392
/*
 * Boolean ("B") counter configuration for the "sampler" metric set:
 * (register, value) pairs applied verbatim.
 *
 * NOTE(review): table appears machine-generated — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_sampler[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x70800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2770), 0x0000c000 },
	{ _MMIO(0x2774), 0x0000e7ff },
	{ _MMIO(0x2778), 0x00003000 },
	{ _MMIO(0x277c), 0x0000f9ff },
	{ _MMIO(0x2780), 0x00000c00 },
	{ _MMIO(0x2784), 0x0000fe7f },
};

/* Flexible-EU counter select programming for the "sampler" set. */
static const struct i915_oa_reg flex_eu_config_sampler[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};

/*
 * MUX programming for the "sampler" metric set; every entry writes a
 * payload to the 0x9888 selector register.
 */
static const struct i915_oa_reg mux_config_sampler[] = {
	{ _MMIO(0x9888), 0x14152c00 },
	{ _MMIO(0x9888), 0x16150005 },
	{ _MMIO(0x9888), 0x121600a0 },
	{ _MMIO(0x9888), 0x14352c00 },
	{ _MMIO(0x9888), 0x16350005 },
	{ _MMIO(0x9888), 0x123600a0 },
	{ _MMIO(0x9888), 0x14552c00 },
	{ _MMIO(0x9888), 0x16550005 },
	{ _MMIO(0x9888), 0x125600a0 },
	{ _MMIO(0x9888), 0x062f6000 },
	{ _MMIO(0x9888), 0x022f2000 },
	{ _MMIO(0x9888), 0x0c4c0050 },
	{ _MMIO(0x9888), 0x0a4c0010 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f0350 },
	{ _MMIO(0x9888), 0x0c0fb000 },
	{ _MMIO(0x9888), 0x0e0f00da },
	{ _MMIO(0x9888), 0x182c0028 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x022dc000 },
	{ _MMIO(0x9888), 0x042d4000 },
	{ _MMIO(0x9888), 0x0c138000 },
	{ _MMIO(0x9888), 0x0e132000 },
	{ _MMIO(0x9888), 0x0413c000 },
	{ _MMIO(0x9888), 0x1c140018 },
	{ _MMIO(0x9888), 0x0c157000 },
	{ _MMIO(0x9888), 0x0e150078 },
	{ _MMIO(0x9888), 0x10150000 },
	{ _MMIO(0x9888), 0x04162180 },
	{ _MMIO(0x9888), 0x02160000 },
	{ _MMIO(0x9888), 0x04174000 },
	{ _MMIO(0x9888), 0x0233a000 },
	{ _MMIO(0x9888), 0x04333000 },
	{ _MMIO(0x9888), 0x14348000 },
	{ _MMIO(0x9888), 0x16348000 },
	{ _MMIO(0x9888), 0x02357870 },
	{ _MMIO(0x9888), 0x10350000 },
	{ _MMIO(0x9888), 0x04360043 },
	{ _MMIO(0x9888), 0x02360000 },
	{ _MMIO(0x9888), 0x04371000 },
	{ _MMIO(0x9888), 0x0e538000 },
	{ _MMIO(0x9888), 0x00538000 },
	{ _MMIO(0x9888), 0x06533000 },
	{ _MMIO(0x9888), 0x1c540020 },
	{ _MMIO(0x9888), 0x12548000 },
	{ _MMIO(0x9888), 0x0e557000 },
	{ _MMIO(0x9888), 0x00557800 },
	{ _MMIO(0x9888), 0x10550000 },
	{ _MMIO(0x9888), 0x06560043 },
	{ _MMIO(0x9888), 0x02560000 },
	{ _MMIO(0x9888), 0x06571000 },
	{ _MMIO(0x9888), 0x1190ff80 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900060 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x43900842 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900060 },
};
1488
1489static int
1490get_sampler_mux_config(struct drm_i915_private *dev_priv,
1491 const struct i915_oa_reg **regs,
1492 int *lens)
1493{
1494 int n = 0;
1495
1496 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1497 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1498
1499 regs[n] = mux_config_sampler;
1500 lens[n] = ARRAY_SIZE(mux_config_sampler);
1501 n++;
1502
1503 return n;
1504}
1505
/*
 * Boolean ("B") counter register programming for the TDL_1 metric set;
 * installed as dev_priv->perf.oa.b_counter_regs when this set is selected.
 * Values are generated configuration data — do not hand-edit.
 */
static const struct i915_oa_reg b_counter_config_tdl_1[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00000002 },
	{ _MMIO(0x2774), 0x00007fff },
	{ _MMIO(0x2778), 0x00000000 },
	{ _MMIO(0x277c), 0x00009fff },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000efff },
	{ _MMIO(0x2788), 0x00000000 },
	{ _MMIO(0x278c), 0x0000f3ff },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000fdff },
	{ _MMIO(0x2798), 0x00000000 },
	{ _MMIO(0x279c), 0x0000fe7f },
};
1526
/* Flexible-EU counter programming (0xe458..0xe65c) for the TDL_1 set. */
static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1536
/*
 * MUX configuration for the TDL_1 metric set. Every entry writes register
 * 0x9888 (presumably the NOA signal-routing port — generated data, values
 * are opaque to this driver).
 */
static const struct i915_oa_reg mux_config_tdl_1[] = {
	{ _MMIO(0x9888), 0x12120000 },
	{ _MMIO(0x9888), 0x12320000 },
	{ _MMIO(0x9888), 0x12520000 },
	{ _MMIO(0x9888), 0x002f8000 },
	{ _MMIO(0x9888), 0x022f3000 },
	{ _MMIO(0x9888), 0x0a4c0015 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f03a0 },
	{ _MMIO(0x9888), 0x0c0ff000 },
	{ _MMIO(0x9888), 0x0e0f0095 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x0c2d8000 },
	{ _MMIO(0x9888), 0x0e2d4000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x02108000 },
	{ _MMIO(0x9888), 0x0410c000 },
	{ _MMIO(0x9888), 0x02118000 },
	{ _MMIO(0x9888), 0x0411c000 },
	{ _MMIO(0x9888), 0x02121880 },
	{ _MMIO(0x9888), 0x041219b5 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x02134000 },
	{ _MMIO(0x9888), 0x04135000 },
	{ _MMIO(0x9888), 0x0c308000 },
	{ _MMIO(0x9888), 0x0e304000 },
	{ _MMIO(0x9888), 0x06304000 },
	{ _MMIO(0x9888), 0x0c318000 },
	{ _MMIO(0x9888), 0x0e314000 },
	{ _MMIO(0x9888), 0x06314000 },
	{ _MMIO(0x9888), 0x0c321a80 },
	{ _MMIO(0x9888), 0x0e320033 },
	{ _MMIO(0x9888), 0x06320031 },
	{ _MMIO(0x9888), 0x00320000 },
	{ _MMIO(0x9888), 0x0c334000 },
	{ _MMIO(0x9888), 0x0e331000 },
	{ _MMIO(0x9888), 0x06331000 },
	{ _MMIO(0x9888), 0x0e508000 },
	{ _MMIO(0x9888), 0x00508000 },
	{ _MMIO(0x9888), 0x02504000 },
	{ _MMIO(0x9888), 0x0e518000 },
	{ _MMIO(0x9888), 0x00518000 },
	{ _MMIO(0x9888), 0x02514000 },
	{ _MMIO(0x9888), 0x0e521880 },
	{ _MMIO(0x9888), 0x00521a80 },
	{ _MMIO(0x9888), 0x02520033 },
	{ _MMIO(0x9888), 0x0e534000 },
	{ _MMIO(0x9888), 0x00534000 },
	{ _MMIO(0x9888), 0x02531000 },
	{ _MMIO(0x9888), 0x1190ff80 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900800 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900062 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900c00 },
	{ _MMIO(0x9888), 0x43900003 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
};
1606
1607static int
1608get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
1609 const struct i915_oa_reg **regs,
1610 int *lens)
1611{
1612 int n = 0;
1613
1614 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1615 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1616
1617 regs[n] = mux_config_tdl_1;
1618 lens[n] = ARRAY_SIZE(mux_config_tdl_1);
1619 n++;
1620
1621 return n;
1622}
1623
/*
 * Boolean ("B") counter register programming for the TDL_2 metric set;
 * installed as dev_priv->perf.oa.b_counter_regs when this set is selected.
 */
static const struct i915_oa_reg b_counter_config_tdl_2[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};
1632
/* Flexible-EU counter programming (0xe458..0xe65c) for the TDL_2 set. */
static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00010003 },
	{ _MMIO(0xe658), 0x00012011 },
	{ _MMIO(0xe758), 0x00015014 },
	{ _MMIO(0xe45c), 0x00051050 },
	{ _MMIO(0xe55c), 0x00053052 },
	{ _MMIO(0xe65c), 0x00055054 },
};
1642
/*
 * MUX configuration for the TDL_2 metric set (all writes to 0x9888;
 * generated data, values opaque to this driver).
 */
static const struct i915_oa_reg mux_config_tdl_2[] = {
	{ _MMIO(0x9888), 0x12124d60 },
	{ _MMIO(0x9888), 0x12322e60 },
	{ _MMIO(0x9888), 0x12524d60 },
	{ _MMIO(0x9888), 0x022f3000 },
	{ _MMIO(0x9888), 0x0a4c0014 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x0c0fe000 },
	{ _MMIO(0x9888), 0x0e0f0097 },
	{ _MMIO(0x9888), 0x082c8000 },
	{ _MMIO(0x9888), 0x0a2c8000 },
	{ _MMIO(0x9888), 0x002d8000 },
	{ _MMIO(0x9888), 0x062d4000 },
	{ _MMIO(0x9888), 0x0410c000 },
	{ _MMIO(0x9888), 0x0411c000 },
	{ _MMIO(0x9888), 0x04121fb7 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x04135000 },
	{ _MMIO(0x9888), 0x00308000 },
	{ _MMIO(0x9888), 0x06304000 },
	{ _MMIO(0x9888), 0x00318000 },
	{ _MMIO(0x9888), 0x06314000 },
	{ _MMIO(0x9888), 0x00321b80 },
	{ _MMIO(0x9888), 0x0632003f },
	{ _MMIO(0x9888), 0x00334000 },
	{ _MMIO(0x9888), 0x06331000 },
	{ _MMIO(0x9888), 0x0250c000 },
	{ _MMIO(0x9888), 0x0251c000 },
	{ _MMIO(0x9888), 0x02521fb7 },
	{ _MMIO(0x9888), 0x00520000 },
	{ _MMIO(0x9888), 0x02535000 },
	{ _MMIO(0x9888), 0x1190fc00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x51900000 },
	{ _MMIO(0x9888), 0x41900800 },
	{ _MMIO(0x9888), 0x43900063 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900040 },
	{ _MMIO(0x9888), 0x33900000 },
};
1686
1687static int
1688get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
1689 const struct i915_oa_reg **regs,
1690 int *lens)
1691{
1692 int n = 0;
1693
1694 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1695 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1696
1697 regs[n] = mux_config_tdl_2;
1698 lens[n] = ARRAY_SIZE(mux_config_tdl_2);
1699 n++;
1700
1701 return n;
1702}
1703
/* COMPUTE_EXTRA needs no boolean-counter programming (intentionally empty). */
static const struct i915_oa_reg b_counter_config_compute_extra[] = {
};
1706
/* COMPUTE_EXTRA needs no flexible-EU programming (intentionally empty). */
static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
};
1709
/*
 * MUX configuration for the COMPUTE_EXTRA metric set (all writes to 0x9888;
 * generated data, values opaque to this driver).
 */
static const struct i915_oa_reg mux_config_compute_extra[] = {
	{ _MMIO(0x9888), 0x121203e0 },
	{ _MMIO(0x9888), 0x123203e0 },
	{ _MMIO(0x9888), 0x125203e0 },
	{ _MMIO(0x9888), 0x129203e0 },
	{ _MMIO(0x9888), 0x12b203e0 },
	{ _MMIO(0x9888), 0x12d203e0 },
	{ _MMIO(0x9888), 0x131203e0 },
	{ _MMIO(0x9888), 0x133203e0 },
	{ _MMIO(0x9888), 0x135203e0 },
	{ _MMIO(0x9888), 0x1a4ef000 },
	{ _MMIO(0x9888), 0x1c4e0003 },
	{ _MMIO(0x9888), 0x024ec000 },
	{ _MMIO(0x9888), 0x044ec000 },
	{ _MMIO(0x9888), 0x064ec000 },
	{ _MMIO(0x9888), 0x022f4000 },
	{ _MMIO(0x9888), 0x0c4c02a0 },
	{ _MMIO(0x9888), 0x084ca000 },
	{ _MMIO(0x9888), 0x0a4c0042 },
	{ _MMIO(0x9888), 0x0c0d8000 },
	{ _MMIO(0x9888), 0x0e0da000 },
	{ _MMIO(0x9888), 0x000d8000 },
	{ _MMIO(0x9888), 0x020da000 },
	{ _MMIO(0x9888), 0x040da000 },
	{ _MMIO(0x9888), 0x060d2000 },
	{ _MMIO(0x9888), 0x100f0150 },
	{ _MMIO(0x9888), 0x0c0f5000 },
	{ _MMIO(0x9888), 0x0e0f006d },
	{ _MMIO(0x9888), 0x182c00a8 },
	{ _MMIO(0x9888), 0x022c8000 },
	{ _MMIO(0x9888), 0x042c8000 },
	{ _MMIO(0x9888), 0x062c8000 },
	{ _MMIO(0x9888), 0x0c2c8000 },
	{ _MMIO(0x9888), 0x042d8000 },
	{ _MMIO(0x9888), 0x06104000 },
	{ _MMIO(0x9888), 0x06114000 },
	{ _MMIO(0x9888), 0x06120033 },
	{ _MMIO(0x9888), 0x00120000 },
	{ _MMIO(0x9888), 0x06131000 },
	{ _MMIO(0x9888), 0x04308000 },
	{ _MMIO(0x9888), 0x04318000 },
	{ _MMIO(0x9888), 0x04321980 },
	{ _MMIO(0x9888), 0x00320000 },
	{ _MMIO(0x9888), 0x04334000 },
	{ _MMIO(0x9888), 0x04504000 },
	{ _MMIO(0x9888), 0x04514000 },
	{ _MMIO(0x9888), 0x04520033 },
	{ _MMIO(0x9888), 0x00520000 },
	{ _MMIO(0x9888), 0x04531000 },
	{ _MMIO(0x9888), 0x1acef000 },
	{ _MMIO(0x9888), 0x1cce0003 },
	{ _MMIO(0x9888), 0x00af8000 },
	{ _MMIO(0x9888), 0x0ccc02a0 },
	{ _MMIO(0x9888), 0x0acc0001 },
	{ _MMIO(0x9888), 0x0c8d8000 },
	{ _MMIO(0x9888), 0x0e8da000 },
	{ _MMIO(0x9888), 0x008d8000 },
	{ _MMIO(0x9888), 0x028da000 },
	{ _MMIO(0x9888), 0x108f0150 },
	{ _MMIO(0x9888), 0x0c8fb000 },
	{ _MMIO(0x9888), 0x0e8f0001 },
	{ _MMIO(0x9888), 0x18ac00a8 },
	{ _MMIO(0x9888), 0x06ac8000 },
	{ _MMIO(0x9888), 0x02ad4000 },
	{ _MMIO(0x9888), 0x02908000 },
	{ _MMIO(0x9888), 0x02918000 },
	{ _MMIO(0x9888), 0x02921980 },
	{ _MMIO(0x9888), 0x00920000 },
	{ _MMIO(0x9888), 0x02934000 },
	{ _MMIO(0x9888), 0x02b04000 },
	{ _MMIO(0x9888), 0x02b14000 },
	{ _MMIO(0x9888), 0x02b20033 },
	{ _MMIO(0x9888), 0x00b20000 },
	{ _MMIO(0x9888), 0x02b31000 },
	{ _MMIO(0x9888), 0x00d08000 },
	{ _MMIO(0x9888), 0x00d18000 },
	{ _MMIO(0x9888), 0x00d21980 },
	{ _MMIO(0x9888), 0x00d34000 },
	{ _MMIO(0x9888), 0x072f8000 },
	{ _MMIO(0x9888), 0x0d4c0100 },
	{ _MMIO(0x9888), 0x0d0d8000 },
	{ _MMIO(0x9888), 0x0f0da000 },
	{ _MMIO(0x9888), 0x110f01b0 },
	{ _MMIO(0x9888), 0x192c0080 },
	{ _MMIO(0x9888), 0x0f2d4000 },
	{ _MMIO(0x9888), 0x0f108000 },
	{ _MMIO(0x9888), 0x0f118000 },
	{ _MMIO(0x9888), 0x0f121980 },
	{ _MMIO(0x9888), 0x01120000 },
	{ _MMIO(0x9888), 0x0f134000 },
	{ _MMIO(0x9888), 0x0f304000 },
	{ _MMIO(0x9888), 0x0f314000 },
	{ _MMIO(0x9888), 0x0f320033 },
	{ _MMIO(0x9888), 0x01320000 },
	{ _MMIO(0x9888), 0x0f331000 },
	{ _MMIO(0x9888), 0x0d508000 },
	{ _MMIO(0x9888), 0x0d518000 },
	{ _MMIO(0x9888), 0x0d521980 },
	{ _MMIO(0x9888), 0x01520000 },
	{ _MMIO(0x9888), 0x0d534000 },
	{ _MMIO(0x9888), 0x1190ff80 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900c00 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
	{ _MMIO(0x9888), 0x4b900002 },
	{ _MMIO(0x9888), 0x59900000 },
	{ _MMIO(0x9888), 0x51901100 },
	{ _MMIO(0x9888), 0x41901000 },
	{ _MMIO(0x9888), 0x43901423 },
	{ _MMIO(0x9888), 0x53903331 },
	{ _MMIO(0x9888), 0x45900044 },
};
1823
1824static int
1825get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
1826 const struct i915_oa_reg **regs,
1827 int *lens)
1828{
1829 int n = 0;
1830
1831 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1832 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1833
1834 regs[n] = mux_config_compute_extra;
1835 lens[n] = ARRAY_SIZE(mux_config_compute_extra);
1836 n++;
1837
1838 return n;
1839}
1840
/*
 * Boolean ("B") counter register programming for the VME_PIPE metric set;
 * installed as dev_priv->perf.oa.b_counter_regs when this set is selected.
 */
static const struct i915_oa_reg b_counter_config_vme_pipe[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x30800000 },
	{ _MMIO(0x2770), 0x00100030 },
	{ _MMIO(0x2774), 0x0000fff9 },
	{ _MMIO(0x2778), 0x00000002 },
	{ _MMIO(0x277c), 0x0000fffc },
	{ _MMIO(0x2780), 0x00000002 },
	{ _MMIO(0x2784), 0x0000fff3 },
	{ _MMIO(0x2788), 0x00100180 },
	{ _MMIO(0x278c), 0x0000ffcf },
	{ _MMIO(0x2790), 0x00000002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00000002 },
	{ _MMIO(0x279c), 0x0000ff3f },
};
1860
/* Flexible-EU counter programming for the VME_PIPE set. */
static const struct i915_oa_reg flex_eu_config_vme_pipe[] = {
	{ _MMIO(0xe458), 0x00005004 },
	{ _MMIO(0xe558), 0x00008003 },
};
1865
/*
 * MUX configuration for the VME_PIPE metric set (all writes to 0x9888;
 * generated data, values opaque to this driver).
 */
static const struct i915_oa_reg mux_config_vme_pipe[] = {
	{ _MMIO(0x9888), 0x141a5800 },
	{ _MMIO(0x9888), 0x161a00c0 },
	{ _MMIO(0x9888), 0x12180240 },
	{ _MMIO(0x9888), 0x14180002 },
	{ _MMIO(0x9888), 0x149a5800 },
	{ _MMIO(0x9888), 0x169a00c0 },
	{ _MMIO(0x9888), 0x12980240 },
	{ _MMIO(0x9888), 0x14980002 },
	{ _MMIO(0x9888), 0x1a4e3fc0 },
	{ _MMIO(0x9888), 0x002f1000 },
	{ _MMIO(0x9888), 0x022f8000 },
	{ _MMIO(0x9888), 0x042f3000 },
	{ _MMIO(0x9888), 0x004c4000 },
	{ _MMIO(0x9888), 0x0a4c9500 },
	{ _MMIO(0x9888), 0x0c4c002a },
	{ _MMIO(0x9888), 0x000d2000 },
	{ _MMIO(0x9888), 0x060d8000 },
	{ _MMIO(0x9888), 0x080da000 },
	{ _MMIO(0x9888), 0x0a0da000 },
	{ _MMIO(0x9888), 0x0c0da000 },
	{ _MMIO(0x9888), 0x0c0f0400 },
	{ _MMIO(0x9888), 0x0e0f5500 },
	{ _MMIO(0x9888), 0x100f0015 },
	{ _MMIO(0x9888), 0x002c8000 },
	{ _MMIO(0x9888), 0x0e2c8000 },
	{ _MMIO(0x9888), 0x162caa00 },
	{ _MMIO(0x9888), 0x182c000a },
	{ _MMIO(0x9888), 0x04193000 },
	{ _MMIO(0x9888), 0x081a28c1 },
	{ _MMIO(0x9888), 0x001a0000 },
	{ _MMIO(0x9888), 0x00133000 },
	{ _MMIO(0x9888), 0x0613c000 },
	{ _MMIO(0x9888), 0x0813f000 },
	{ _MMIO(0x9888), 0x00172000 },
	{ _MMIO(0x9888), 0x06178000 },
	{ _MMIO(0x9888), 0x0817a000 },
	{ _MMIO(0x9888), 0x00180037 },
	{ _MMIO(0x9888), 0x06180940 },
	{ _MMIO(0x9888), 0x08180000 },
	{ _MMIO(0x9888), 0x02180000 },
	{ _MMIO(0x9888), 0x04183000 },
	{ _MMIO(0x9888), 0x04afc000 },
	{ _MMIO(0x9888), 0x06af3000 },
	{ _MMIO(0x9888), 0x0acc4000 },
	{ _MMIO(0x9888), 0x0ccc0015 },
	{ _MMIO(0x9888), 0x0a8da000 },
	{ _MMIO(0x9888), 0x0c8da000 },
	{ _MMIO(0x9888), 0x0e8f4000 },
	{ _MMIO(0x9888), 0x108f0015 },
	{ _MMIO(0x9888), 0x16aca000 },
	{ _MMIO(0x9888), 0x18ac000a },
	{ _MMIO(0x9888), 0x06993000 },
	{ _MMIO(0x9888), 0x0c9a28c1 },
	{ _MMIO(0x9888), 0x009a0000 },
	{ _MMIO(0x9888), 0x0a93f000 },
	{ _MMIO(0x9888), 0x0c93f000 },
	{ _MMIO(0x9888), 0x0a97a000 },
	{ _MMIO(0x9888), 0x0c97a000 },
	{ _MMIO(0x9888), 0x0a980977 },
	{ _MMIO(0x9888), 0x08980000 },
	{ _MMIO(0x9888), 0x04980000 },
	{ _MMIO(0x9888), 0x06983000 },
	{ _MMIO(0x9888), 0x119000ff },
	{ _MMIO(0x9888), 0x51900010 },
	{ _MMIO(0x9888), 0x41900060 },
	{ _MMIO(0x9888), 0x55900111 },
	{ _MMIO(0x9888), 0x45900c00 },
	{ _MMIO(0x9888), 0x47900821 },
	{ _MMIO(0x9888), 0x57900000 },
	{ _MMIO(0x9888), 0x49900002 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
1940
1941static int
1942get_vme_pipe_mux_config(struct drm_i915_private *dev_priv,
1943 const struct i915_oa_reg **regs,
1944 int *lens)
1945{
1946 int n = 0;
1947
1948 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
1949 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
1950
1951 regs[n] = mux_config_vme_pipe;
1952 lens[n] = ARRAY_SIZE(mux_config_vme_pipe);
1953 n++;
1954
1955 return n;
1956}
1957
/*
 * Boolean ("B") counter register programming for the TEST_OA metric set;
 * installed as dev_priv->perf.oa.b_counter_regs when this set is selected.
 */
static const struct i915_oa_reg b_counter_config_test_oa[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2770), 0x00000004 },
	{ _MMIO(0x2774), 0x00000000 },
	{ _MMIO(0x2778), 0x00000003 },
	{ _MMIO(0x277c), 0x00000000 },
	{ _MMIO(0x2780), 0x00000007 },
	{ _MMIO(0x2784), 0x00000000 },
	{ _MMIO(0x2788), 0x00100002 },
	{ _MMIO(0x278c), 0x0000fff7 },
	{ _MMIO(0x2790), 0x00100002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00100082 },
	{ _MMIO(0x279c), 0x0000ffef },
	{ _MMIO(0x27a0), 0x001000c2 },
	{ _MMIO(0x27a4), 0x0000ffe7 },
	{ _MMIO(0x27a8), 0x00100001 },
	{ _MMIO(0x27ac), 0x0000ffe7 },
};
1982
/* TEST_OA needs no flexible-EU programming (intentionally empty). */
static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
1985
/*
 * MUX configuration for the TEST_OA metric set (all writes to 0x9888;
 * generated data, values opaque to this driver).
 */
static const struct i915_oa_reg mux_config_test_oa[] = {
	{ _MMIO(0x9888), 0x11810000 },
	{ _MMIO(0x9888), 0x07810013 },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x1b930040 },
	{ _MMIO(0x9888), 0x07e54000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};
2000
2001static int
2002get_test_oa_mux_config(struct drm_i915_private *dev_priv,
2003 const struct i915_oa_reg **regs,
2004 int *lens)
2005{
2006 int n = 0;
2007
2008 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
2009 BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
2010
2011 regs[n] = mux_config_test_oa;
2012 lens[n] = ARRAY_SIZE(mux_config_test_oa);
2013 n++;
2014
2015 return n;
2016}
2017
2018int i915_oa_select_metric_set_sklgt4(struct drm_i915_private *dev_priv)
2019{
2020 dev_priv->perf.oa.n_mux_configs = 0;
2021 dev_priv->perf.oa.b_counter_regs = NULL;
2022 dev_priv->perf.oa.b_counter_regs_len = 0;
2023 dev_priv->perf.oa.flex_regs = NULL;
2024 dev_priv->perf.oa.flex_regs_len = 0;
2025
2026 switch (dev_priv->perf.oa.metrics_set) {
2027 case METRIC_SET_ID_RENDER_BASIC:
2028 dev_priv->perf.oa.n_mux_configs =
2029 get_render_basic_mux_config(dev_priv,
2030 dev_priv->perf.oa.mux_regs,
2031 dev_priv->perf.oa.mux_regs_lens);
2032 if (dev_priv->perf.oa.n_mux_configs == 0) {
2033 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
2034
2035 /* EINVAL because *_register_sysfs already checked this
2036 * and so it wouldn't have been advertised to userspace and
2037 * so shouldn't have been requested
2038 */
2039 return -EINVAL;
2040 }
2041
2042 dev_priv->perf.oa.b_counter_regs =
2043 b_counter_config_render_basic;
2044 dev_priv->perf.oa.b_counter_regs_len =
2045 ARRAY_SIZE(b_counter_config_render_basic);
2046
2047 dev_priv->perf.oa.flex_regs =
2048 flex_eu_config_render_basic;
2049 dev_priv->perf.oa.flex_regs_len =
2050 ARRAY_SIZE(flex_eu_config_render_basic);
2051
2052 return 0;
2053 case METRIC_SET_ID_COMPUTE_BASIC:
2054 dev_priv->perf.oa.n_mux_configs =
2055 get_compute_basic_mux_config(dev_priv,
2056 dev_priv->perf.oa.mux_regs,
2057 dev_priv->perf.oa.mux_regs_lens);
2058 if (dev_priv->perf.oa.n_mux_configs == 0) {
2059 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
2060
2061 /* EINVAL because *_register_sysfs already checked this
2062 * and so it wouldn't have been advertised to userspace and
2063 * so shouldn't have been requested
2064 */
2065 return -EINVAL;
2066 }
2067
2068 dev_priv->perf.oa.b_counter_regs =
2069 b_counter_config_compute_basic;
2070 dev_priv->perf.oa.b_counter_regs_len =
2071 ARRAY_SIZE(b_counter_config_compute_basic);
2072
2073 dev_priv->perf.oa.flex_regs =
2074 flex_eu_config_compute_basic;
2075 dev_priv->perf.oa.flex_regs_len =
2076 ARRAY_SIZE(flex_eu_config_compute_basic);
2077
2078 return 0;
2079 case METRIC_SET_ID_RENDER_PIPE_PROFILE:
2080 dev_priv->perf.oa.n_mux_configs =
2081 get_render_pipe_profile_mux_config(dev_priv,
2082 dev_priv->perf.oa.mux_regs,
2083 dev_priv->perf.oa.mux_regs_lens);
2084 if (dev_priv->perf.oa.n_mux_configs == 0) {
2085 DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
2086
2087 /* EINVAL because *_register_sysfs already checked this
2088 * and so it wouldn't have been advertised to userspace and
2089 * so shouldn't have been requested
2090 */
2091 return -EINVAL;
2092 }
2093
2094 dev_priv->perf.oa.b_counter_regs =
2095 b_counter_config_render_pipe_profile;
2096 dev_priv->perf.oa.b_counter_regs_len =
2097 ARRAY_SIZE(b_counter_config_render_pipe_profile);
2098
2099 dev_priv->perf.oa.flex_regs =
2100 flex_eu_config_render_pipe_profile;
2101 dev_priv->perf.oa.flex_regs_len =
2102 ARRAY_SIZE(flex_eu_config_render_pipe_profile);
2103
2104 return 0;
2105 case METRIC_SET_ID_MEMORY_READS:
2106 dev_priv->perf.oa.n_mux_configs =
2107 get_memory_reads_mux_config(dev_priv,
2108 dev_priv->perf.oa.mux_regs,
2109 dev_priv->perf.oa.mux_regs_lens);
2110 if (dev_priv->perf.oa.n_mux_configs == 0) {
2111 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
2112
2113 /* EINVAL because *_register_sysfs already checked this
2114 * and so it wouldn't have been advertised to userspace and
2115 * so shouldn't have been requested
2116 */
2117 return -EINVAL;
2118 }
2119
2120 dev_priv->perf.oa.b_counter_regs =
2121 b_counter_config_memory_reads;
2122 dev_priv->perf.oa.b_counter_regs_len =
2123 ARRAY_SIZE(b_counter_config_memory_reads);
2124
2125 dev_priv->perf.oa.flex_regs =
2126 flex_eu_config_memory_reads;
2127 dev_priv->perf.oa.flex_regs_len =
2128 ARRAY_SIZE(flex_eu_config_memory_reads);
2129
2130 return 0;
2131 case METRIC_SET_ID_MEMORY_WRITES:
2132 dev_priv->perf.oa.n_mux_configs =
2133 get_memory_writes_mux_config(dev_priv,
2134 dev_priv->perf.oa.mux_regs,
2135 dev_priv->perf.oa.mux_regs_lens);
2136 if (dev_priv->perf.oa.n_mux_configs == 0) {
2137 DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
2138
2139 /* EINVAL because *_register_sysfs already checked this
2140 * and so it wouldn't have been advertised to userspace and
2141 * so shouldn't have been requested
2142 */
2143 return -EINVAL;
2144 }
2145
2146 dev_priv->perf.oa.b_counter_regs =
2147 b_counter_config_memory_writes;
2148 dev_priv->perf.oa.b_counter_regs_len =
2149 ARRAY_SIZE(b_counter_config_memory_writes);
2150
2151 dev_priv->perf.oa.flex_regs =
2152 flex_eu_config_memory_writes;
2153 dev_priv->perf.oa.flex_regs_len =
2154 ARRAY_SIZE(flex_eu_config_memory_writes);
2155
2156 return 0;
2157 case METRIC_SET_ID_COMPUTE_EXTENDED:
2158 dev_priv->perf.oa.n_mux_configs =
2159 get_compute_extended_mux_config(dev_priv,
2160 dev_priv->perf.oa.mux_regs,
2161 dev_priv->perf.oa.mux_regs_lens);
2162 if (dev_priv->perf.oa.n_mux_configs == 0) {
2163 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
2164
2165 /* EINVAL because *_register_sysfs already checked this
2166 * and so it wouldn't have been advertised to userspace and
2167 * so shouldn't have been requested
2168 */
2169 return -EINVAL;
2170 }
2171
2172 dev_priv->perf.oa.b_counter_regs =
2173 b_counter_config_compute_extended;
2174 dev_priv->perf.oa.b_counter_regs_len =
2175 ARRAY_SIZE(b_counter_config_compute_extended);
2176
2177 dev_priv->perf.oa.flex_regs =
2178 flex_eu_config_compute_extended;
2179 dev_priv->perf.oa.flex_regs_len =
2180 ARRAY_SIZE(flex_eu_config_compute_extended);
2181
2182 return 0;
2183 case METRIC_SET_ID_COMPUTE_L3_CACHE:
2184 dev_priv->perf.oa.n_mux_configs =
2185 get_compute_l3_cache_mux_config(dev_priv,
2186 dev_priv->perf.oa.mux_regs,
2187 dev_priv->perf.oa.mux_regs_lens);
2188 if (dev_priv->perf.oa.n_mux_configs == 0) {
2189 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
2190
2191 /* EINVAL because *_register_sysfs already checked this
2192 * and so it wouldn't have been advertised to userspace and
2193 * so shouldn't have been requested
2194 */
2195 return -EINVAL;
2196 }
2197
2198 dev_priv->perf.oa.b_counter_regs =
2199 b_counter_config_compute_l3_cache;
2200 dev_priv->perf.oa.b_counter_regs_len =
2201 ARRAY_SIZE(b_counter_config_compute_l3_cache);
2202
2203 dev_priv->perf.oa.flex_regs =
2204 flex_eu_config_compute_l3_cache;
2205 dev_priv->perf.oa.flex_regs_len =
2206 ARRAY_SIZE(flex_eu_config_compute_l3_cache);
2207
2208 return 0;
2209 case METRIC_SET_ID_HDC_AND_SF:
2210 dev_priv->perf.oa.n_mux_configs =
2211 get_hdc_and_sf_mux_config(dev_priv,
2212 dev_priv->perf.oa.mux_regs,
2213 dev_priv->perf.oa.mux_regs_lens);
2214 if (dev_priv->perf.oa.n_mux_configs == 0) {
2215 DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
2216
2217 /* EINVAL because *_register_sysfs already checked this
2218 * and so it wouldn't have been advertised to userspace and
2219 * so shouldn't have been requested
2220 */
2221 return -EINVAL;
2222 }
2223
2224 dev_priv->perf.oa.b_counter_regs =
2225 b_counter_config_hdc_and_sf;
2226 dev_priv->perf.oa.b_counter_regs_len =
2227 ARRAY_SIZE(b_counter_config_hdc_and_sf);
2228
2229 dev_priv->perf.oa.flex_regs =
2230 flex_eu_config_hdc_and_sf;
2231 dev_priv->perf.oa.flex_regs_len =
2232 ARRAY_SIZE(flex_eu_config_hdc_and_sf);
2233
2234 return 0;
2235 case METRIC_SET_ID_L3_1:
2236 dev_priv->perf.oa.n_mux_configs =
2237 get_l3_1_mux_config(dev_priv,
2238 dev_priv->perf.oa.mux_regs,
2239 dev_priv->perf.oa.mux_regs_lens);
2240 if (dev_priv->perf.oa.n_mux_configs == 0) {
2241 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
2242
2243 /* EINVAL because *_register_sysfs already checked this
2244 * and so it wouldn't have been advertised to userspace and
2245 * so shouldn't have been requested
2246 */
2247 return -EINVAL;
2248 }
2249
2250 dev_priv->perf.oa.b_counter_regs =
2251 b_counter_config_l3_1;
2252 dev_priv->perf.oa.b_counter_regs_len =
2253 ARRAY_SIZE(b_counter_config_l3_1);
2254
2255 dev_priv->perf.oa.flex_regs =
2256 flex_eu_config_l3_1;
2257 dev_priv->perf.oa.flex_regs_len =
2258 ARRAY_SIZE(flex_eu_config_l3_1);
2259
2260 return 0;
2261 case METRIC_SET_ID_L3_2:
2262 dev_priv->perf.oa.n_mux_configs =
2263 get_l3_2_mux_config(dev_priv,
2264 dev_priv->perf.oa.mux_regs,
2265 dev_priv->perf.oa.mux_regs_lens);
2266 if (dev_priv->perf.oa.n_mux_configs == 0) {
2267 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
2268
2269 /* EINVAL because *_register_sysfs already checked this
2270 * and so it wouldn't have been advertised to userspace and
2271 * so shouldn't have been requested
2272 */
2273 return -EINVAL;
2274 }
2275
2276 dev_priv->perf.oa.b_counter_regs =
2277 b_counter_config_l3_2;
2278 dev_priv->perf.oa.b_counter_regs_len =
2279 ARRAY_SIZE(b_counter_config_l3_2);
2280
2281 dev_priv->perf.oa.flex_regs =
2282 flex_eu_config_l3_2;
2283 dev_priv->perf.oa.flex_regs_len =
2284 ARRAY_SIZE(flex_eu_config_l3_2);
2285
2286 return 0;
2287 case METRIC_SET_ID_L3_3:
2288 dev_priv->perf.oa.n_mux_configs =
2289 get_l3_3_mux_config(dev_priv,
2290 dev_priv->perf.oa.mux_regs,
2291 dev_priv->perf.oa.mux_regs_lens);
2292 if (dev_priv->perf.oa.n_mux_configs == 0) {
2293 DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
2294
2295 /* EINVAL because *_register_sysfs already checked this
2296 * and so it wouldn't have been advertised to userspace and
2297 * so shouldn't have been requested
2298 */
2299 return -EINVAL;
2300 }
2301
2302 dev_priv->perf.oa.b_counter_regs =
2303 b_counter_config_l3_3;
2304 dev_priv->perf.oa.b_counter_regs_len =
2305 ARRAY_SIZE(b_counter_config_l3_3);
2306
2307 dev_priv->perf.oa.flex_regs =
2308 flex_eu_config_l3_3;
2309 dev_priv->perf.oa.flex_regs_len =
2310 ARRAY_SIZE(flex_eu_config_l3_3);
2311
2312 return 0;
2313 case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
2314 dev_priv->perf.oa.n_mux_configs =
2315 get_rasterizer_and_pixel_backend_mux_config(dev_priv,
2316 dev_priv->perf.oa.mux_regs,
2317 dev_priv->perf.oa.mux_regs_lens);
2318 if (dev_priv->perf.oa.n_mux_configs == 0) {
2319 DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
2320
2321 /* EINVAL because *_register_sysfs already checked this
2322 * and so it wouldn't have been advertised to userspace and
2323 * so shouldn't have been requested
2324 */
2325 return -EINVAL;
2326 }
2327
2328 dev_priv->perf.oa.b_counter_regs =
2329 b_counter_config_rasterizer_and_pixel_backend;
2330 dev_priv->perf.oa.b_counter_regs_len =
2331 ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
2332
2333 dev_priv->perf.oa.flex_regs =
2334 flex_eu_config_rasterizer_and_pixel_backend;
2335 dev_priv->perf.oa.flex_regs_len =
2336 ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
2337
2338 return 0;
2339 case METRIC_SET_ID_SAMPLER:
2340 dev_priv->perf.oa.n_mux_configs =
2341 get_sampler_mux_config(dev_priv,
2342 dev_priv->perf.oa.mux_regs,
2343 dev_priv->perf.oa.mux_regs_lens);
2344 if (dev_priv->perf.oa.n_mux_configs == 0) {
2345 DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
2346
2347 /* EINVAL because *_register_sysfs already checked this
2348 * and so it wouldn't have been advertised to userspace and
2349 * so shouldn't have been requested
2350 */
2351 return -EINVAL;
2352 }
2353
2354 dev_priv->perf.oa.b_counter_regs =
2355 b_counter_config_sampler;
2356 dev_priv->perf.oa.b_counter_regs_len =
2357 ARRAY_SIZE(b_counter_config_sampler);
2358
2359 dev_priv->perf.oa.flex_regs =
2360 flex_eu_config_sampler;
2361 dev_priv->perf.oa.flex_regs_len =
2362 ARRAY_SIZE(flex_eu_config_sampler);
2363
2364 return 0;
2365 case METRIC_SET_ID_TDL_1:
2366 dev_priv->perf.oa.n_mux_configs =
2367 get_tdl_1_mux_config(dev_priv,
2368 dev_priv->perf.oa.mux_regs,
2369 dev_priv->perf.oa.mux_regs_lens);
2370 if (dev_priv->perf.oa.n_mux_configs == 0) {
2371 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
2372
2373 /* EINVAL because *_register_sysfs already checked this
2374 * and so it wouldn't have been advertised to userspace and
2375 * so shouldn't have been requested
2376 */
2377 return -EINVAL;
2378 }
2379
2380 dev_priv->perf.oa.b_counter_regs =
2381 b_counter_config_tdl_1;
2382 dev_priv->perf.oa.b_counter_regs_len =
2383 ARRAY_SIZE(b_counter_config_tdl_1);
2384
2385 dev_priv->perf.oa.flex_regs =
2386 flex_eu_config_tdl_1;
2387 dev_priv->perf.oa.flex_regs_len =
2388 ARRAY_SIZE(flex_eu_config_tdl_1);
2389
2390 return 0;
2391 case METRIC_SET_ID_TDL_2:
2392 dev_priv->perf.oa.n_mux_configs =
2393 get_tdl_2_mux_config(dev_priv,
2394 dev_priv->perf.oa.mux_regs,
2395 dev_priv->perf.oa.mux_regs_lens);
2396 if (dev_priv->perf.oa.n_mux_configs == 0) {
2397 DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
2398
2399 /* EINVAL because *_register_sysfs already checked this
2400 * and so it wouldn't have been advertised to userspace and
2401 * so shouldn't have been requested
2402 */
2403 return -EINVAL;
2404 }
2405
2406 dev_priv->perf.oa.b_counter_regs =
2407 b_counter_config_tdl_2;
2408 dev_priv->perf.oa.b_counter_regs_len =
2409 ARRAY_SIZE(b_counter_config_tdl_2);
2410
2411 dev_priv->perf.oa.flex_regs =
2412 flex_eu_config_tdl_2;
2413 dev_priv->perf.oa.flex_regs_len =
2414 ARRAY_SIZE(flex_eu_config_tdl_2);
2415
2416 return 0;
2417 case METRIC_SET_ID_COMPUTE_EXTRA:
2418 dev_priv->perf.oa.n_mux_configs =
2419 get_compute_extra_mux_config(dev_priv,
2420 dev_priv->perf.oa.mux_regs,
2421 dev_priv->perf.oa.mux_regs_lens);
2422 if (dev_priv->perf.oa.n_mux_configs == 0) {
2423 DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
2424
2425 /* EINVAL because *_register_sysfs already checked this
2426 * and so it wouldn't have been advertised to userspace and
2427 * so shouldn't have been requested
2428 */
2429 return -EINVAL;
2430 }
2431
2432 dev_priv->perf.oa.b_counter_regs =
2433 b_counter_config_compute_extra;
2434 dev_priv->perf.oa.b_counter_regs_len =
2435 ARRAY_SIZE(b_counter_config_compute_extra);
2436
2437 dev_priv->perf.oa.flex_regs =
2438 flex_eu_config_compute_extra;
2439 dev_priv->perf.oa.flex_regs_len =
2440 ARRAY_SIZE(flex_eu_config_compute_extra);
2441
2442 return 0;
2443 case METRIC_SET_ID_VME_PIPE:
2444 dev_priv->perf.oa.n_mux_configs =
2445 get_vme_pipe_mux_config(dev_priv,
2446 dev_priv->perf.oa.mux_regs,
2447 dev_priv->perf.oa.mux_regs_lens);
2448 if (dev_priv->perf.oa.n_mux_configs == 0) {
2449 DRM_DEBUG_DRIVER("No suitable MUX config for \"VME_PIPE\" metric set\n");
2450
2451 /* EINVAL because *_register_sysfs already checked this
2452 * and so it wouldn't have been advertised to userspace and
2453 * so shouldn't have been requested
2454 */
2455 return -EINVAL;
2456 }
2457
2458 dev_priv->perf.oa.b_counter_regs =
2459 b_counter_config_vme_pipe;
2460 dev_priv->perf.oa.b_counter_regs_len =
2461 ARRAY_SIZE(b_counter_config_vme_pipe);
2462
2463 dev_priv->perf.oa.flex_regs =
2464 flex_eu_config_vme_pipe;
2465 dev_priv->perf.oa.flex_regs_len =
2466 ARRAY_SIZE(flex_eu_config_vme_pipe);
2467
2468 return 0;
2469 case METRIC_SET_ID_TEST_OA:
2470 dev_priv->perf.oa.n_mux_configs =
2471 get_test_oa_mux_config(dev_priv,
2472 dev_priv->perf.oa.mux_regs,
2473 dev_priv->perf.oa.mux_regs_lens);
2474 if (dev_priv->perf.oa.n_mux_configs == 0) {
2475 DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
2476
2477 /* EINVAL because *_register_sysfs already checked this
2478 * and so it wouldn't have been advertised to userspace and
2479 * so shouldn't have been requested
2480 */
2481 return -EINVAL;
2482 }
2483
2484 dev_priv->perf.oa.b_counter_regs =
2485 b_counter_config_test_oa;
2486 dev_priv->perf.oa.b_counter_regs_len =
2487 ARRAY_SIZE(b_counter_config_test_oa);
2488
2489 dev_priv->perf.oa.flex_regs =
2490 flex_eu_config_test_oa;
2491 dev_priv->perf.oa.flex_regs_len =
2492 ARRAY_SIZE(flex_eu_config_test_oa);
2493
2494 return 0;
2495 default:
2496 return -ENODEV;
2497 }
2498}
2499
2500static ssize_t
2501show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
2502{
2503 return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
2504}
2505
2506static struct device_attribute dev_attr_render_basic_id = {
2507 .attr = { .name = "id", .mode = 0444 },
2508 .show = show_render_basic_id,
2509 .store = NULL,
2510};
2511
2512static struct attribute *attrs_render_basic[] = {
2513 &dev_attr_render_basic_id.attr,
2514 NULL,
2515};
2516
2517static struct attribute_group group_render_basic = {
2518 .name = "bad77c24-cc64-480d-99bf-e7b740713800",
2519 .attrs = attrs_render_basic,
2520};
2521
/* sysfs "id" show hook for the COMPUTE_BASIC metric set. */
static ssize_t
show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
}

static struct device_attribute dev_attr_compute_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_basic_id,
	.store = NULL,
};

static struct attribute *attrs_compute_basic[] = {
	&dev_attr_compute_basic_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_compute_basic = {
	.name = "7277228f-e7f3-4743-945a-6a2049d11377",
	.attrs = attrs_compute_basic,
};
2543
/* sysfs "id" show hook for the RENDER_PIPE_PROFILE metric set. */
static ssize_t
show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
}

static struct device_attribute dev_attr_render_pipe_profile_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_render_pipe_profile_id,
	.store = NULL,
};

static struct attribute *attrs_render_pipe_profile[] = {
	&dev_attr_render_pipe_profile_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_render_pipe_profile = {
	.name = "463c668c-3f60-49b6-8f85-d995b635b3b2",
	.attrs = attrs_render_pipe_profile,
};
2565
/* sysfs "id" show hook for the MEMORY_READS metric set. */
static ssize_t
show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
}

static struct device_attribute dev_attr_memory_reads_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_memory_reads_id,
	.store = NULL,
};

static struct attribute *attrs_memory_reads[] = {
	&dev_attr_memory_reads_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_memory_reads = {
	.name = "3ae6e74c-72c3-4040-9bd0-7961430b8cc8",
	.attrs = attrs_memory_reads,
};
2587
/* sysfs "id" show hook for the MEMORY_WRITES metric set. */
static ssize_t
show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
}

static struct device_attribute dev_attr_memory_writes_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_memory_writes_id,
	.store = NULL,
};

static struct attribute *attrs_memory_writes[] = {
	&dev_attr_memory_writes_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_memory_writes = {
	.name = "055f256d-4052-467c-8dec-6064a4806433",
	.attrs = attrs_memory_writes,
};
2609
/* sysfs "id" show hook for the COMPUTE_EXTENDED metric set. */
static ssize_t
show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
}

static struct device_attribute dev_attr_compute_extended_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_extended_id,
	.store = NULL,
};

static struct attribute *attrs_compute_extended[] = {
	&dev_attr_compute_extended_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_compute_extended = {
	.name = "753972d4-87cd-4460-824d-754463ac5054",
	.attrs = attrs_compute_extended,
};
2631
/* sysfs "id" show hook for the COMPUTE_L3_CACHE metric set. */
static ssize_t
show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
}

static struct device_attribute dev_attr_compute_l3_cache_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_l3_cache_id,
	.store = NULL,
};

static struct attribute *attrs_compute_l3_cache[] = {
	&dev_attr_compute_l3_cache_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_compute_l3_cache = {
	.name = "4e4392e9-8f73-457b-ab44-b49f7a0c733b",
	.attrs = attrs_compute_l3_cache,
};
2653
/* sysfs "id" show hook for the HDC_AND_SF metric set. */
static ssize_t
show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
}

static struct device_attribute dev_attr_hdc_and_sf_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_hdc_and_sf_id,
	.store = NULL,
};

static struct attribute *attrs_hdc_and_sf[] = {
	&dev_attr_hdc_and_sf_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_hdc_and_sf = {
	.name = "730d95dd-7da8-4e1c-ab8d-c0eb1e4c1805",
	.attrs = attrs_hdc_and_sf,
};
2675
/* sysfs "id" show hook for the L3_1 metric set. */
static ssize_t
show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
}

static struct device_attribute dev_attr_l3_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_1_id,
	.store = NULL,
};

static struct attribute *attrs_l3_1[] = {
	&dev_attr_l3_1_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_l3_1 = {
	.name = "d9e86d70-462b-462a-851e-fd63e8c13d63",
	.attrs = attrs_l3_1,
};
2697
/* sysfs "id" show hook for the L3_2 metric set. */
static ssize_t
show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
}

static struct device_attribute dev_attr_l3_2_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_2_id,
	.store = NULL,
};

static struct attribute *attrs_l3_2[] = {
	&dev_attr_l3_2_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_l3_2 = {
	.name = "52200424-6ee9-48b3-b7fa-0afcf1975e4d",
	.attrs = attrs_l3_2,
};
2719
/* sysfs "id" show hook for the L3_3 metric set. */
static ssize_t
show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
}

static struct device_attribute dev_attr_l3_3_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_l3_3_id,
	.store = NULL,
};

static struct attribute *attrs_l3_3[] = {
	&dev_attr_l3_3_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_l3_3 = {
	.name = "1988315f-0a26-44df-acb0-df7ec86b1456",
	.attrs = attrs_l3_3,
};
2741
/* sysfs "id" show hook for the RASTERIZER_AND_PIXEL_BACKEND metric set. */
static ssize_t
show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
}

static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_rasterizer_and_pixel_backend_id,
	.store = NULL,
};

static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
	&dev_attr_rasterizer_and_pixel_backend_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_rasterizer_and_pixel_backend = {
	.name = "f1f17ca7-286e-4ae5-9d15-9fccad6c665d",
	.attrs = attrs_rasterizer_and_pixel_backend,
};
2763
/* sysfs "id" show hook for the SAMPLER metric set. */
static ssize_t
show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
}

static struct device_attribute dev_attr_sampler_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_sampler_id,
	.store = NULL,
};

static struct attribute *attrs_sampler[] = {
	&dev_attr_sampler_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_sampler = {
	.name = "00a9e0fb-3d2e-4405-852c-dce6334ffb3b",
	.attrs = attrs_sampler,
};
2785
/* sysfs "id" show hook for the TDL_1 metric set. */
static ssize_t
show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
}

static struct device_attribute dev_attr_tdl_1_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_tdl_1_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_1[] = {
	&dev_attr_tdl_1_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_tdl_1 = {
	.name = "13dcc50a-7ec0-409b-99d6-a3f932cedcb3",
	.attrs = attrs_tdl_1,
};
2807
/* sysfs "id" show hook for the TDL_2 metric set. */
static ssize_t
show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
}

static struct device_attribute dev_attr_tdl_2_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_tdl_2_id,
	.store = NULL,
};

static struct attribute *attrs_tdl_2[] = {
	&dev_attr_tdl_2_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_tdl_2 = {
	.name = "97875e21-6624-4aee-9191-682feb3eae21",
	.attrs = attrs_tdl_2,
};
2829
/* sysfs "id" show hook for the COMPUTE_EXTRA metric set. */
static ssize_t
show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
}

static struct device_attribute dev_attr_compute_extra_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_extra_id,
	.store = NULL,
};

static struct attribute *attrs_compute_extra[] = {
	&dev_attr_compute_extra_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_compute_extra = {
	.name = "a5aa857d-e8f0-4dfa-8981-ce340fa748fd",
	.attrs = attrs_compute_extra,
};
2851
/* sysfs "id" show hook for the VME_PIPE metric set. */
static ssize_t
show_vme_pipe_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_VME_PIPE);
}

static struct device_attribute dev_attr_vme_pipe_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_vme_pipe_id,
	.store = NULL,
};

static struct attribute *attrs_vme_pipe[] = {
	&dev_attr_vme_pipe_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_vme_pipe = {
	.name = "0e8d8b86-4ee7-4cdd-aaaa-58adc92cb29e",
	.attrs = attrs_vme_pipe,
};
2873
/* sysfs "id" show hook for the TEST_OA metric set. */
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
}

static struct device_attribute dev_attr_test_oa_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_test_oa_id,
	.store = NULL,
};

static struct attribute *attrs_test_oa[] = {
	&dev_attr_test_oa_id.attr,
	NULL,
};

/* Group directory named by the metric set's UUID. */
static struct attribute_group group_test_oa = {
	.name = "882fa433-1f4a-4a67-a962-c741888fe5f5",
	.attrs = attrs_test_oa,
};
2895
/*
 * i915_perf_register_sysfs_sklgt4 - advertise available SKL GT4 metric sets
 * @dev_priv: i915 device instance
 *
 * For each metric set whose get_*_mux_config() probe reports a usable MUX
 * config on this system, create a sysfs group (named by the set's UUID)
 * under dev_priv->perf.metrics_kobj so userspace can discover it.
 *
 * On a sysfs_create_group() failure, the goto ladder below unwinds every
 * group registered so far in exact reverse order (re-probing the same
 * config checks so only groups that were actually created are removed)
 * and the error is returned.
 *
 * Returns: 0 on success, or the sysfs_create_group() error code.
 */
int
i915_perf_register_sysfs_sklgt4(struct drm_i915_private *dev_priv)
{
	/* Scratch buffers for the config probes; only the returned count
	 * (zero vs non-zero) is used here to decide availability.
	 */
	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
	int ret = 0;

	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
		if (ret)
			goto error_render_basic;
	}
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
		if (ret)
			goto error_compute_basic;
	}
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
		if (ret)
			goto error_render_pipe_profile;
	}
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
		if (ret)
			goto error_memory_reads;
	}
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
		if (ret)
			goto error_memory_writes;
	}
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
		if (ret)
			goto error_compute_extended;
	}
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
		if (ret)
			goto error_compute_l3_cache;
	}
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
		if (ret)
			goto error_hdc_and_sf;
	}
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
		if (ret)
			goto error_l3_1;
	}
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
		if (ret)
			goto error_l3_2;
	}
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
		if (ret)
			goto error_l3_3;
	}
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
		if (ret)
			goto error_rasterizer_and_pixel_backend;
	}
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
		if (ret)
			goto error_sampler;
	}
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
		if (ret)
			goto error_tdl_1;
	}
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
		if (ret)
			goto error_tdl_2;
	}
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
		if (ret)
			goto error_compute_extra;
	}
	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
		if (ret)
			goto error_vme_pipe;
	}
	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
		if (ret)
			goto error_test_oa;
	}

	return 0;

	/* Unwind in reverse registration order; each label removes the group
	 * created by the *previous* successful step (guarded by the same
	 * availability probe, so never-created groups are not removed).
	 */
error_test_oa:
	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
error_vme_pipe:
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
error_compute_extra:
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
error_tdl_2:
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
error_tdl_1:
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
error_sampler:
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
error_rasterizer_and_pixel_backend:
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
error_l3_3:
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
error_l3_2:
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
error_l3_1:
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
error_hdc_and_sf:
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
error_compute_l3_cache:
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
error_compute_extended:
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
error_memory_writes:
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
error_memory_reads:
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
error_render_pipe_profile:
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
error_compute_basic:
	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
error_render_basic:
	return ret;
}
3050
/*
 * i915_perf_unregister_sysfs_sklgt4 - remove the SKL GT4 metric-set groups
 * @dev_priv: i915 device instance
 *
 * Mirrors i915_perf_register_sysfs_sklgt4(): re-runs the same availability
 * probes so that exactly the groups that were created at registration time
 * are removed from dev_priv->perf.metrics_kobj.
 */
void
i915_perf_unregister_sysfs_sklgt4(struct drm_i915_private *dev_priv)
{
	/* Scratch buffers for the config probes; only the returned count
	 * (zero vs non-zero) is used here.
	 */
	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];

	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
}
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
new file mode 100644
index 000000000000..1b718f15f62e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
@@ -0,0 +1,40 @@
1/*
2 * Autogenerated file by GPU Top : https://github.com/rib/gputop
3 * DO NOT EDIT manually!
4 *
5 *
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 *
27 */
28
29#ifndef __I915_OA_SKLGT4_H__
30#define __I915_OA_SKLGT4_H__
31
32extern int i915_oa_n_builtin_metric_sets_sklgt4;
33
34extern int i915_oa_select_metric_set_sklgt4(struct drm_i915_private *dev_priv);
35
36extern int i915_perf_register_sysfs_sklgt4(struct drm_i915_private *dev_priv);
37
38extern void i915_perf_unregister_sysfs_sklgt4(struct drm_i915_private *dev_priv);
39
40#endif
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index f940e486a62a..506ec32b9e53 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -312,16 +312,17 @@ static const struct intel_device_info intel_haswell_info = {
312 .has_full_48bit_ppgtt = 1, \ 312 .has_full_48bit_ppgtt = 1, \
313 .has_64bit_reloc = 1 313 .has_64bit_reloc = 1
314 314
315#define BDW_PLATFORM \
316 BDW_FEATURES, \
317 .gen = 8, \
318 .platform = INTEL_BROADWELL
319
315static const struct intel_device_info intel_broadwell_info = { 320static const struct intel_device_info intel_broadwell_info = {
316 BDW_FEATURES, 321 BDW_PLATFORM,
317 .gen = 8,
318 .platform = INTEL_BROADWELL,
319}; 322};
320 323
321static const struct intel_device_info intel_broadwell_gt3_info = { 324static const struct intel_device_info intel_broadwell_gt3_info = {
322 BDW_FEATURES, 325 BDW_PLATFORM,
323 .gen = 8,
324 .platform = INTEL_BROADWELL,
325 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 326 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
326}; 327};
327 328
@@ -347,22 +348,20 @@ static const struct intel_device_info intel_cherryview_info = {
347 CHV_COLORS, 348 CHV_COLORS,
348}; 349};
349 350
351#define SKL_PLATFORM \
352 BDW_FEATURES, \
353 .gen = 9, \
354 .platform = INTEL_SKYLAKE, \
355 .has_csr = 1, \
356 .has_guc = 1, \
357 .ddb_size = 896
358
350static const struct intel_device_info intel_skylake_info = { 359static const struct intel_device_info intel_skylake_info = {
351 BDW_FEATURES, 360 SKL_PLATFORM,
352 .platform = INTEL_SKYLAKE,
353 .gen = 9,
354 .has_csr = 1,
355 .has_guc = 1,
356 .ddb_size = 896,
357}; 361};
358 362
359static const struct intel_device_info intel_skylake_gt3_info = { 363static const struct intel_device_info intel_skylake_gt3_info = {
360 BDW_FEATURES, 364 SKL_PLATFORM,
361 .platform = INTEL_SKYLAKE,
362 .gen = 9,
363 .has_csr = 1,
364 .has_guc = 1,
365 .ddb_size = 896,
366 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 365 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
367}; 366};
368 367
@@ -401,28 +400,52 @@ static const struct intel_device_info intel_broxton_info = {
401static const struct intel_device_info intel_geminilake_info = { 400static const struct intel_device_info intel_geminilake_info = {
402 GEN9_LP_FEATURES, 401 GEN9_LP_FEATURES,
403 .platform = INTEL_GEMINILAKE, 402 .platform = INTEL_GEMINILAKE,
404 .is_alpha_support = 1,
405 .ddb_size = 1024, 403 .ddb_size = 1024,
406 .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 } 404 .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
407}; 405};
408 406
407#define KBL_PLATFORM \
408 BDW_FEATURES, \
409 .gen = 9, \
410 .platform = INTEL_KABYLAKE, \
411 .has_csr = 1, \
412 .has_guc = 1, \
413 .ddb_size = 896
414
409static const struct intel_device_info intel_kabylake_info = { 415static const struct intel_device_info intel_kabylake_info = {
410 BDW_FEATURES, 416 KBL_PLATFORM,
411 .platform = INTEL_KABYLAKE,
412 .gen = 9,
413 .has_csr = 1,
414 .has_guc = 1,
415 .ddb_size = 896,
416}; 417};
417 418
418static const struct intel_device_info intel_kabylake_gt3_info = { 419static const struct intel_device_info intel_kabylake_gt3_info = {
420 KBL_PLATFORM,
421 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
422};
423
424#define CFL_PLATFORM \
425 .is_alpha_support = 1, \
426 BDW_FEATURES, \
427 .gen = 9, \
428 .platform = INTEL_COFFEELAKE, \
429 .has_csr = 1, \
430 .has_guc = 1, \
431 .ddb_size = 896
432
433static const struct intel_device_info intel_coffeelake_info = {
434 CFL_PLATFORM,
435};
436
437static const struct intel_device_info intel_coffeelake_gt3_info = {
438 CFL_PLATFORM,
439 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
440};
441
442static const struct intel_device_info intel_cannonlake_info = {
419 BDW_FEATURES, 443 BDW_FEATURES,
420 .platform = INTEL_KABYLAKE, 444 .is_alpha_support = 1,
421 .gen = 9, 445 .platform = INTEL_CANNONLAKE,
446 .gen = 10,
447 .ddb_size = 1024,
422 .has_csr = 1, 448 .has_csr = 1,
423 .has_guc = 1,
424 .ddb_size = 896,
425 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
426}; 449};
427 450
428/* 451/*
@@ -469,6 +492,10 @@ static const struct pci_device_id pciidlist[] = {
469 INTEL_KBL_GT2_IDS(&intel_kabylake_info), 492 INTEL_KBL_GT2_IDS(&intel_kabylake_info),
470 INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), 493 INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
471 INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), 494 INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
495 INTEL_CFL_S_IDS(&intel_coffeelake_info),
496 INTEL_CFL_H_IDS(&intel_coffeelake_info),
497 INTEL_CFL_U_IDS(&intel_coffeelake_gt3_info),
498 INTEL_CNL_IDS(&intel_cannonlake_info),
472 {0, 0, 0} 499 {0, 0, 0}
473}; 500};
474MODULE_DEVICE_TABLE(pci, pciidlist); 501MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 85269bcc8372..38c44407bafc 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -196,6 +196,15 @@
196 196
197#include "i915_drv.h" 197#include "i915_drv.h"
198#include "i915_oa_hsw.h" 198#include "i915_oa_hsw.h"
199#include "i915_oa_bdw.h"
200#include "i915_oa_chv.h"
201#include "i915_oa_sklgt2.h"
202#include "i915_oa_sklgt3.h"
203#include "i915_oa_sklgt4.h"
204#include "i915_oa_bxt.h"
205#include "i915_oa_kblgt2.h"
206#include "i915_oa_kblgt3.h"
207#include "i915_oa_glk.h"
199 208
200/* HW requires this to be a power of two, between 128k and 16M, though driver 209/* HW requires this to be a power of two, between 128k and 16M, though driver
201 * is currently generally designed assuming the largest 16M size is used such 210 * is currently generally designed assuming the largest 16M size is used such
@@ -215,7 +224,7 @@
215 * 224 *
216 * Although this can be observed explicitly while copying reports to userspace 225 * Although this can be observed explicitly while copying reports to userspace
217 * by checking for a zeroed report-id field in tail reports, we want to account 226 * by checking for a zeroed report-id field in tail reports, we want to account
218 * for this earlier, as part of the _oa_buffer_check to avoid lots of redundant 227 * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
219 * read() attempts. 228 * read() attempts.
220 * 229 *
221 * In effect we define a tail pointer for reading that lags the real tail 230 * In effect we define a tail pointer for reading that lags the real tail
@@ -237,7 +246,7 @@
237 * indicates that an updated tail pointer is needed. 246 * indicates that an updated tail pointer is needed.
238 * 247 *
239 * Most of the implementation details for this workaround are in 248 * Most of the implementation details for this workaround are in
240 * gen7_oa_buffer_check_unlocked() and gen7_appand_oa_reports() 249 * oa_buffer_check_unlocked() and _append_oa_reports()
241 * 250 *
242 * Note for posterity: previously the driver used to define an effective tail 251 * Note for posterity: previously the driver used to define an effective tail
243 * pointer that lagged the real pointer by a 'tail margin' measured in bytes 252 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
@@ -272,13 +281,22 @@ static u32 i915_perf_stream_paranoid = true;
272 281
273#define INVALID_CTX_ID 0xffffffff 282#define INVALID_CTX_ID 0xffffffff
274 283
284/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
285#define OAREPORT_REASON_MASK 0x3f
286#define OAREPORT_REASON_SHIFT 19
287#define OAREPORT_REASON_TIMER (1<<0)
288#define OAREPORT_REASON_CTX_SWITCH (1<<3)
289#define OAREPORT_REASON_CLK_RATIO (1<<5)
290
275 291
276/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate 292/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
277 * 293 *
278 * 160ns is the smallest sampling period we can theoretically program the OA 294 * The highest sampling frequency we can theoretically program the OA unit
279 * unit with on Haswell, corresponding to 6.25MHz. 295 * with is always half the timestamp frequency: E.g. 6.25Mhz for Haswell.
296 *
297 * Initialized just before we register the sysctl parameter.
280 */ 298 */
281static int oa_sample_rate_hard_limit = 6250000; 299static int oa_sample_rate_hard_limit;
282 300
283/* Theoretically we can program the OA unit to sample every 160ns but don't 301/* Theoretically we can program the OA unit to sample every 160ns but don't
284 * allow that by default unless root... 302 * allow that by default unless root...
@@ -303,6 +321,13 @@ static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
303 [I915_OA_FORMAT_C4_B8] = { 7, 64 }, 321 [I915_OA_FORMAT_C4_B8] = { 7, 64 },
304}; 322};
305 323
324static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
325 [I915_OA_FORMAT_A12] = { 0, 64 },
326 [I915_OA_FORMAT_A12_B8_C8] = { 2, 128 },
327 [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
328 [I915_OA_FORMAT_C4_B8] = { 7, 64 },
329};
330
306#define SAMPLE_OA_REPORT (1<<0) 331#define SAMPLE_OA_REPORT (1<<0)
307 332
308/** 333/**
@@ -332,8 +357,20 @@ struct perf_open_properties {
332 int oa_period_exponent; 357 int oa_period_exponent;
333}; 358};
334 359
360static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
361{
362 return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
363}
364
365static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
366{
367 u32 oastatus1 = I915_READ(GEN7_OASTATUS1);
368
369 return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
370}
371
335/** 372/**
336 * gen7_oa_buffer_check_unlocked - check for data and update tail ptr state 373 * oa_buffer_check_unlocked - check for data and update tail ptr state
337 * @dev_priv: i915 device instance 374 * @dev_priv: i915 device instance
338 * 375 *
339 * This is either called via fops (for blocking reads in user ctx) or the poll 376 * This is either called via fops (for blocking reads in user ctx) or the poll
@@ -356,12 +393,11 @@ struct perf_open_properties {
356 * 393 *
357 * Returns: %true if the OA buffer contains data, else %false 394 * Returns: %true if the OA buffer contains data, else %false
358 */ 395 */
359static bool gen7_oa_buffer_check_unlocked(struct drm_i915_private *dev_priv) 396static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
360{ 397{
361 int report_size = dev_priv->perf.oa.oa_buffer.format_size; 398 int report_size = dev_priv->perf.oa.oa_buffer.format_size;
362 unsigned long flags; 399 unsigned long flags;
363 unsigned int aged_idx; 400 unsigned int aged_idx;
364 u32 oastatus1;
365 u32 head, hw_tail, aged_tail, aging_tail; 401 u32 head, hw_tail, aged_tail, aging_tail;
366 u64 now; 402 u64 now;
367 403
@@ -381,8 +417,7 @@ static bool gen7_oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
381 aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset; 417 aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
382 aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset; 418 aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;
383 419
384 oastatus1 = I915_READ(GEN7_OASTATUS1); 420 hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);
385 hw_tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
386 421
387 /* The tail pointer increases in 64 byte increments, 422 /* The tail pointer increases in 64 byte increments,
388 * not in report_size steps... 423 * not in report_size steps...
@@ -404,6 +439,7 @@ static bool gen7_oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
404 if (aging_tail != INVALID_TAIL_PTR && 439 if (aging_tail != INVALID_TAIL_PTR &&
405 ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) > 440 ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
406 OA_TAIL_MARGIN_NSEC)) { 441 OA_TAIL_MARGIN_NSEC)) {
442
407 aged_idx ^= 1; 443 aged_idx ^= 1;
408 dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx; 444 dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;
409 445
@@ -553,6 +589,300 @@ static int append_oa_sample(struct i915_perf_stream *stream,
553 * 589 *
554 * Returns: 0 on success, negative error code on failure. 590 * Returns: 0 on success, negative error code on failure.
555 */ 591 */
592static int gen8_append_oa_reports(struct i915_perf_stream *stream,
593 char __user *buf,
594 size_t count,
595 size_t *offset)
596{
597 struct drm_i915_private *dev_priv = stream->dev_priv;
598 int report_size = dev_priv->perf.oa.oa_buffer.format_size;
599 u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
600 u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
601 u32 mask = (OA_BUFFER_SIZE - 1);
602 size_t start_offset = *offset;
603 unsigned long flags;
604 unsigned int aged_tail_idx;
605 u32 head, tail;
606 u32 taken;
607 int ret = 0;
608
609 if (WARN_ON(!stream->enabled))
610 return -EIO;
611
612 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
613
614 head = dev_priv->perf.oa.oa_buffer.head;
615 aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
616 tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
617
618 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
619
620 /*
621 * An invalid tail pointer here means we're still waiting for the poll
622 * hrtimer callback to give us a pointer
623 */
624 if (tail == INVALID_TAIL_PTR)
625 return -EAGAIN;
626
627 /*
628 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
629 * while indexing relative to oa_buf_base.
630 */
631 head -= gtt_offset;
632 tail -= gtt_offset;
633
634 /*
635 * An out of bounds or misaligned head or tail pointer implies a driver
636 * bug since we validate + align the tail pointers we read from the
637 * hardware and we are in full control of the head pointer which should
638 * only be incremented by multiples of the report size (notably also
639 * all a power of two).
640 */
641 if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
642 tail > OA_BUFFER_SIZE || tail % report_size,
643 "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
644 head, tail))
645 return -EIO;
646
647
648 for (/* none */;
649 (taken = OA_TAKEN(tail, head));
650 head = (head + report_size) & mask) {
651 u8 *report = oa_buf_base + head;
652 u32 *report32 = (void *)report;
653 u32 ctx_id;
654 u32 reason;
655
656 /*
657 * All the report sizes factor neatly into the buffer
658 * size so we never expect to see a report split
659 * between the beginning and end of the buffer.
660 *
661 * Given the initial alignment check a misalignment
662 * here would imply a driver bug that would result
663 * in an overrun.
664 */
665 if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
666 DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
667 break;
668 }
669
670 /*
671 * The reason field includes flags identifying what
672 * triggered this specific report (mostly timer
673 * triggered or e.g. due to a context switch).
674 *
675 * This field is never expected to be zero so we can
676 * check that the report isn't invalid before copying
677 * it to userspace...
678 */
679 reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
680 OAREPORT_REASON_MASK);
681 if (reason == 0) {
682 if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
683 DRM_NOTE("Skipping spurious, invalid OA report\n");
684 continue;
685 }
686
687 /*
688 * XXX: Just keep the lower 21 bits for now since I'm not
689 * entirely sure if the HW touches any of the higher bits in
690 * this field
691 */
692 ctx_id = report32[2] & 0x1fffff;
693
694 /*
695 * Squash whatever is in the CTX_ID field if it's marked as
696 * invalid to be sure we avoid false-positive, single-context
697 * filtering below...
698 *
699 * Note: that we don't clear the valid_ctx_bit so userspace can
700 * understand that the ID has been squashed by the kernel.
701 */
702 if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
703 ctx_id = report32[2] = INVALID_CTX_ID;
704
705 /*
706 * NB: For Gen 8 the OA unit no longer supports clock gating
707 * off for a specific context and the kernel can't securely
708 * stop the counters from updating as system-wide / global
709 * values.
710 *
711 * Automatic reports now include a context ID so reports can be
712 * filtered on the cpu but it's not worth trying to
713 * automatically subtract/hide counter progress for other
714 * contexts while filtering since we can't stop userspace
715 * issuing MI_REPORT_PERF_COUNT commands which would still
716 * provide a side-band view of the real values.
717 *
718 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
719 * to normalize counters for a single filtered context then it
720 * needs be forwarded bookend context-switch reports so that it
721 * can track switches in between MI_REPORT_PERF_COUNT commands
722 * and can itself subtract/ignore the progress of counters
723 * associated with other contexts. Note that the hardware
724 * automatically triggers reports when switching to a new
725 * context which are tagged with the ID of the newly active
726 * context. To avoid the complexity (and likely fragility) of
727 * reading ahead while parsing reports to try and minimize
728 * forwarding redundant context switch reports (i.e. between
729 * other, unrelated contexts) we simply elect to forward them
730 * all.
731 *
732 * We don't rely solely on the reason field to identify context
733 * switches since it's not-uncommon for periodic samples to
734 * identify a switch before any 'context switch' report.
735 */
736 if (!dev_priv->perf.oa.exclusive_stream->ctx ||
737 dev_priv->perf.oa.specific_ctx_id == ctx_id ||
738 (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
739 dev_priv->perf.oa.specific_ctx_id) ||
740 reason & OAREPORT_REASON_CTX_SWITCH) {
741
742 /*
743 * While filtering for a single context we avoid
744 * leaking the IDs of other contexts.
745 */
746 if (dev_priv->perf.oa.exclusive_stream->ctx &&
747 dev_priv->perf.oa.specific_ctx_id != ctx_id) {
748 report32[2] = INVALID_CTX_ID;
749 }
750
751 ret = append_oa_sample(stream, buf, count, offset,
752 report);
753 if (ret)
754 break;
755
756 dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
757 }
758
759 /*
760 * The above reason field sanity check is based on
761 * the assumption that the OA buffer is initially
762 * zeroed and we reset the field after copying so the
763 * check is still meaningful once old reports start
764 * being overwritten.
765 */
766 report32[0] = 0;
767 }
768
769 if (start_offset != *offset) {
770 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
771
772 /*
773 * We removed the gtt_offset for the copy loop above, indexing
774 * relative to oa_buf_base so put back here...
775 */
776 head += gtt_offset;
777
778 I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
779 dev_priv->perf.oa.oa_buffer.head = head;
780
781 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
782 }
783
784 return ret;
785}
786
787/**
788 * gen8_oa_read - copy status records then buffered OA reports
789 * @stream: An i915-perf stream opened for OA metrics
790 * @buf: destination buffer given by userspace
791 * @count: the number of bytes userspace wants to read
792 * @offset: (inout): the current position for writing into @buf
793 *
794 * Checks OA unit status registers and if necessary appends corresponding
795 * status records for userspace (such as for a buffer full condition) and then
796 * initiate appending any buffered OA reports.
797 *
798 * Updates @offset according to the number of bytes successfully copied into
799 * the userspace buffer.
800 *
801 * NB: some data may be successfully copied to the userspace buffer
802 * even if an error is returned, and this is reflected in the
803 * updated @offset.
804 *
805 * Returns: zero on success or a negative error code
806 */
807static int gen8_oa_read(struct i915_perf_stream *stream,
808 char __user *buf,
809 size_t count,
810 size_t *offset)
811{
812 struct drm_i915_private *dev_priv = stream->dev_priv;
813 u32 oastatus;
814 int ret;
815
816 if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
817 return -EIO;
818
819 oastatus = I915_READ(GEN8_OASTATUS);
820
821 /*
822 * We treat OABUFFER_OVERFLOW as a significant error:
823 *
824 * Although theoretically we could handle this more gracefully
825 * sometimes, some Gens don't correctly suppress certain
826 * automatically triggered reports in this condition and so we
827 * have to assume that old reports are now being trampled
828 * over.
829 *
830 * Considering how we don't currently give userspace control
831 * over the OA buffer size and always configure a large 16MB
832 * buffer, then a buffer overflow does anyway likely indicate
833 * that something has gone quite badly wrong.
834 */
835 if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
836 ret = append_oa_status(stream, buf, count, offset,
837 DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
838 if (ret)
839 return ret;
840
841 DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
842 dev_priv->perf.oa.period_exponent);
843
844 dev_priv->perf.oa.ops.oa_disable(dev_priv);
845 dev_priv->perf.oa.ops.oa_enable(dev_priv);
846
847 /*
848 * Note: .oa_enable() is expected to re-init the oabuffer and
849 * reset GEN8_OASTATUS for us
850 */
851 oastatus = I915_READ(GEN8_OASTATUS);
852 }
853
854 if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
855 ret = append_oa_status(stream, buf, count, offset,
856 DRM_I915_PERF_RECORD_OA_REPORT_LOST);
857 if (ret)
858 return ret;
859 I915_WRITE(GEN8_OASTATUS,
860 oastatus & ~GEN8_OASTATUS_REPORT_LOST);
861 }
862
863 return gen8_append_oa_reports(stream, buf, count, offset);
864}
865
866/**
867 * Copies all buffered OA reports into userspace read() buffer.
868 * @stream: An i915-perf stream opened for OA metrics
869 * @buf: destination buffer given by userspace
870 * @count: the number of bytes userspace wants to read
871 * @offset: (inout): the current position for writing into @buf
872 *
873 * Notably any error condition resulting in a short read (-%ENOSPC or
874 * -%EFAULT) will be returned even though one or more records may
875 * have been successfully copied. In this case it's up to the caller
876 * to decide if the error should be squashed before returning to
877 * userspace.
878 *
879 * Note: reports are consumed from the head, and appended to the
880 * tail, so the tail chases the head?... If you think that's mad
881 * and back-to-front you're not alone, but this follows the
882 * Gen PRM naming convention.
883 *
884 * Returns: 0 on success, negative error code on failure.
885 */
556static int gen7_append_oa_reports(struct i915_perf_stream *stream, 886static int gen7_append_oa_reports(struct i915_perf_stream *stream,
557 char __user *buf, 887 char __user *buf,
558 size_t count, 888 size_t count,
@@ -732,7 +1062,8 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
732 if (ret) 1062 if (ret)
733 return ret; 1063 return ret;
734 1064
735 DRM_DEBUG("OA buffer overflow: force restart\n"); 1065 DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
1066 dev_priv->perf.oa.period_exponent);
736 1067
737 dev_priv->perf.oa.ops.oa_disable(dev_priv); 1068 dev_priv->perf.oa.ops.oa_disable(dev_priv);
738 dev_priv->perf.oa.ops.oa_enable(dev_priv); 1069 dev_priv->perf.oa.ops.oa_enable(dev_priv);
@@ -775,7 +1106,7 @@ static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
775 return -EIO; 1106 return -EIO;
776 1107
777 return wait_event_interruptible(dev_priv->perf.oa.poll_wq, 1108 return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
778 dev_priv->perf.oa.ops.oa_buffer_check(dev_priv)); 1109 oa_buffer_check_unlocked(dev_priv));
779} 1110}
780 1111
781/** 1112/**
@@ -832,30 +1163,38 @@ static int i915_oa_read(struct i915_perf_stream *stream,
832static int oa_get_render_ctx_id(struct i915_perf_stream *stream) 1163static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
833{ 1164{
834 struct drm_i915_private *dev_priv = stream->dev_priv; 1165 struct drm_i915_private *dev_priv = stream->dev_priv;
835 struct intel_engine_cs *engine = dev_priv->engine[RCS];
836 struct intel_ring *ring;
837 int ret;
838 1166
839 ret = i915_mutex_lock_interruptible(&dev_priv->drm); 1167 if (i915.enable_execlists)
840 if (ret) 1168 dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
841 return ret; 1169 else {
1170 struct intel_engine_cs *engine = dev_priv->engine[RCS];
1171 struct intel_ring *ring;
1172 int ret;
842 1173
843 /* As the ID is the gtt offset of the context's vma we pin 1174 ret = i915_mutex_lock_interruptible(&dev_priv->drm);
844 * the vma to ensure the ID remains fixed. 1175 if (ret)
845 * 1176 return ret;
846 * NB: implied RCS engine...
847 */
848 ring = engine->context_pin(engine, stream->ctx);
849 mutex_unlock(&dev_priv->drm.struct_mutex);
850 if (IS_ERR(ring))
851 return PTR_ERR(ring);
852 1177
853 /* Explicitly track the ID (instead of calling i915_ggtt_offset() 1178 /*
854 * on the fly) considering the difference with gen8+ and 1179 * As the ID is the gtt offset of the context's vma we
855 * execlists 1180 * pin the vma to ensure the ID remains fixed.
856 */ 1181 *
857 dev_priv->perf.oa.specific_ctx_id = 1182 * NB: implied RCS engine...
858 i915_ggtt_offset(stream->ctx->engine[engine->id].state); 1183 */
1184 ring = engine->context_pin(engine, stream->ctx);
1185 mutex_unlock(&dev_priv->drm.struct_mutex);
1186 if (IS_ERR(ring))
1187 return PTR_ERR(ring);
1188
1189
1190 /*
1191 * Explicitly track the ID (instead of calling
1192 * i915_ggtt_offset() on the fly) considering the difference
1193 * with gen8+ and execlists
1194 */
1195 dev_priv->perf.oa.specific_ctx_id =
1196 i915_ggtt_offset(stream->ctx->engine[engine->id].state);
1197 }
859 1198
860 return 0; 1199 return 0;
861} 1200}
@@ -870,14 +1209,19 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
870static void oa_put_render_ctx_id(struct i915_perf_stream *stream) 1209static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
871{ 1210{
872 struct drm_i915_private *dev_priv = stream->dev_priv; 1211 struct drm_i915_private *dev_priv = stream->dev_priv;
873 struct intel_engine_cs *engine = dev_priv->engine[RCS];
874 1212
875 mutex_lock(&dev_priv->drm.struct_mutex); 1213 if (i915.enable_execlists) {
1214 dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
1215 } else {
1216 struct intel_engine_cs *engine = dev_priv->engine[RCS];
1217
1218 mutex_lock(&dev_priv->drm.struct_mutex);
876 1219
877 dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; 1220 dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
878 engine->context_unpin(engine, stream->ctx); 1221 engine->context_unpin(engine, stream->ctx);
879 1222
880 mutex_unlock(&dev_priv->drm.struct_mutex); 1223 mutex_unlock(&dev_priv->drm.struct_mutex);
1224 }
881} 1225}
882 1226
883static void 1227static void
@@ -901,6 +1245,12 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
901 1245
902 BUG_ON(stream != dev_priv->perf.oa.exclusive_stream); 1246 BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);
903 1247
1248 /*
1249 * Unset exclusive_stream first, it might be checked while
1250 * disabling the metric set on gen8+.
1251 */
1252 dev_priv->perf.oa.exclusive_stream = NULL;
1253
904 dev_priv->perf.oa.ops.disable_metric_set(dev_priv); 1254 dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
905 1255
906 free_oa_buffer(dev_priv); 1256 free_oa_buffer(dev_priv);
@@ -911,8 +1261,6 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
911 if (stream->ctx) 1261 if (stream->ctx)
912 oa_put_render_ctx_id(stream); 1262 oa_put_render_ctx_id(stream);
913 1263
914 dev_priv->perf.oa.exclusive_stream = NULL;
915
916 if (dev_priv->perf.oa.spurious_report_rs.missed) { 1264 if (dev_priv->perf.oa.spurious_report_rs.missed) {
917 DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n", 1265 DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
918 dev_priv->perf.oa.spurious_report_rs.missed); 1266 dev_priv->perf.oa.spurious_report_rs.missed);
@@ -967,6 +1315,65 @@ static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
967 dev_priv->perf.oa.pollin = false; 1315 dev_priv->perf.oa.pollin = false;
968} 1316}
969 1317
1318static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
1319{
1320 u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
1321 unsigned long flags;
1322
1323 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
1324
1325 I915_WRITE(GEN8_OASTATUS, 0);
1326 I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
1327 dev_priv->perf.oa.oa_buffer.head = gtt_offset;
1328
1329 I915_WRITE(GEN8_OABUFFER_UDW, 0);
1330
1331 /*
1332 * PRM says:
1333 *
1334 * "This MMIO must be set before the OATAILPTR
1335 * register and after the OAHEADPTR register. This is
1336 * to enable proper functionality of the overflow
1337 * bit."
1338 */
1339 I915_WRITE(GEN8_OABUFFER, gtt_offset |
1340 OABUFFER_SIZE_16M | OA_MEM_SELECT_GGTT);
1341 I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1342
1343 /* Mark that we need updated tail pointers to read from... */
1344 dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
1345 dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
1346
1347 /*
1348 * Reset state used to recognise context switches, affecting which
1349 * reports we will forward to userspace while filtering for a single
1350 * context.
1351 */
1352 dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;
1353
1354 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
1355
1356 /*
1357 * NB: although the OA buffer will initially be allocated
1358 * zeroed via shmfs (and so this memset is redundant when
1359 * first allocating), we may re-init the OA buffer, either
1360 * when re-enabling a stream or in error/reset paths.
1361 *
1362 * The reason we clear the buffer for each re-init is for the
1363 * sanity check in gen8_append_oa_reports() that looks at the
1364 * reason field to make sure it's non-zero which relies on
1365 * the assumption that new reports are being written to zeroed
1366 * memory...
1367 */
1368 memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1369
1370 /*
1371 * Maybe make ->pollin per-stream state if we support multiple
1372 * concurrent streams in the future.
1373 */
1374 dev_priv->perf.oa.pollin = false;
1375}
1376
970static int alloc_oa_buffer(struct drm_i915_private *dev_priv) 1377static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
971{ 1378{
972 struct drm_i915_gem_object *bo; 1379 struct drm_i915_gem_object *bo;
@@ -1047,6 +1454,7 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
1047static int hsw_enable_metric_set(struct drm_i915_private *dev_priv) 1454static int hsw_enable_metric_set(struct drm_i915_private *dev_priv)
1048{ 1455{
1049 int ret = i915_oa_select_metric_set_hsw(dev_priv); 1456 int ret = i915_oa_select_metric_set_hsw(dev_priv);
1457 int i;
1050 1458
1051 if (ret) 1459 if (ret)
1052 return ret; 1460 return ret;
@@ -1068,8 +1476,10 @@ static int hsw_enable_metric_set(struct drm_i915_private *dev_priv)
1068 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | 1476 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
1069 GEN6_CSUNIT_CLOCK_GATE_DISABLE)); 1477 GEN6_CSUNIT_CLOCK_GATE_DISABLE));
1070 1478
1071 config_oa_regs(dev_priv, dev_priv->perf.oa.mux_regs, 1479 for (i = 0; i < dev_priv->perf.oa.n_mux_configs; i++) {
1072 dev_priv->perf.oa.mux_regs_len); 1480 config_oa_regs(dev_priv, dev_priv->perf.oa.mux_regs[i],
1481 dev_priv->perf.oa.mux_regs_lens[i]);
1482 }
1073 1483
1074 /* It apparently takes a fairly long time for a new MUX 1484 /* It apparently takes a fairly long time for a new MUX
1075 * configuration to be be applied after these register writes. 1485 * configuration to be be applied after these register writes.
@@ -1111,9 +1521,337 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
1111 ~GT_NOA_ENABLE)); 1521 ~GT_NOA_ENABLE));
1112} 1522}
1113 1523
1114static void gen7_update_oacontrol_locked(struct drm_i915_private *dev_priv) 1524/*
1525 * NB: It must always remain pointer safe to run this even if the OA unit
1526 * has been disabled.
1527 *
1528 * It's fine to put out-of-date values into these per-context registers
1529 * in the case that the OA unit has been disabled.
1530 */
1531static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
1532 u32 *reg_state)
1533{
1534 struct drm_i915_private *dev_priv = ctx->i915;
1535 const struct i915_oa_reg *flex_regs = dev_priv->perf.oa.flex_regs;
1536 int n_flex_regs = dev_priv->perf.oa.flex_regs_len;
1537 u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
1538 u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
1539 /* The MMIO offsets for Flex EU registers aren't contiguous */
1540 u32 flex_mmio[] = {
1541 i915_mmio_reg_offset(EU_PERF_CNTL0),
1542 i915_mmio_reg_offset(EU_PERF_CNTL1),
1543 i915_mmio_reg_offset(EU_PERF_CNTL2),
1544 i915_mmio_reg_offset(EU_PERF_CNTL3),
1545 i915_mmio_reg_offset(EU_PERF_CNTL4),
1546 i915_mmio_reg_offset(EU_PERF_CNTL5),
1547 i915_mmio_reg_offset(EU_PERF_CNTL6),
1548 };
1549 int i;
1550
1551 reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
1552 reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent <<
1553 GEN8_OA_TIMER_PERIOD_SHIFT) |
1554 (dev_priv->perf.oa.periodic ?
1555 GEN8_OA_TIMER_ENABLE : 0) |
1556 GEN8_OA_COUNTER_RESUME;
1557
1558 for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
1559 u32 state_offset = ctx_flexeu0 + i * 2;
1560 u32 mmio = flex_mmio[i];
1561
1562 /*
1563 * This arbitrary default will select the 'EU FPU0 Pipeline
1564 * Active' event. In the future it's anticipated that there
1565 * will be an explicit 'No Event' we can select, but not yet...
1566 */
1567 u32 value = 0;
1568 int j;
1569
1570 for (j = 0; j < n_flex_regs; j++) {
1571 if (i915_mmio_reg_offset(flex_regs[j].addr) == mmio) {
1572 value = flex_regs[j].value;
1573 break;
1574 }
1575 }
1576
1577 reg_state[state_offset] = mmio;
1578 reg_state[state_offset+1] = value;
1579 }
1580}
1581
1582/*
1583 * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This
1584 * is only used by the kernel context.
1585 */
1586static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
1587{
1588 struct drm_i915_private *dev_priv = req->i915;
1589 const struct i915_oa_reg *flex_regs = dev_priv->perf.oa.flex_regs;
1590 int n_flex_regs = dev_priv->perf.oa.flex_regs_len;
1591 /* The MMIO offsets for Flex EU registers aren't contiguous */
1592 u32 flex_mmio[] = {
1593 i915_mmio_reg_offset(EU_PERF_CNTL0),
1594 i915_mmio_reg_offset(EU_PERF_CNTL1),
1595 i915_mmio_reg_offset(EU_PERF_CNTL2),
1596 i915_mmio_reg_offset(EU_PERF_CNTL3),
1597 i915_mmio_reg_offset(EU_PERF_CNTL4),
1598 i915_mmio_reg_offset(EU_PERF_CNTL5),
1599 i915_mmio_reg_offset(EU_PERF_CNTL6),
1600 };
1601 u32 *cs;
1602 int i;
1603
1604 cs = intel_ring_begin(req, n_flex_regs * 2 + 4);
1605 if (IS_ERR(cs))
1606 return PTR_ERR(cs);
1607
1608 *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1);
1609
1610 *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
1611 *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
1612 (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
1613 GEN8_OA_COUNTER_RESUME;
1614
1615 for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
1616 u32 mmio = flex_mmio[i];
1617
1618 /*
1619 * This arbitrary default will select the 'EU FPU0 Pipeline
1620 * Active' event. In the future it's anticipated that there
1621 * will be an explicit 'No Event' we can select, but not
1622 * yet...
1623 */
1624 u32 value = 0;
1625 int j;
1626
1627 for (j = 0; j < n_flex_regs; j++) {
1628 if (i915_mmio_reg_offset(flex_regs[j].addr) == mmio) {
1629 value = flex_regs[j].value;
1630 break;
1631 }
1632 }
1633
1634 *cs++ = mmio;
1635 *cs++ = value;
1636 }
1637
1638 *cs++ = MI_NOOP;
1639 intel_ring_advance(req, cs);
1640
1641 return 0;
1642}
1643
1644static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv)
1645{
1646 struct intel_engine_cs *engine = dev_priv->engine[RCS];
1647 struct i915_gem_timeline *timeline;
1648 struct drm_i915_gem_request *req;
1649 int ret;
1650
1651 lockdep_assert_held(&dev_priv->drm.struct_mutex);
1652
1653 i915_gem_retire_requests(dev_priv);
1654
1655 req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
1656 if (IS_ERR(req))
1657 return PTR_ERR(req);
1658
1659 ret = gen8_emit_oa_config(req);
1660 if (ret) {
1661 i915_add_request(req);
1662 return ret;
1663 }
1664
1665 /* Queue this switch after all other activity */
1666 list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
1667 struct drm_i915_gem_request *prev;
1668 struct intel_timeline *tl;
1669
1670 tl = &timeline->engine[engine->id];
1671 prev = i915_gem_active_raw(&tl->last_request,
1672 &dev_priv->drm.struct_mutex);
1673 if (prev)
1674 i915_sw_fence_await_sw_fence_gfp(&req->submit,
1675 &prev->submit,
1676 GFP_KERNEL);
1677 }
1678
1679 ret = i915_switch_context(req);
1680 i915_add_request(req);
1681
1682 return ret;
1683}
1684
1685/*
1686 * Manages updating the per-context aspects of the OA stream
1687 * configuration across all contexts.
1688 *
1689 * The awkward consideration here is that OACTXCONTROL controls the
1690 * exponent for periodic sampling which is primarily used for system
1691 * wide profiling where we'd like a consistent sampling period even in
1692 * the face of context switches.
1693 *
1694 * Our approach of updating the register state context (as opposed to
1695 * say using a workaround batch buffer) ensures that the hardware
1696 * won't automatically reload an out-of-date timer exponent even
1697 * transiently before a WA BB could be parsed.
1698 *
1699 * This function needs to:
1700 * - Ensure the currently running context's per-context OA state is
1701 * updated
1702 * - Ensure that all existing contexts will have the correct per-context
1703 * OA state if they are scheduled for use.
1704 * - Ensure any new contexts will be initialized with the correct
1705 * per-context OA state.
1706 *
1707 * Note: it's only the RCS/Render context that has any OA state.
1708 */
1709static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
1710 bool interruptible)
1711{
1712 struct i915_gem_context *ctx;
1713 int ret;
1714 unsigned int wait_flags = I915_WAIT_LOCKED;
1715
1716 if (interruptible) {
1717 ret = i915_mutex_lock_interruptible(&dev_priv->drm);
1718 if (ret)
1719 return ret;
1720
1721 wait_flags |= I915_WAIT_INTERRUPTIBLE;
1722 } else {
1723 mutex_lock(&dev_priv->drm.struct_mutex);
1724 }
1725
1726 /* Switch away from any user context. */
1727 ret = gen8_switch_to_updated_kernel_context(dev_priv);
1728 if (ret)
1729 goto out;
1730
1731 /*
1732 * The OA register config is setup through the context image. This image
1733 * might be written to by the GPU on context switch (in particular on
1734 * lite-restore). This means we can't safely update a context's image,
1735 * if this context is scheduled/submitted to run on the GPU.
1736 *
1737 * We could emit the OA register config through the batch buffer but
1738 * this might leave small interval of time where the OA unit is
1739 * configured at an invalid sampling period.
1740 *
1741 * So far the best way to work around this issue seems to be draining
1742 * the GPU from any submitted work.
1743 */
1744 ret = i915_gem_wait_for_idle(dev_priv, wait_flags);
1745 if (ret)
1746 goto out;
1747
1748 /* Update all contexts now that we've stalled the submission. */
1749 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1750 struct intel_context *ce = &ctx->engine[RCS];
1751 u32 *regs;
1752
1753 /* OA settings will be set upon first use */
1754 if (!ce->state)
1755 continue;
1756
1757 regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
1758 if (IS_ERR(regs)) {
1759 ret = PTR_ERR(regs);
1760 goto out;
1761 }
1762
1763 ce->state->obj->mm.dirty = true;
1764 regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
1765
1766 gen8_update_reg_state_unlocked(ctx, regs);
1767
1768 i915_gem_object_unpin_map(ce->state->obj);
1769 }
1770
1771 out:
1772 mutex_unlock(&dev_priv->drm.struct_mutex);
1773
1774 return ret;
1775}
1776
1777static int gen8_enable_metric_set(struct drm_i915_private *dev_priv)
1115{ 1778{
1116 lockdep_assert_held(&dev_priv->perf.hook_lock); 1779 int ret = dev_priv->perf.oa.ops.select_metric_set(dev_priv);
1780 int i;
1781
1782 if (ret)
1783 return ret;
1784
1785 /*
1786 * We disable slice/unslice clock ratio change reports on SKL since
1787 * they are too noisy. The HW generates a lot of redundant reports
1788 * where the ratio hasn't really changed causing a lot of redundant
1789 * work to processes and increasing the chances we'll hit buffer
1790 * overruns.
1791 *
1792 * Although we don't currently use the 'disable overrun' OABUFFER
1793 * feature it's worth noting that clock ratio reports have to be
1794 * disabled before considering to use that feature since the HW doesn't
1795 * correctly block these reports.
1796 *
1797 * Currently none of the high-level metrics we have depend on knowing
1798 * this ratio to normalize.
1799 *
1800 * Note: This register is not power context saved and restored, but
1801 * that's OK considering that we disable RC6 while the OA unit is
1802 * enabled.
1803 *
1804 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
1805 * be read back from automatically triggered reports, as part of the
1806 * RPT_ID field.
1807 */
1808 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
1809 IS_KABYLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
1810 I915_WRITE(GEN8_OA_DEBUG,
1811 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
1812 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
1813 }
1814
1815 /*
1816 * Update all contexts prior writing the mux configurations as we need
1817 * to make sure all slices/subslices are ON before writing to NOA
1818 * registers.
1819 */
1820 ret = gen8_configure_all_contexts(dev_priv, true);
1821 if (ret)
1822 return ret;
1823
1824 I915_WRITE(GDT_CHICKEN_BITS, 0xA0);
1825 for (i = 0; i < dev_priv->perf.oa.n_mux_configs; i++) {
1826 config_oa_regs(dev_priv, dev_priv->perf.oa.mux_regs[i],
1827 dev_priv->perf.oa.mux_regs_lens[i]);
1828 }
1829 I915_WRITE(GDT_CHICKEN_BITS, 0x80);
1830
1831 config_oa_regs(dev_priv, dev_priv->perf.oa.b_counter_regs,
1832 dev_priv->perf.oa.b_counter_regs_len);
1833
1834 return 0;
1835}
1836
1837static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
1838{
1839 /* Reset all contexts' slices/subslices configurations. */
1840 gen8_configure_all_contexts(dev_priv, false);
1841}
1842
1843static void gen7_oa_enable(struct drm_i915_private *dev_priv)
1844{
1845 /*
1846 * Reset buf pointers so we don't forward reports from before now.
1847 *
1848 * Think carefully if considering trying to avoid this, since it
1849 * also ensures status flags and the buffer itself are cleared
1850 * in error paths, and we have checks for invalid reports based
1851 * on the assumption that certain fields are written to zeroed
1852 * memory which this helps maintains.
1853 */
1854 gen7_init_oa_buffer(dev_priv);
1117 1855
1118 if (dev_priv->perf.oa.exclusive_stream->enabled) { 1856 if (dev_priv->perf.oa.exclusive_stream->enabled) {
1119 struct i915_gem_context *ctx = 1857 struct i915_gem_context *ctx =
@@ -1136,11 +1874,12 @@ static void gen7_update_oacontrol_locked(struct drm_i915_private *dev_priv)
1136 I915_WRITE(GEN7_OACONTROL, 0); 1874 I915_WRITE(GEN7_OACONTROL, 0);
1137} 1875}
1138 1876
1139static void gen7_oa_enable(struct drm_i915_private *dev_priv) 1877static void gen8_oa_enable(struct drm_i915_private *dev_priv)
1140{ 1878{
1141 unsigned long flags; 1879 u32 report_format = dev_priv->perf.oa.oa_buffer.format;
1142 1880
1143 /* Reset buf pointers so we don't forward reports from before now. 1881 /*
1882 * Reset buf pointers so we don't forward reports from before now.
1144 * 1883 *
1145 * Think carefully if considering trying to avoid this, since it 1884 * Think carefully if considering trying to avoid this, since it
1146 * also ensures status flags and the buffer itself are cleared 1885 * also ensures status flags and the buffer itself are cleared
@@ -1148,11 +1887,16 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv)
1148 * on the assumption that certain fields are written to zeroed 1887 * on the assumption that certain fields are written to zeroed
1149 * memory which this helps maintains. 1888 * memory which this helps maintains.
1150 */ 1889 */
1151 gen7_init_oa_buffer(dev_priv); 1890 gen8_init_oa_buffer(dev_priv);
1152 1891
1153 spin_lock_irqsave(&dev_priv->perf.hook_lock, flags); 1892 /*
1154 gen7_update_oacontrol_locked(dev_priv); 1893 * Note: we don't rely on the hardware to perform single context
1155 spin_unlock_irqrestore(&dev_priv->perf.hook_lock, flags); 1894 * filtering and instead filter on the cpu based on the context-id
1895 * field of reports
1896 */
1897 I915_WRITE(GEN8_OACONTROL, (report_format <<
1898 GEN8_OA_REPORT_FORMAT_SHIFT) |
1899 GEN8_OA_COUNTER_ENABLE);
1156} 1900}
1157 1901
1158/** 1902/**
@@ -1181,6 +1925,11 @@ static void gen7_oa_disable(struct drm_i915_private *dev_priv)
1181 I915_WRITE(GEN7_OACONTROL, 0); 1925 I915_WRITE(GEN7_OACONTROL, 0);
1182} 1926}
1183 1927
1928static void gen8_oa_disable(struct drm_i915_private *dev_priv)
1929{
1930 I915_WRITE(GEN8_OACONTROL, 0);
1931}
1932
1184/** 1933/**
1185 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream 1934 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
1186 * @stream: An i915 perf stream opened for OA metrics 1935 * @stream: An i915 perf stream opened for OA metrics
@@ -1359,6 +2108,21 @@ err_oa_buf_alloc:
1359 return ret; 2108 return ret;
1360} 2109}
1361 2110
2111void i915_oa_init_reg_state(struct intel_engine_cs *engine,
2112 struct i915_gem_context *ctx,
2113 u32 *reg_state)
2114{
2115 struct drm_i915_private *dev_priv = engine->i915;
2116
2117 if (engine->id != RCS)
2118 return;
2119
2120 if (!dev_priv->perf.initialized)
2121 return;
2122
2123 gen8_update_reg_state_unlocked(ctx, reg_state);
2124}
2125
1362/** 2126/**
1363 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation 2127 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
1364 * @stream: An i915 perf stream 2128 * @stream: An i915 perf stream
@@ -1484,7 +2248,7 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
1484 container_of(hrtimer, typeof(*dev_priv), 2248 container_of(hrtimer, typeof(*dev_priv),
1485 perf.oa.poll_check_timer); 2249 perf.oa.poll_check_timer);
1486 2250
1487 if (dev_priv->perf.oa.ops.oa_buffer_check(dev_priv)) { 2251 if (oa_buffer_check_unlocked(dev_priv)) {
1488 dev_priv->perf.oa.pollin = true; 2252 dev_priv->perf.oa.pollin = true;
1489 wake_up(&dev_priv->perf.oa.poll_wq); 2253 wake_up(&dev_priv->perf.oa.poll_wq);
1490 } 2254 }
@@ -1773,6 +2537,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
1773 struct i915_gem_context *specific_ctx = NULL; 2537 struct i915_gem_context *specific_ctx = NULL;
1774 struct i915_perf_stream *stream = NULL; 2538 struct i915_perf_stream *stream = NULL;
1775 unsigned long f_flags = 0; 2539 unsigned long f_flags = 0;
2540 bool privileged_op = true;
1776 int stream_fd; 2541 int stream_fd;
1777 int ret; 2542 int ret;
1778 2543
@@ -1790,12 +2555,29 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
1790 } 2555 }
1791 } 2556 }
1792 2557
2558 /*
2559 * On Haswell the OA unit supports clock gating off for a specific
2560 * context and in this mode there's no visibility of metrics for the
2561 * rest of the system, which we consider acceptable for a
2562 * non-privileged client.
2563 *
2564 * For Gen8+ the OA unit no longer supports clock gating off for a
2565 * specific context and the kernel can't securely stop the counters
2566 * from updating as system-wide / global values. Even though we can
2567 * filter reports based on the included context ID we can't block
2568 * clients from seeing the raw / global counter values via
2569 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
2570 * enable the OA unit by default.
2571 */
2572 if (IS_HASWELL(dev_priv) && specific_ctx)
2573 privileged_op = false;
2574
1793 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option 2575 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option
1794 * we check a dev.i915.perf_stream_paranoid sysctl option 2576 * we check a dev.i915.perf_stream_paranoid sysctl option
1795 * to determine if it's ok to access system wide OA counters 2577 * to determine if it's ok to access system wide OA counters
1796 * without CAP_SYS_ADMIN privileges. 2578 * without CAP_SYS_ADMIN privileges.
1797 */ 2579 */
1798 if (!specific_ctx && 2580 if (privileged_op &&
1799 i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { 2581 i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
1800 DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n"); 2582 DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
1801 ret = -EACCES; 2583 ret = -EACCES;
@@ -1856,6 +2638,12 @@ err:
1856 return ret; 2638 return ret;
1857} 2639}
1858 2640
2641static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
2642{
2643 return div_u64(1000000000ULL * (2ULL << exponent),
2644 dev_priv->perf.oa.timestamp_frequency);
2645}
2646
1859/** 2647/**
1860 * read_properties_unlocked - validate + copy userspace stream open properties 2648 * read_properties_unlocked - validate + copy userspace stream open properties
1861 * @dev_priv: i915 device instance 2649 * @dev_priv: i915 device instance
@@ -1952,16 +2740,13 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
1952 } 2740 }
1953 2741
1954 /* Theoretically we can program the OA unit to sample 2742 /* Theoretically we can program the OA unit to sample
1955 * every 160ns but don't allow that by default unless 2743 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
1956 * root. 2744 * for BXT. We don't allow such high sampling
1957 * 2745 * frequencies by default unless root.
1958 * On Haswell the period is derived from the exponent
1959 * as:
1960 *
1961 * period = 80ns * 2^(exponent + 1)
1962 */ 2746 */
2747
1963 BUILD_BUG_ON(sizeof(oa_period) != 8); 2748 BUILD_BUG_ON(sizeof(oa_period) != 8);
1964 oa_period = 80ull * (2ull << value); 2749 oa_period = oa_exponent_to_ns(dev_priv, value);
1965 2750
1966 /* This check is primarily to ensure that oa_period <= 2751 /* This check is primarily to ensure that oa_period <=
1967 * UINT32_MAX (before passing to do_div which only 2752 * UINT32_MAX (before passing to do_div which only
@@ -2067,9 +2852,6 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
2067 */ 2852 */
2068void i915_perf_register(struct drm_i915_private *dev_priv) 2853void i915_perf_register(struct drm_i915_private *dev_priv)
2069{ 2854{
2070 if (!IS_HASWELL(dev_priv))
2071 return;
2072
2073 if (!dev_priv->perf.initialized) 2855 if (!dev_priv->perf.initialized)
2074 return; 2856 return;
2075 2857
@@ -2085,11 +2867,50 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
2085 if (!dev_priv->perf.metrics_kobj) 2867 if (!dev_priv->perf.metrics_kobj)
2086 goto exit; 2868 goto exit;
2087 2869
2088 if (i915_perf_register_sysfs_hsw(dev_priv)) { 2870 if (IS_HASWELL(dev_priv)) {
2089 kobject_put(dev_priv->perf.metrics_kobj); 2871 if (i915_perf_register_sysfs_hsw(dev_priv))
2090 dev_priv->perf.metrics_kobj = NULL; 2872 goto sysfs_error;
2873 } else if (IS_BROADWELL(dev_priv)) {
2874 if (i915_perf_register_sysfs_bdw(dev_priv))
2875 goto sysfs_error;
2876 } else if (IS_CHERRYVIEW(dev_priv)) {
2877 if (i915_perf_register_sysfs_chv(dev_priv))
2878 goto sysfs_error;
2879 } else if (IS_SKYLAKE(dev_priv)) {
2880 if (IS_SKL_GT2(dev_priv)) {
2881 if (i915_perf_register_sysfs_sklgt2(dev_priv))
2882 goto sysfs_error;
2883 } else if (IS_SKL_GT3(dev_priv)) {
2884 if (i915_perf_register_sysfs_sklgt3(dev_priv))
2885 goto sysfs_error;
2886 } else if (IS_SKL_GT4(dev_priv)) {
2887 if (i915_perf_register_sysfs_sklgt4(dev_priv))
2888 goto sysfs_error;
2889 } else
2890 goto sysfs_error;
2891 } else if (IS_BROXTON(dev_priv)) {
2892 if (i915_perf_register_sysfs_bxt(dev_priv))
2893 goto sysfs_error;
2894 } else if (IS_KABYLAKE(dev_priv)) {
2895 if (IS_KBL_GT2(dev_priv)) {
2896 if (i915_perf_register_sysfs_kblgt2(dev_priv))
2897 goto sysfs_error;
2898 } else if (IS_KBL_GT3(dev_priv)) {
2899 if (i915_perf_register_sysfs_kblgt3(dev_priv))
2900 goto sysfs_error;
2901 } else
2902 goto sysfs_error;
2903 } else if (IS_GEMINILAKE(dev_priv)) {
2904 if (i915_perf_register_sysfs_glk(dev_priv))
2905 goto sysfs_error;
2091 } 2906 }
2092 2907
2908 goto exit;
2909
2910sysfs_error:
2911 kobject_put(dev_priv->perf.metrics_kobj);
2912 dev_priv->perf.metrics_kobj = NULL;
2913
2093exit: 2914exit:
2094 mutex_unlock(&dev_priv->perf.lock); 2915 mutex_unlock(&dev_priv->perf.lock);
2095} 2916}
@@ -2105,13 +2926,32 @@ exit:
2105 */ 2926 */
2106void i915_perf_unregister(struct drm_i915_private *dev_priv) 2927void i915_perf_unregister(struct drm_i915_private *dev_priv)
2107{ 2928{
2108 if (!IS_HASWELL(dev_priv))
2109 return;
2110
2111 if (!dev_priv->perf.metrics_kobj) 2929 if (!dev_priv->perf.metrics_kobj)
2112 return; 2930 return;
2113 2931
2114 i915_perf_unregister_sysfs_hsw(dev_priv); 2932 if (IS_HASWELL(dev_priv))
2933 i915_perf_unregister_sysfs_hsw(dev_priv);
2934 else if (IS_BROADWELL(dev_priv))
2935 i915_perf_unregister_sysfs_bdw(dev_priv);
2936 else if (IS_CHERRYVIEW(dev_priv))
2937 i915_perf_unregister_sysfs_chv(dev_priv);
2938 else if (IS_SKYLAKE(dev_priv)) {
2939 if (IS_SKL_GT2(dev_priv))
2940 i915_perf_unregister_sysfs_sklgt2(dev_priv);
2941 else if (IS_SKL_GT3(dev_priv))
2942 i915_perf_unregister_sysfs_sklgt3(dev_priv);
2943 else if (IS_SKL_GT4(dev_priv))
2944 i915_perf_unregister_sysfs_sklgt4(dev_priv);
2945 } else if (IS_BROXTON(dev_priv))
2946 i915_perf_unregister_sysfs_bxt(dev_priv);
2947 else if (IS_KABYLAKE(dev_priv)) {
2948 if (IS_KBL_GT2(dev_priv))
2949 i915_perf_unregister_sysfs_kblgt2(dev_priv);
2950 else if (IS_KBL_GT3(dev_priv))
2951 i915_perf_unregister_sysfs_kblgt3(dev_priv);
2952 } else if (IS_GEMINILAKE(dev_priv))
2953 i915_perf_unregister_sysfs_glk(dev_priv);
2954
2115 2955
2116 kobject_put(dev_priv->perf.metrics_kobj); 2956 kobject_put(dev_priv->perf.metrics_kobj);
2117 dev_priv->perf.metrics_kobj = NULL; 2957 dev_priv->perf.metrics_kobj = NULL;
@@ -2170,36 +3010,133 @@ static struct ctl_table dev_root[] = {
2170 */ 3010 */
2171void i915_perf_init(struct drm_i915_private *dev_priv) 3011void i915_perf_init(struct drm_i915_private *dev_priv)
2172{ 3012{
2173 if (!IS_HASWELL(dev_priv)) 3013 dev_priv->perf.oa.n_builtin_sets = 0;
2174 return; 3014
3015 if (IS_HASWELL(dev_priv)) {
3016 dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
3017 dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
3018 dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
3019 dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
3020 dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
3021 dev_priv->perf.oa.ops.read = gen7_oa_read;
3022 dev_priv->perf.oa.ops.oa_hw_tail_read =
3023 gen7_oa_hw_tail_read;
3024
3025 dev_priv->perf.oa.timestamp_frequency = 12500000;
3026
3027 dev_priv->perf.oa.oa_formats = hsw_oa_formats;
3028
3029 dev_priv->perf.oa.n_builtin_sets =
3030 i915_oa_n_builtin_metric_sets_hsw;
3031 } else if (i915.enable_execlists) {
3032 /* Note: that although we could theoretically also support the
3033 * legacy ringbuffer mode on BDW (and earlier iterations of
3034 * this driver, before upstreaming did this) it didn't seem
3035 * worth the complexity to maintain now that BDW+ enable
3036 * execlist mode by default.
3037 */
2175 3038
2176 hrtimer_init(&dev_priv->perf.oa.poll_check_timer, 3039 if (IS_GEN8(dev_priv)) {
2177 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3040 dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
2178 dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb; 3041 dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
2179 init_waitqueue_head(&dev_priv->perf.oa.poll_wq);
2180 3042
2181 INIT_LIST_HEAD(&dev_priv->perf.streams); 3043 dev_priv->perf.oa.timestamp_frequency = 12500000;
2182 mutex_init(&dev_priv->perf.lock);
2183 spin_lock_init(&dev_priv->perf.hook_lock);
2184 spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
2185 3044
2186 dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer; 3045 dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
2187 dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
2188 dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
2189 dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
2190 dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
2191 dev_priv->perf.oa.ops.read = gen7_oa_read;
2192 dev_priv->perf.oa.ops.oa_buffer_check =
2193 gen7_oa_buffer_check_unlocked;
2194 3046
2195 dev_priv->perf.oa.oa_formats = hsw_oa_formats; 3047 if (IS_BROADWELL(dev_priv)) {
3048 dev_priv->perf.oa.n_builtin_sets =
3049 i915_oa_n_builtin_metric_sets_bdw;
3050 dev_priv->perf.oa.ops.select_metric_set =
3051 i915_oa_select_metric_set_bdw;
3052 } else if (IS_CHERRYVIEW(dev_priv)) {
3053 dev_priv->perf.oa.n_builtin_sets =
3054 i915_oa_n_builtin_metric_sets_chv;
3055 dev_priv->perf.oa.ops.select_metric_set =
3056 i915_oa_select_metric_set_chv;
3057 }
3058 } else if (IS_GEN9(dev_priv)) {
3059 dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
3060 dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
3061
3062 dev_priv->perf.oa.timestamp_frequency = 12000000;
3063
3064 dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
3065
3066 if (IS_SKL_GT2(dev_priv)) {
3067 dev_priv->perf.oa.n_builtin_sets =
3068 i915_oa_n_builtin_metric_sets_sklgt2;
3069 dev_priv->perf.oa.ops.select_metric_set =
3070 i915_oa_select_metric_set_sklgt2;
3071 } else if (IS_SKL_GT3(dev_priv)) {
3072 dev_priv->perf.oa.n_builtin_sets =
3073 i915_oa_n_builtin_metric_sets_sklgt3;
3074 dev_priv->perf.oa.ops.select_metric_set =
3075 i915_oa_select_metric_set_sklgt3;
3076 } else if (IS_SKL_GT4(dev_priv)) {
3077 dev_priv->perf.oa.n_builtin_sets =
3078 i915_oa_n_builtin_metric_sets_sklgt4;
3079 dev_priv->perf.oa.ops.select_metric_set =
3080 i915_oa_select_metric_set_sklgt4;
3081 } else if (IS_BROXTON(dev_priv)) {
3082 dev_priv->perf.oa.timestamp_frequency = 19200000;
3083
3084 dev_priv->perf.oa.n_builtin_sets =
3085 i915_oa_n_builtin_metric_sets_bxt;
3086 dev_priv->perf.oa.ops.select_metric_set =
3087 i915_oa_select_metric_set_bxt;
3088 } else if (IS_KBL_GT2(dev_priv)) {
3089 dev_priv->perf.oa.n_builtin_sets =
3090 i915_oa_n_builtin_metric_sets_kblgt2;
3091 dev_priv->perf.oa.ops.select_metric_set =
3092 i915_oa_select_metric_set_kblgt2;
3093 } else if (IS_KBL_GT3(dev_priv)) {
3094 dev_priv->perf.oa.n_builtin_sets =
3095 i915_oa_n_builtin_metric_sets_kblgt3;
3096 dev_priv->perf.oa.ops.select_metric_set =
3097 i915_oa_select_metric_set_kblgt3;
3098 } else if (IS_GEMINILAKE(dev_priv)) {
3099 dev_priv->perf.oa.timestamp_frequency = 19200000;
3100
3101 dev_priv->perf.oa.n_builtin_sets =
3102 i915_oa_n_builtin_metric_sets_glk;
3103 dev_priv->perf.oa.ops.select_metric_set =
3104 i915_oa_select_metric_set_glk;
3105 }
3106 }
2196 3107
2197 dev_priv->perf.oa.n_builtin_sets = 3108 if (dev_priv->perf.oa.n_builtin_sets) {
2198 i915_oa_n_builtin_metric_sets_hsw; 3109 dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
3110 dev_priv->perf.oa.ops.enable_metric_set =
3111 gen8_enable_metric_set;
3112 dev_priv->perf.oa.ops.disable_metric_set =
3113 gen8_disable_metric_set;
3114 dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
3115 dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
3116 dev_priv->perf.oa.ops.read = gen8_oa_read;
3117 dev_priv->perf.oa.ops.oa_hw_tail_read =
3118 gen8_oa_hw_tail_read;
3119
3120 dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
3121 }
3122 }
3123
3124 if (dev_priv->perf.oa.n_builtin_sets) {
3125 hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
3126 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3127 dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
3128 init_waitqueue_head(&dev_priv->perf.oa.poll_wq);
2199 3129
2200 dev_priv->perf.sysctl_header = register_sysctl_table(dev_root); 3130 INIT_LIST_HEAD(&dev_priv->perf.streams);
3131 mutex_init(&dev_priv->perf.lock);
3132 spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
2201 3133
2202 dev_priv->perf.initialized = true; 3134 oa_sample_rate_hard_limit =
3135 dev_priv->perf.oa.timestamp_frequency / 2;
3136 dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
3137
3138 dev_priv->perf.initialized = true;
3139 }
2203} 3140}
2204 3141
2205/** 3142/**
@@ -2214,5 +3151,6 @@ void i915_perf_fini(struct drm_i915_private *dev_priv)
2214 unregister_sysctl_table(dev_priv->perf.sysctl_header); 3151 unregister_sysctl_table(dev_priv->perf.sysctl_header);
2215 3152
2216 memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops)); 3153 memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));
3154
2217 dev_priv->perf.initialized = false; 3155 dev_priv->perf.initialized = false;
2218} 3156}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 89888adb9af1..bd535f12db18 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -58,10 +58,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
58#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b)) 58#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
59#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 59#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
60#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b)) 60#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
61#define _PIPE3(pipe, ...) _PICK(pipe, __VA_ARGS__) 61#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
62#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PIPE3(pipe, a, b, c)) 62#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
63#define _PORT3(port, ...) _PICK(port, __VA_ARGS__) 63#define _PLL(pll, a, b) ((a) + (pll)*((b)-(a)))
64#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c)) 64#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
65#define _MMIO_PORT6(port, a, b, c, d, e, f) _MMIO(_PICK(port, a, b, c, d, e, f))
66#define _MMIO_PORT6_LN(port, ln, a0, a1, b, c, d, e, f) \
67 _MMIO(_PICK(port, a0, b, c, d, e, f) + (ln * (a1 - a0)))
65#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__) 68#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
66#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) 69#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
67 70
@@ -653,6 +656,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
653 656
654#define GEN8_OACTXID _MMIO(0x2364) 657#define GEN8_OACTXID _MMIO(0x2364)
655 658
659#define GEN8_OA_DEBUG _MMIO(0x2B04)
660#define GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1<<5)
661#define GEN9_OA_DEBUG_INCLUDE_CLK_RATIO (1<<6)
662#define GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1<<2)
663#define GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1<<1)
664
656#define GEN8_OACONTROL _MMIO(0x2B00) 665#define GEN8_OACONTROL _MMIO(0x2B00)
657#define GEN8_OA_REPORT_FORMAT_A12 (0<<2) 666#define GEN8_OA_REPORT_FORMAT_A12 (0<<2)
658#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2<<2) 667#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2<<2)
@@ -674,6 +683,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
674#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1<<1) 683#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1<<1)
675#define GEN7_OABUFFER_RESUME (1<<0) 684#define GEN7_OABUFFER_RESUME (1<<0)
676 685
686#define GEN8_OABUFFER_UDW _MMIO(0x23b4)
677#define GEN8_OABUFFER _MMIO(0x2b14) 687#define GEN8_OABUFFER _MMIO(0x2b14)
678 688
679#define GEN7_OASTATUS1 _MMIO(0x2364) 689#define GEN7_OASTATUS1 _MMIO(0x2364)
@@ -692,7 +702,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
692#define GEN8_OASTATUS_REPORT_LOST (1<<0) 702#define GEN8_OASTATUS_REPORT_LOST (1<<0)
693 703
694#define GEN8_OAHEADPTR _MMIO(0x2B0C) 704#define GEN8_OAHEADPTR _MMIO(0x2B0C)
705#define GEN8_OAHEADPTR_MASK 0xffffffc0
695#define GEN8_OATAILPTR _MMIO(0x2B10) 706#define GEN8_OATAILPTR _MMIO(0x2B10)
707#define GEN8_OATAILPTR_MASK 0xffffffc0
696 708
697#define OABUFFER_SIZE_128K (0<<3) 709#define OABUFFER_SIZE_128K (0<<3)
698#define OABUFFER_SIZE_256K (1<<3) 710#define OABUFFER_SIZE_256K (1<<3)
@@ -705,7 +717,17 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
705 717
706#define OA_MEM_SELECT_GGTT (1<<0) 718#define OA_MEM_SELECT_GGTT (1<<0)
707 719
720/*
721 * Flexible, Aggregate EU Counter Registers.
722 * Note: these aren't contiguous
723 */
708#define EU_PERF_CNTL0 _MMIO(0xe458) 724#define EU_PERF_CNTL0 _MMIO(0xe458)
725#define EU_PERF_CNTL1 _MMIO(0xe558)
726#define EU_PERF_CNTL2 _MMIO(0xe658)
727#define EU_PERF_CNTL3 _MMIO(0xe758)
728#define EU_PERF_CNTL4 _MMIO(0xe45c)
729#define EU_PERF_CNTL5 _MMIO(0xe55c)
730#define EU_PERF_CNTL6 _MMIO(0xe65c)
709 731
710#define GDT_CHICKEN_BITS _MMIO(0x9840) 732#define GDT_CHICKEN_BITS _MMIO(0x9840)
711#define GT_NOA_ENABLE 0x00000080 733#define GT_NOA_ENABLE 0x00000080
@@ -1065,6 +1087,7 @@ enum skl_disp_power_wells {
1065 SKL_DISP_PW_MISC_IO, 1087 SKL_DISP_PW_MISC_IO,
1066 SKL_DISP_PW_DDI_A_E, 1088 SKL_DISP_PW_DDI_A_E,
1067 GLK_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E, 1089 GLK_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
1090 CNL_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
1068 SKL_DISP_PW_DDI_B, 1091 SKL_DISP_PW_DDI_B,
1069 SKL_DISP_PW_DDI_C, 1092 SKL_DISP_PW_DDI_C,
1070 SKL_DISP_PW_DDI_D, 1093 SKL_DISP_PW_DDI_D,
@@ -1072,6 +1095,10 @@ enum skl_disp_power_wells {
1072 GLK_DISP_PW_AUX_A = 8, 1095 GLK_DISP_PW_AUX_A = 8,
1073 GLK_DISP_PW_AUX_B, 1096 GLK_DISP_PW_AUX_B,
1074 GLK_DISP_PW_AUX_C, 1097 GLK_DISP_PW_AUX_C,
1098 CNL_DISP_PW_AUX_A = GLK_DISP_PW_AUX_A,
1099 CNL_DISP_PW_AUX_B = GLK_DISP_PW_AUX_B,
1100 CNL_DISP_PW_AUX_C = GLK_DISP_PW_AUX_C,
1101 CNL_DISP_PW_AUX_D,
1075 1102
1076 SKL_DISP_PW_1 = 14, 1103 SKL_DISP_PW_1 = 14,
1077 SKL_DISP_PW_2, 1104 SKL_DISP_PW_2,
@@ -1658,6 +1685,10 @@ enum skl_disp_power_wells {
1658#define PHY_RESERVED (1 << 7) 1685#define PHY_RESERVED (1 << 7)
1659#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC) 1686#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
1660 1687
1688#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014)
1689#define CL_POWER_DOWN_ENABLE (1 << 4)
1690#define SUS_CLOCK_CONFIG (3 << 0)
1691
1661#define _PORT_CL1CM_DW9_A 0x162024 1692#define _PORT_CL1CM_DW9_A 0x162024
1662#define _PORT_CL1CM_DW9_BC 0x6C024 1693#define _PORT_CL1CM_DW9_BC 0x6C024
1663#define IREF0RC_OFFSET_SHIFT 8 1694#define IREF0RC_OFFSET_SHIFT 8
@@ -1682,6 +1713,146 @@ enum skl_disp_power_wells {
1682#define OCL2_LDOFUSE_PWR_DIS (1 << 6) 1713#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
1683#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC) 1714#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
1684 1715
1716#define _CNL_PORT_PCS_DW1_GRP_AE 0x162304
1717#define _CNL_PORT_PCS_DW1_GRP_B 0x162384
1718#define _CNL_PORT_PCS_DW1_GRP_C 0x162B04
1719#define _CNL_PORT_PCS_DW1_GRP_D 0x162B84
1720#define _CNL_PORT_PCS_DW1_GRP_F 0x162A04
1721#define _CNL_PORT_PCS_DW1_LN0_AE 0x162404
1722#define _CNL_PORT_PCS_DW1_LN0_B 0x162604
1723#define _CNL_PORT_PCS_DW1_LN0_C 0x162C04
1724#define _CNL_PORT_PCS_DW1_LN0_D 0x162E04
1725#define _CNL_PORT_PCS_DW1_LN0_F 0x162804
1726#define CNL_PORT_PCS_DW1_GRP(port) _MMIO_PORT6(port, \
1727 _CNL_PORT_PCS_DW1_GRP_AE, \
1728 _CNL_PORT_PCS_DW1_GRP_B, \
1729 _CNL_PORT_PCS_DW1_GRP_C, \
1730 _CNL_PORT_PCS_DW1_GRP_D, \
1731 _CNL_PORT_PCS_DW1_GRP_AE, \
1732 _CNL_PORT_PCS_DW1_GRP_F)
1733#define CNL_PORT_PCS_DW1_LN0(port) _MMIO_PORT6(port, \
1734 _CNL_PORT_PCS_DW1_LN0_AE, \
1735 _CNL_PORT_PCS_DW1_LN0_B, \
1736 _CNL_PORT_PCS_DW1_LN0_C, \
1737 _CNL_PORT_PCS_DW1_LN0_D, \
1738 _CNL_PORT_PCS_DW1_LN0_AE, \
1739 _CNL_PORT_PCS_DW1_LN0_F)
1740#define COMMON_KEEPER_EN (1 << 26)
1741
1742#define _CNL_PORT_TX_DW2_GRP_AE 0x162348
1743#define _CNL_PORT_TX_DW2_GRP_B 0x1623C8
1744#define _CNL_PORT_TX_DW2_GRP_C 0x162B48
1745#define _CNL_PORT_TX_DW2_GRP_D 0x162BC8
1746#define _CNL_PORT_TX_DW2_GRP_F 0x162A48
1747#define _CNL_PORT_TX_DW2_LN0_AE 0x162448
1748#define _CNL_PORT_TX_DW2_LN0_B 0x162648
1749#define _CNL_PORT_TX_DW2_LN0_C 0x162C48
1750#define _CNL_PORT_TX_DW2_LN0_D 0x162E48
1751#define _CNL_PORT_TX_DW2_LN0_F 0x162A48
1752#define CNL_PORT_TX_DW2_GRP(port) _MMIO_PORT6(port, \
1753 _CNL_PORT_TX_DW2_GRP_AE, \
1754 _CNL_PORT_TX_DW2_GRP_B, \
1755 _CNL_PORT_TX_DW2_GRP_C, \
1756 _CNL_PORT_TX_DW2_GRP_D, \
1757 _CNL_PORT_TX_DW2_GRP_AE, \
1758 _CNL_PORT_TX_DW2_GRP_F)
1759#define CNL_PORT_TX_DW2_LN0(port) _MMIO_PORT6(port, \
1760 _CNL_PORT_TX_DW2_LN0_AE, \
1761 _CNL_PORT_TX_DW2_LN0_B, \
1762 _CNL_PORT_TX_DW2_LN0_C, \
1763 _CNL_PORT_TX_DW2_LN0_D, \
1764 _CNL_PORT_TX_DW2_LN0_AE, \
1765 _CNL_PORT_TX_DW2_LN0_F)
1766#define SWING_SEL_UPPER(x) ((x >> 3) << 15)
1767#define SWING_SEL_LOWER(x) ((x & 0x7) << 11)
1768#define RCOMP_SCALAR(x) ((x) << 0)
1769
1770#define _CNL_PORT_TX_DW4_GRP_AE 0x162350
1771#define _CNL_PORT_TX_DW4_GRP_B 0x1623D0
1772#define _CNL_PORT_TX_DW4_GRP_C 0x162B50
1773#define _CNL_PORT_TX_DW4_GRP_D 0x162BD0
1774#define _CNL_PORT_TX_DW4_GRP_F 0x162A50
1775#define _CNL_PORT_TX_DW4_LN0_AE 0x162450
1776#define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0
1777#define _CNL_PORT_TX_DW4_LN0_B 0x162650
1778#define _CNL_PORT_TX_DW4_LN0_C 0x162C50
1779#define _CNL_PORT_TX_DW4_LN0_D 0x162E50
1780#define _CNL_PORT_TX_DW4_LN0_F 0x162850
1781#define CNL_PORT_TX_DW4_GRP(port) _MMIO_PORT6(port, \
1782 _CNL_PORT_TX_DW4_GRP_AE, \
1783 _CNL_PORT_TX_DW4_GRP_B, \
1784 _CNL_PORT_TX_DW4_GRP_C, \
1785 _CNL_PORT_TX_DW4_GRP_D, \
1786 _CNL_PORT_TX_DW4_GRP_AE, \
1787 _CNL_PORT_TX_DW4_GRP_F)
1788#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO_PORT6_LN(port, ln, \
1789 _CNL_PORT_TX_DW4_LN0_AE, \
1790 _CNL_PORT_TX_DW4_LN1_AE, \
1791 _CNL_PORT_TX_DW4_LN0_B, \
1792 _CNL_PORT_TX_DW4_LN0_C, \
1793 _CNL_PORT_TX_DW4_LN0_D, \
1794 _CNL_PORT_TX_DW4_LN0_AE, \
1795 _CNL_PORT_TX_DW4_LN0_F)
1796#define LOADGEN_SELECT (1 << 31)
1797#define POST_CURSOR_1(x) ((x) << 12)
1798#define POST_CURSOR_2(x) ((x) << 6)
1799#define CURSOR_COEFF(x) ((x) << 0)
1800
1801#define _CNL_PORT_TX_DW5_GRP_AE 0x162354
1802#define _CNL_PORT_TX_DW5_GRP_B 0x1623D4
1803#define _CNL_PORT_TX_DW5_GRP_C 0x162B54
1804#define _CNL_PORT_TX_DW5_GRP_D 0x162BD4
1805#define _CNL_PORT_TX_DW5_GRP_F 0x162A54
1806#define _CNL_PORT_TX_DW5_LN0_AE 0x162454
1807#define _CNL_PORT_TX_DW5_LN0_B 0x162654
1808#define _CNL_PORT_TX_DW5_LN0_C 0x162C54
1809#define _CNL_PORT_TX_DW5_LN0_D 0x162ED4
1810#define _CNL_PORT_TX_DW5_LN0_F 0x162854
1811#define CNL_PORT_TX_DW5_GRP(port) _MMIO_PORT6(port, \
1812 _CNL_PORT_TX_DW5_GRP_AE, \
1813 _CNL_PORT_TX_DW5_GRP_B, \
1814 _CNL_PORT_TX_DW5_GRP_C, \
1815 _CNL_PORT_TX_DW5_GRP_D, \
1816 _CNL_PORT_TX_DW5_GRP_AE, \
1817 _CNL_PORT_TX_DW5_GRP_F)
1818#define CNL_PORT_TX_DW5_LN0(port) _MMIO_PORT6(port, \
1819 _CNL_PORT_TX_DW5_LN0_AE, \
1820 _CNL_PORT_TX_DW5_LN0_B, \
1821 _CNL_PORT_TX_DW5_LN0_C, \
1822 _CNL_PORT_TX_DW5_LN0_D, \
1823 _CNL_PORT_TX_DW5_LN0_AE, \
1824 _CNL_PORT_TX_DW5_LN0_F)
1825#define TX_TRAINING_EN (1 << 31)
1826#define TAP3_DISABLE (1 << 29)
1827#define SCALING_MODE_SEL(x) ((x) << 18)
1828#define RTERM_SELECT(x) ((x) << 3)
1829
1830#define _CNL_PORT_TX_DW7_GRP_AE 0x16235C
1831#define _CNL_PORT_TX_DW7_GRP_B 0x1623DC
1832#define _CNL_PORT_TX_DW7_GRP_C 0x162B5C
1833#define _CNL_PORT_TX_DW7_GRP_D 0x162BDC
1834#define _CNL_PORT_TX_DW7_GRP_F 0x162A5C
1835#define _CNL_PORT_TX_DW7_LN0_AE 0x16245C
1836#define _CNL_PORT_TX_DW7_LN0_B 0x16265C
1837#define _CNL_PORT_TX_DW7_LN0_C 0x162C5C
1838#define _CNL_PORT_TX_DW7_LN0_D 0x162EDC
1839#define _CNL_PORT_TX_DW7_LN0_F 0x16285C
1840#define CNL_PORT_TX_DW7_GRP(port) _MMIO_PORT6(port, \
1841 _CNL_PORT_TX_DW7_GRP_AE, \
1842 _CNL_PORT_TX_DW7_GRP_B, \
1843 _CNL_PORT_TX_DW7_GRP_C, \
1844 _CNL_PORT_TX_DW7_GRP_D, \
1845 _CNL_PORT_TX_DW7_GRP_AE, \
1846 _CNL_PORT_TX_DW7_GRP_F)
1847#define CNL_PORT_TX_DW7_LN0(port) _MMIO_PORT6(port, \
1848 _CNL_PORT_TX_DW7_LN0_AE, \
1849 _CNL_PORT_TX_DW7_LN0_B, \
1850 _CNL_PORT_TX_DW7_LN0_C, \
1851 _CNL_PORT_TX_DW7_LN0_D, \
1852 _CNL_PORT_TX_DW7_LN0_AE, \
1853 _CNL_PORT_TX_DW7_LN0_F)
1854#define N_SCALAR(x) ((x) << 24)
1855
1685/* The spec defines this only for BXT PHY0, but lets assume that this 1856/* The spec defines this only for BXT PHY0, but lets assume that this
1686 * would exist for PHY1 too if it had a second channel. 1857 * would exist for PHY1 too if it had a second channel.
1687 */ 1858 */
@@ -1690,6 +1861,23 @@ enum skl_disp_power_wells {
1690#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC) 1861#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
1691#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28) 1862#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
1692 1863
1864#define CNL_PORT_COMP_DW0 _MMIO(0x162100)
1865#define COMP_INIT (1 << 31)
1866#define CNL_PORT_COMP_DW1 _MMIO(0x162104)
1867#define CNL_PORT_COMP_DW3 _MMIO(0x16210c)
1868#define PROCESS_INFO_DOT_0 (0 << 26)
1869#define PROCESS_INFO_DOT_1 (1 << 26)
1870#define PROCESS_INFO_DOT_4 (2 << 26)
1871#define PROCESS_INFO_MASK (7 << 26)
1872#define PROCESS_INFO_SHIFT 26
1873#define VOLTAGE_INFO_0_85V (0 << 24)
1874#define VOLTAGE_INFO_0_95V (1 << 24)
1875#define VOLTAGE_INFO_1_05V (2 << 24)
1876#define VOLTAGE_INFO_MASK (3 << 24)
1877#define VOLTAGE_INFO_SHIFT 24
1878#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
1879#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
1880
1693/* BXT PHY Ref registers */ 1881/* BXT PHY Ref registers */
1694#define _PORT_REF_DW3_A 0x16218C 1882#define _PORT_REF_DW3_A 0x16218C
1695#define _PORT_REF_DW3_BC 0x6C18C 1883#define _PORT_REF_DW3_BC 0x6C18C
@@ -2325,6 +2513,9 @@ enum skl_disp_power_wells {
2325#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) 2513#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
2326#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10) 2514#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
2327 2515
2516#define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
2517#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
2518
2328/* Fuse readout registers for GT */ 2519/* Fuse readout registers for GT */
2329#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168) 2520#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168)
2330#define CHV_FGT_DISABLE_SS0 (1 << 10) 2521#define CHV_FGT_DISABLE_SS0 (1 << 10)
@@ -2507,10 +2698,6 @@ enum skl_disp_power_wells {
2507#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */ 2698#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
2508#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) 2699#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
2509 2700
2510#define FBC_STATUS2 _MMIO(0x43214)
2511#define IVB_FBC_COMPRESSION_MASK 0x7ff
2512#define BDW_FBC_COMPRESSION_MASK 0xfff
2513
2514#define FBC_LL_SIZE (1536) 2701#define FBC_LL_SIZE (1536)
2515 2702
2516#define FBC_LLC_READ_CTRL _MMIO(0x9044) 2703#define FBC_LLC_READ_CTRL _MMIO(0x9044)
@@ -2539,7 +2726,7 @@ enum skl_disp_power_wells {
2539#define DPFC_INVAL_SEG_SHIFT (16) 2726#define DPFC_INVAL_SEG_SHIFT (16)
2540#define DPFC_INVAL_SEG_MASK (0x07ff0000) 2727#define DPFC_INVAL_SEG_MASK (0x07ff0000)
2541#define DPFC_COMP_SEG_SHIFT (0) 2728#define DPFC_COMP_SEG_SHIFT (0)
2542#define DPFC_COMP_SEG_MASK (0x000003ff) 2729#define DPFC_COMP_SEG_MASK (0x000007ff)
2543#define DPFC_STATUS2 _MMIO(0x3214) 2730#define DPFC_STATUS2 _MMIO(0x3214)
2544#define DPFC_FENCE_YOFF _MMIO(0x3218) 2731#define DPFC_FENCE_YOFF _MMIO(0x3218)
2545#define DPFC_CHICKEN _MMIO(0x3224) 2732#define DPFC_CHICKEN _MMIO(0x3224)
@@ -2553,6 +2740,10 @@ enum skl_disp_power_wells {
2553#define DPFC_RESERVED (0x1FFFFF00) 2740#define DPFC_RESERVED (0x1FFFFF00)
2554#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c) 2741#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c)
2555#define ILK_DPFC_STATUS _MMIO(0x43210) 2742#define ILK_DPFC_STATUS _MMIO(0x43210)
2743#define ILK_DPFC_COMP_SEG_MASK 0x7ff
2744#define IVB_FBC_STATUS2 _MMIO(0x43214)
2745#define IVB_FBC_COMP_SEG_MASK 0x7ff
2746#define BDW_FBC_COMP_SEG_MASK 0xfff
2556#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) 2747#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
2557#define ILK_DPFC_CHICKEN _MMIO(0x43224) 2748#define ILK_DPFC_CHICKEN _MMIO(0x43224)
2558#define ILK_DPFC_DISABLE_DUMMY0 (1<<8) 2749#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
@@ -2626,9 +2817,10 @@ enum skl_disp_power_wells {
2626#define GMBUS_PIN_DPB 5 /* SDVO, HDMIB */ 2817#define GMBUS_PIN_DPB 5 /* SDVO, HDMIB */
2627#define GMBUS_PIN_DPD 6 /* HDMID */ 2818#define GMBUS_PIN_DPD 6 /* HDMID */
2628#define GMBUS_PIN_RESERVED 7 /* 7 reserved */ 2819#define GMBUS_PIN_RESERVED 7 /* 7 reserved */
2629#define GMBUS_PIN_1_BXT 1 2820#define GMBUS_PIN_1_BXT 1 /* BXT+ (atom) and CNP+ (big core) */
2630#define GMBUS_PIN_2_BXT 2 2821#define GMBUS_PIN_2_BXT 2
2631#define GMBUS_PIN_3_BXT 3 2822#define GMBUS_PIN_3_BXT 3
2823#define GMBUS_PIN_4_CNP 4
2632#define GMBUS_NUM_PINS 7 /* including 0 */ 2824#define GMBUS_NUM_PINS 7 /* including 0 */
2633#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */ 2825#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
2634#define GMBUS_SW_CLR_INT (1<<31) 2826#define GMBUS_SW_CLR_INT (1<<31)
@@ -6506,6 +6698,9 @@ enum {
6506#define GLK_CL1_PWR_DOWN (1 << 11) 6698#define GLK_CL1_PWR_DOWN (1 << 11)
6507#define GLK_CL2_PWR_DOWN (1 << 12) 6699#define GLK_CL2_PWR_DOWN (1 << 12)
6508 6700
6701#define CHICKEN_MISC_2 _MMIO(0x42084)
6702#define COMP_PWR_DOWN (1 << 23)
6703
6509#define _CHICKEN_PIPESL_1_A 0x420b0 6704#define _CHICKEN_PIPESL_1_A 0x420b0
6510#define _CHICKEN_PIPESL_1_B 0x420b4 6705#define _CHICKEN_PIPESL_1_B 0x420b4
6511#define HSW_FBCQ_DIS (1 << 22) 6706#define HSW_FBCQ_DIS (1 << 22)
@@ -6546,6 +6741,9 @@ enum {
6546#define SKL_DFSM_PIPE_B_DISABLE (1 << 21) 6741#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
6547#define SKL_DFSM_PIPE_C_DISABLE (1 << 28) 6742#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
6548 6743
6744#define SKL_DSSM _MMIO(0x51004)
6745#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31)
6746
6549#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0) 6747#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
6550#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14) 6748#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14)
6551 6749
@@ -6838,6 +7036,10 @@ enum {
6838#define FDL_TP2_TIMER_SHIFT 10 7036#define FDL_TP2_TIMER_SHIFT 10
6839#define FDL_TP2_TIMER_MASK (3<<10) 7037#define FDL_TP2_TIMER_MASK (3<<10)
6840#define RAWCLK_FREQ_MASK 0x3ff 7038#define RAWCLK_FREQ_MASK 0x3ff
7039#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
7040#define CNP_RAWCLK_DIV(div) ((div) << 16)
7041#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
7042#define CNP_RAWCLK_FRAC(frac) ((frac) << 26)
6841 7043
6842#define PCH_DPLL_TMR_CFG _MMIO(0xc6208) 7044#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
6843 7045
@@ -7792,13 +7994,6 @@ enum {
7792#define SKL_FUSE_PG1_DIST_STATUS (1<<26) 7994#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
7793#define SKL_FUSE_PG2_DIST_STATUS (1<<25) 7995#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
7794 7996
7795/* Decoupled MMIO register pair for kernel driver */
7796#define GEN9_DECOUPLED_REG0_DW0 _MMIO(0xF00)
7797#define GEN9_DECOUPLED_REG0_DW1 _MMIO(0xF04)
7798#define GEN9_DECOUPLED_DW1_GO (1<<31)
7799#define GEN9_DECOUPLED_PD_SHIFT 28
7800#define GEN9_DECOUPLED_OP_SHIFT 24
7801
7802/* Per-pipe DDI Function Control */ 7997/* Per-pipe DDI Function Control */
7803#define _TRANS_DDI_FUNC_CTL_A 0x60400 7998#define _TRANS_DDI_FUNC_CTL_A 0x60400
7804#define _TRANS_DDI_FUNC_CTL_B 0x61400 7999#define _TRANS_DDI_FUNC_CTL_B 0x61400
@@ -8107,6 +8302,61 @@ enum {
8107#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1) 8302#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
8108#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2) 8303#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
8109 8304
8305/*
8306 * CNL Clocks
8307 */
8308#define DPCLKA_CFGCR0 _MMIO(0x6C200)
8309#define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port)+10))
8310#define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << ((port)*2))
8311#define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port)*2)
8312#define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << ((port)*2))
8313
8314/* CNL PLL */
8315#define DPLL0_ENABLE 0x46010
8316#define DPLL1_ENABLE 0x46014
8317#define PLL_ENABLE (1 << 31)
8318#define PLL_LOCK (1 << 30)
8319#define PLL_POWER_ENABLE (1 << 27)
8320#define PLL_POWER_STATE (1 << 26)
8321#define CNL_DPLL_ENABLE(pll) _MMIO_PLL(pll, DPLL0_ENABLE, DPLL1_ENABLE)
8322
8323#define _CNL_DPLL0_CFGCR0 0x6C000
8324#define _CNL_DPLL1_CFGCR0 0x6C080
8325#define DPLL_CFGCR0_HDMI_MODE (1 << 30)
8326#define DPLL_CFGCR0_SSC_ENABLE (1 << 29)
8327#define DPLL_CFGCR0_LINK_RATE_MASK (0xf << 25)
8328#define DPLL_CFGCR0_LINK_RATE_2700 (0 << 25)
8329#define DPLL_CFGCR0_LINK_RATE_1350 (1 << 25)
8330#define DPLL_CFGCR0_LINK_RATE_810 (2 << 25)
8331#define DPLL_CFGCR0_LINK_RATE_1620 (3 << 25)
8332#define DPLL_CFGCR0_LINK_RATE_1080 (4 << 25)
8333#define DPLL_CFGCR0_LINK_RATE_2160 (5 << 25)
8334#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
8335#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
8336#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
8337#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
8338#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
8339#define CNL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0)
8340
8341#define _CNL_DPLL0_CFGCR1 0x6C004
8342#define _CNL_DPLL1_CFGCR1 0x6C084
8343#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
8344#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
8345#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
8346#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
8347#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
8348#define DPLL_CFGCR1_KDIV_1 (1 << 6)
8349#define DPLL_CFGCR1_KDIV_2 (2 << 6)
8350#define DPLL_CFGCR1_KDIV_4 (4 << 6)
8351#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
8352#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
8353#define DPLL_CFGCR1_PDIV_2 (1 << 2)
8354#define DPLL_CFGCR1_PDIV_3 (2 << 2)
8355#define DPLL_CFGCR1_PDIV_5 (4 << 2)
8356#define DPLL_CFGCR1_PDIV_7 (8 << 2)
8357#define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0)
8358#define CNL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR1, _CNL_DPLL1_CFGCR1)
8359
8110/* BXT display engine PLL */ 8360/* BXT display engine PLL */
8111#define BXT_DE_PLL_CTL _MMIO(0x6d000) 8361#define BXT_DE_PLL_CTL _MMIO(0x6d000)
8112#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */ 8362#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
@@ -8115,6 +8365,8 @@ enum {
8115#define BXT_DE_PLL_ENABLE _MMIO(0x46070) 8365#define BXT_DE_PLL_ENABLE _MMIO(0x46070)
8116#define BXT_DE_PLL_PLL_ENABLE (1 << 31) 8366#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
8117#define BXT_DE_PLL_LOCK (1 << 30) 8367#define BXT_DE_PLL_LOCK (1 << 30)
8368#define CNL_CDCLK_PLL_RATIO(x) (x)
8369#define CNL_CDCLK_PLL_RATIO_MASK 0xff
8118 8370
8119/* GEN9 DC */ 8371/* GEN9 DC */
8120#define DC_STATE_EN _MMIO(0x45504) 8372#define DC_STATE_EN _MMIO(0x45504)
@@ -8148,6 +8400,7 @@ enum {
8148/* SFUSE_STRAP */ 8400/* SFUSE_STRAP */
8149#define SFUSE_STRAP _MMIO(0xc2014) 8401#define SFUSE_STRAP _MMIO(0xc2014)
8150#define SFUSE_STRAP_FUSE_LOCK (1<<13) 8402#define SFUSE_STRAP_FUSE_LOCK (1<<13)
8403#define SFUSE_STRAP_RAW_FREQUENCY (1<<8)
8151#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7) 8404#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
8152#define SFUSE_STRAP_CRT_DISABLED (1<<6) 8405#define SFUSE_STRAP_CRT_DISABLED (1<<6)
8153#define SFUSE_STRAP_DDIB_DETECTED (1<<2) 8406#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 16ecd1ab108d..12fc250b47b9 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -99,6 +99,11 @@
99 __T; \ 99 __T; \
100}) 100})
101 101
102#define u64_to_ptr(T, x) ({ \
103 typecheck(u64, x); \
104 (T *)(uintptr_t)(x); \
105})
106
102#define __mask_next_bit(mask) ({ \ 107#define __mask_next_bit(mask) ({ \
103 int __idx = ffs(mask) - 1; \ 108 int __idx = ffs(mask) - 1; \
104 mask &= ~BIT(__idx); \ 109 mask &= ~BIT(__idx); \
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 2e739018fb4c..cf7a958e4d3c 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -90,6 +90,18 @@ struct _balloon_info_ {
90 90
91static struct _balloon_info_ bl_info; 91static struct _balloon_info_ bl_info;
92 92
93static void vgt_deballoon_space(struct i915_ggtt *ggtt,
94 struct drm_mm_node *node)
95{
96 DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
97 node->start,
98 node->start + node->size,
99 node->size / 1024);
100
101 ggtt->base.reserved -= node->size;
102 drm_mm_remove_node(node);
103}
104
93/** 105/**
94 * intel_vgt_deballoon - deballoon reserved graphics address trunks 106 * intel_vgt_deballoon - deballoon reserved graphics address trunks
95 * @dev_priv: i915 device private data 107 * @dev_priv: i915 device private data
@@ -106,12 +118,8 @@ void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
106 118
107 DRM_DEBUG("VGT deballoon.\n"); 119 DRM_DEBUG("VGT deballoon.\n");
108 120
109 for (i = 0; i < 4; i++) { 121 for (i = 0; i < 4; i++)
110 if (bl_info.space[i].allocated) 122 vgt_deballoon_space(&dev_priv->ggtt, &bl_info.space[i]);
111 drm_mm_remove_node(&bl_info.space[i]);
112 }
113
114 memset(&bl_info, 0, sizeof(bl_info));
115} 123}
116 124
117static int vgt_balloon_space(struct i915_ggtt *ggtt, 125static int vgt_balloon_space(struct i915_ggtt *ggtt,
@@ -119,15 +127,20 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
119 unsigned long start, unsigned long end) 127 unsigned long start, unsigned long end)
120{ 128{
121 unsigned long size = end - start; 129 unsigned long size = end - start;
130 int ret;
122 131
123 if (start >= end) 132 if (start >= end)
124 return -EINVAL; 133 return -EINVAL;
125 134
126 DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n", 135 DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
127 start, end, size / 1024); 136 start, end, size / 1024);
128 return i915_gem_gtt_reserve(&ggtt->base, node, 137 ret = i915_gem_gtt_reserve(&ggtt->base, node,
129 size, start, I915_COLOR_UNEVICTABLE, 138 size, start, I915_COLOR_UNEVICTABLE,
130 0); 139 0);
140 if (!ret)
141 ggtt->base.reserved += size;
142
143 return ret;
131} 144}
132 145
133/** 146/**
@@ -220,7 +233,7 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
220 ret = vgt_balloon_space(ggtt, &bl_info.space[3], 233 ret = vgt_balloon_space(ggtt, &bl_info.space[3],
221 unmappable_end, ggtt_end); 234 unmappable_end, ggtt_end);
222 if (ret) 235 if (ret)
223 goto err; 236 goto err_upon_mappable;
224 } 237 }
225 238
226 /* Mappable graphic memory ballooning */ 239 /* Mappable graphic memory ballooning */
@@ -229,7 +242,7 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
229 0, mappable_base); 242 0, mappable_base);
230 243
231 if (ret) 244 if (ret)
232 goto err; 245 goto err_upon_unmappable;
233 } 246 }
234 247
235 if (mappable_end < ggtt->mappable_end) { 248 if (mappable_end < ggtt->mappable_end) {
@@ -237,14 +250,19 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
237 mappable_end, ggtt->mappable_end); 250 mappable_end, ggtt->mappable_end);
238 251
239 if (ret) 252 if (ret)
240 goto err; 253 goto err_below_mappable;
241 } 254 }
242 255
243 DRM_INFO("VGT balloon successfully\n"); 256 DRM_INFO("VGT balloon successfully\n");
244 return 0; 257 return 0;
245 258
259err_below_mappable:
260 vgt_deballoon_space(ggtt, &bl_info.space[0]);
261err_upon_unmappable:
262 vgt_deballoon_space(ggtt, &bl_info.space[3]);
263err_upon_mappable:
264 vgt_deballoon_space(ggtt, &bl_info.space[2]);
246err: 265err:
247 DRM_ERROR("VGT balloon fail\n"); 266 DRM_ERROR("VGT balloon fail\n");
248 intel_vgt_deballoon(dev_priv);
249 return ret; 267 return ret;
250} 268}
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 1aba47024656..532c709febbd 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -85,12 +85,12 @@ vma_create(struct drm_i915_gem_object *obj,
85 if (vma == NULL) 85 if (vma == NULL)
86 return ERR_PTR(-ENOMEM); 86 return ERR_PTR(-ENOMEM);
87 87
88 INIT_LIST_HEAD(&vma->exec_list);
89 for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) 88 for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
90 init_request_active(&vma->last_read[i], i915_vma_retire); 89 init_request_active(&vma->last_read[i], i915_vma_retire);
91 init_request_active(&vma->last_fence, NULL); 90 init_request_active(&vma->last_fence, NULL);
92 vma->vm = vm; 91 vma->vm = vm;
93 vma->obj = obj; 92 vma->obj = obj;
93 vma->resv = obj->resv;
94 vma->size = obj->base.size; 94 vma->size = obj->base.size;
95 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; 95 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
96 96
@@ -464,7 +464,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
464 size, obj->base.size, 464 size, obj->base.size,
465 flags & PIN_MAPPABLE ? "mappable" : "total", 465 flags & PIN_MAPPABLE ? "mappable" : "total",
466 end); 466 end);
467 return -E2BIG; 467 return -ENOSPC;
468 } 468 }
469 469
470 ret = i915_gem_object_pin_pages(obj); 470 ret = i915_gem_object_pin_pages(obj);
@@ -577,7 +577,7 @@ err_unpin:
577 return ret; 577 return ret;
578} 578}
579 579
580void i915_vma_destroy(struct i915_vma *vma) 580static void i915_vma_destroy(struct i915_vma *vma)
581{ 581{
582 GEM_BUG_ON(vma->node.allocated); 582 GEM_BUG_ON(vma->node.allocated);
583 GEM_BUG_ON(i915_vma_is_active(vma)); 583 GEM_BUG_ON(i915_vma_is_active(vma));
@@ -591,11 +591,33 @@ void i915_vma_destroy(struct i915_vma *vma)
591 kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma); 591 kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
592} 592}
593 593
594void i915_vma_unlink_ctx(struct i915_vma *vma)
595{
596 struct i915_gem_context *ctx = vma->ctx;
597
598 if (ctx->vma_lut.ht_size & I915_CTX_RESIZE_IN_PROGRESS) {
599 cancel_work_sync(&ctx->vma_lut.resize);
600 ctx->vma_lut.ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
601 }
602
603 __hlist_del(&vma->ctx_node);
604 ctx->vma_lut.ht_count--;
605
606 if (i915_vma_is_ggtt(vma))
607 vma->obj->vma_hashed = NULL;
608 vma->ctx = NULL;
609
610 i915_vma_put(vma);
611}
612
594void i915_vma_close(struct i915_vma *vma) 613void i915_vma_close(struct i915_vma *vma)
595{ 614{
596 GEM_BUG_ON(i915_vma_is_closed(vma)); 615 GEM_BUG_ON(i915_vma_is_closed(vma));
597 vma->flags |= I915_VMA_CLOSED; 616 vma->flags |= I915_VMA_CLOSED;
598 617
618 if (vma->ctx)
619 i915_vma_unlink_ctx(vma);
620
599 list_del(&vma->obj_link); 621 list_del(&vma->obj_link);
600 rb_erase(&vma->obj_node, &vma->obj->vma_tree); 622 rb_erase(&vma->obj_node, &vma->obj->vma_tree);
601 623
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 2e03f81dddbe..4a673fc1a432 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -50,6 +50,7 @@ struct i915_vma {
50 struct drm_i915_gem_object *obj; 50 struct drm_i915_gem_object *obj;
51 struct i915_address_space *vm; 51 struct i915_address_space *vm;
52 struct drm_i915_fence_reg *fence; 52 struct drm_i915_fence_reg *fence;
53 struct reservation_object *resv; /** Alias of obj->resv */
53 struct sg_table *pages; 54 struct sg_table *pages;
54 void __iomem *iomap; 55 void __iomem *iomap;
55 u64 size; 56 u64 size;
@@ -99,16 +100,25 @@ struct i915_vma {
99 100
100 struct list_head obj_link; /* Link in the object's VMA list */ 101 struct list_head obj_link; /* Link in the object's VMA list */
101 struct rb_node obj_node; 102 struct rb_node obj_node;
103 struct hlist_node obj_hash;
102 104
103 /** This vma's place in the batchbuffer or on the eviction list */ 105 /** This vma's place in the execbuf reservation list */
104 struct list_head exec_list; 106 struct list_head exec_link;
107 struct list_head reloc_link;
108
109 /** This vma's place in the eviction list */
110 struct list_head evict_link;
105 111
106 /** 112 /**
107 * Used for performing relocations during execbuffer insertion. 113 * Used for performing relocations during execbuffer insertion.
108 */ 114 */
109 struct hlist_node exec_node;
110 unsigned long exec_handle;
111 struct drm_i915_gem_exec_object2 *exec_entry; 115 struct drm_i915_gem_exec_object2 *exec_entry;
116 struct hlist_node exec_node;
117 u32 exec_handle;
118
119 struct i915_gem_context *ctx;
120 struct hlist_node ctx_node;
121 u32 ctx_handle;
112}; 122};
113 123
114struct i915_vma * 124struct i915_vma *
@@ -232,8 +242,8 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
232 u64 size, u64 alignment, u64 flags); 242 u64 size, u64 alignment, u64 flags);
233void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); 243void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
234int __must_check i915_vma_unbind(struct i915_vma *vma); 244int __must_check i915_vma_unbind(struct i915_vma *vma);
245void i915_vma_unlink_ctx(struct i915_vma *vma);
235void i915_vma_close(struct i915_vma *vma); 246void i915_vma_close(struct i915_vma *vma);
236void i915_vma_destroy(struct i915_vma *vma);
237 247
238int __i915_vma_do_pin(struct i915_vma *vma, 248int __i915_vma_do_pin(struct i915_vma *vma,
239 u64 size, u64 alignment, u64 flags); 249 u64 size, u64 alignment, u64 flags);
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 50fb1f76cc5f..36d4e635e4ce 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -36,44 +36,121 @@
36#include "intel_drv.h" 36#include "intel_drv.h"
37 37
38/** 38/**
39 * intel_connector_atomic_get_property - fetch connector property value 39 * intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
40 * @connector: connector to fetch property for 40 * @connector: Connector to get the property for.
41 * @state: state containing the property value 41 * @state: Connector state to retrieve the property from.
42 * @property: property to look up 42 * @property: Property to retrieve.
43 * @val: pointer to write property value into 43 * @val: Return value for the property.
44 * 44 *
45 * The DRM core does not store shadow copies of properties for 45 * Returns the atomic property value for a digital connector.
46 * atomic-capable drivers. This entrypoint is used to fetch
47 * the current value of a driver-specific connector property.
48 */ 46 */
49int 47int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
50intel_connector_atomic_get_property(struct drm_connector *connector, 48 const struct drm_connector_state *state,
51 const struct drm_connector_state *state, 49 struct drm_property *property,
52 struct drm_property *property, 50 uint64_t *val)
53 uint64_t *val)
54{ 51{
55 int i; 52 struct drm_device *dev = connector->dev;
53 struct drm_i915_private *dev_priv = to_i915(dev);
54 struct intel_digital_connector_state *intel_conn_state =
55 to_intel_digital_connector_state(state);
56
57 if (property == dev_priv->force_audio_property)
58 *val = intel_conn_state->force_audio;
59 else if (property == dev_priv->broadcast_rgb_property)
60 *val = intel_conn_state->broadcast_rgb;
61 else {
62 DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
63 return -EINVAL;
64 }
56 65
57 /* 66 return 0;
58 * TODO: We only have atomic modeset for planes at the moment, so the 67}
59 * crtc/connector code isn't quite ready yet. Until it's ready, 68
60 * continue to look up all property values in the DRM's shadow copy 69/**
61 * in obj->properties->values[]. 70 * intel_digital_connector_atomic_set_property - hook for connector->atomic_set_property.
62 * 71 * @connector: Connector to set the property for.
63 * When the crtc/connector state work matures, this function should 72 * @state: Connector state to set the property on.
64 * be updated to read the values out of the state structure instead. 73 * @property: Property to set.
65 */ 74 * @val: New value for the property.
66 for (i = 0; i < connector->base.properties->count; i++) { 75 *
67 if (connector->base.properties->properties[i] == property) { 76 * Sets the atomic property value for a digital connector.
68 *val = connector->base.properties->values[i]; 77 */
69 return 0; 78int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
70 } 79 struct drm_connector_state *state,
80 struct drm_property *property,
81 uint64_t val)
82{
83 struct drm_device *dev = connector->dev;
84 struct drm_i915_private *dev_priv = to_i915(dev);
85 struct intel_digital_connector_state *intel_conn_state =
86 to_intel_digital_connector_state(state);
87
88 if (property == dev_priv->force_audio_property) {
89 intel_conn_state->force_audio = val;
90 return 0;
71 } 91 }
72 92
93 if (property == dev_priv->broadcast_rgb_property) {
94 intel_conn_state->broadcast_rgb = val;
95 return 0;
96 }
97
98 DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
73 return -EINVAL; 99 return -EINVAL;
74} 100}
75 101
76/* 102int intel_digital_connector_atomic_check(struct drm_connector *conn,
103 struct drm_connector_state *new_state)
104{
105 struct intel_digital_connector_state *new_conn_state =
106 to_intel_digital_connector_state(new_state);
107 struct drm_connector_state *old_state =
108 drm_atomic_get_old_connector_state(new_state->state, conn);
109 struct intel_digital_connector_state *old_conn_state =
110 to_intel_digital_connector_state(old_state);
111 struct drm_crtc_state *crtc_state;
112
113 if (!new_state->crtc)
114 return 0;
115
116 crtc_state = drm_atomic_get_new_crtc_state(new_state->state, new_state->crtc);
117
118 /*
119 * These properties are handled by fastset, and might not end
120 * up in a modeset.
121 */
122 if (new_conn_state->force_audio != old_conn_state->force_audio ||
123 new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
124 new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
125 new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
126 crtc_state->mode_changed = true;
127
128 return 0;
129}
130
131/**
132 * intel_digital_connector_duplicate_state - duplicate connector state
133 * @connector: digital connector
134 *
135 * Allocates and returns a copy of the connector state (both common and
136 * digital connector specific) for the specified connector.
137 *
138 * Returns: The newly allocated connector state, or NULL on failure.
139 */
140struct drm_connector_state *
141intel_digital_connector_duplicate_state(struct drm_connector *connector)
142{
143 struct intel_digital_connector_state *state;
144
145 state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
146 if (!state)
147 return NULL;
148
149 __drm_atomic_helper_connector_duplicate_state(connector, &state->base);
150 return &state->base;
151}
152
153/**
77 * intel_crtc_duplicate_state - duplicate crtc state 154 * intel_crtc_duplicate_state - duplicate crtc state
78 * @crtc: drm crtc 155 * @crtc: drm crtc
79 * 156 *
@@ -248,7 +325,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
248 } 325 }
249 326
250 /* set scaler mode */ 327 /* set scaler mode */
251 if (IS_GEMINILAKE(dev_priv)) { 328 if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
252 scaler_state->scalers[*scaler_id].mode = 0; 329 scaler_state->scalers[*scaler_id].mode = 0;
253 } else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) { 330 } else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
254 /* 331 /*
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 183afcb036aa..4e00e5cb9fa1 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -234,7 +234,7 @@ static void enable_fake_irq(struct intel_breadcrumbs *b)
234 mod_timer(&b->hangcheck, wait_timeout()); 234 mod_timer(&b->hangcheck, wait_timeout());
235} 235}
236 236
237static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b) 237static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
238{ 238{
239 struct intel_engine_cs *engine = 239 struct intel_engine_cs *engine =
240 container_of(b, struct intel_engine_cs, breadcrumbs); 240 container_of(b, struct intel_engine_cs, breadcrumbs);
@@ -242,7 +242,7 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
242 242
243 lockdep_assert_held(&b->irq_lock); 243 lockdep_assert_held(&b->irq_lock);
244 if (b->irq_armed) 244 if (b->irq_armed)
245 return; 245 return false;
246 246
247 /* The breadcrumb irq will be disarmed on the interrupt after the 247 /* The breadcrumb irq will be disarmed on the interrupt after the
248 * waiters are signaled. This gives us a single interrupt window in 248 * waiters are signaled. This gives us a single interrupt window in
@@ -260,7 +260,7 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
260 * implementation to call intel_engine_wakeup() 260 * implementation to call intel_engine_wakeup()
261 * itself when it wants to simulate a user interrupt, 261 * itself when it wants to simulate a user interrupt,
262 */ 262 */
263 return; 263 return true;
264 } 264 }
265 265
266 /* Since we are waiting on a request, the GPU should be busy 266 /* Since we are waiting on a request, the GPU should be busy
@@ -278,6 +278,7 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
278 } 278 }
279 279
280 enable_fake_irq(b); 280 enable_fake_irq(b);
281 return true;
281} 282}
282 283
283static inline struct intel_wait *to_wait(struct rb_node *node) 284static inline struct intel_wait *to_wait(struct rb_node *node)
@@ -329,7 +330,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
329{ 330{
330 struct intel_breadcrumbs *b = &engine->breadcrumbs; 331 struct intel_breadcrumbs *b = &engine->breadcrumbs;
331 struct rb_node **p, *parent, *completed; 332 struct rb_node **p, *parent, *completed;
332 bool first; 333 bool first, armed;
333 u32 seqno; 334 u32 seqno;
334 335
335 /* Insert the request into the retirement ordered list 336 /* Insert the request into the retirement ordered list
@@ -344,6 +345,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
344 * removing stale elements in the tree, we may be able to reduce the 345 * removing stale elements in the tree, we may be able to reduce the
345 * ping-pong between the old bottom-half and ourselves as first-waiter. 346 * ping-pong between the old bottom-half and ourselves as first-waiter.
346 */ 347 */
348 armed = false;
347 first = true; 349 first = true;
348 parent = NULL; 350 parent = NULL;
349 completed = NULL; 351 completed = NULL;
@@ -399,7 +401,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
399 * in the unlocked read of b->irq_seqno_bh in the irq handler) 401 * in the unlocked read of b->irq_seqno_bh in the irq handler)
400 * and so we miss the wake up. 402 * and so we miss the wake up.
401 */ 403 */
402 __intel_breadcrumbs_enable_irq(b); 404 armed = __intel_breadcrumbs_enable_irq(b);
403 spin_unlock(&b->irq_lock); 405 spin_unlock(&b->irq_lock);
404 } 406 }
405 407
@@ -426,20 +428,24 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
426 GEM_BUG_ON(!b->irq_armed); 428 GEM_BUG_ON(!b->irq_armed);
427 GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node); 429 GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);
428 430
429 return first; 431 return armed;
430} 432}
431 433
432bool intel_engine_add_wait(struct intel_engine_cs *engine, 434bool intel_engine_add_wait(struct intel_engine_cs *engine,
433 struct intel_wait *wait) 435 struct intel_wait *wait)
434{ 436{
435 struct intel_breadcrumbs *b = &engine->breadcrumbs; 437 struct intel_breadcrumbs *b = &engine->breadcrumbs;
436 bool first; 438 bool armed;
437 439
438 spin_lock_irq(&b->rb_lock); 440 spin_lock_irq(&b->rb_lock);
439 first = __intel_engine_add_wait(engine, wait); 441 armed = __intel_engine_add_wait(engine, wait);
440 spin_unlock_irq(&b->rb_lock); 442 spin_unlock_irq(&b->rb_lock);
443 if (armed)
444 return armed;
441 445
442 return first; 446 /* Make the caller recheck if its request has already started. */
447 return i915_seqno_passed(intel_engine_get_seqno(engine),
448 wait->seqno - 1);
443} 449}
444 450
445static inline bool chain_wakeup(struct rb_node *rb, int priority) 451static inline bool chain_wakeup(struct rb_node *rb, int priority)
@@ -672,8 +678,6 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
672{ 678{
673 struct intel_engine_cs *engine = request->engine; 679 struct intel_engine_cs *engine = request->engine;
674 struct intel_breadcrumbs *b = &engine->breadcrumbs; 680 struct intel_breadcrumbs *b = &engine->breadcrumbs;
675 struct rb_node *parent, **p;
676 bool first;
677 u32 seqno; 681 u32 seqno;
678 682
679 /* Note that we may be called from an interrupt handler on another 683 /* Note that we may be called from an interrupt handler on another
@@ -708,27 +712,36 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
708 */ 712 */
709 wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait); 713 wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);
710 714
711 /* Now insert ourselves into the retirement ordered list of signals 715 if (!__i915_gem_request_completed(request, seqno)) {
712 * on this engine. We track the oldest seqno as that will be the 716 struct rb_node *parent, **p;
713 * first signal to complete. 717 bool first;
714 */ 718
715 parent = NULL; 719 /* Now insert ourselves into the retirement ordered list of
716 first = true; 720 * signals on this engine. We track the oldest seqno as that
717 p = &b->signals.rb_node; 721 * will be the first signal to complete.
718 while (*p) { 722 */
719 parent = *p; 723 parent = NULL;
720 if (i915_seqno_passed(seqno, 724 first = true;
721 to_signaler(parent)->signaling.wait.seqno)) { 725 p = &b->signals.rb_node;
722 p = &parent->rb_right; 726 while (*p) {
723 first = false; 727 parent = *p;
724 } else { 728 if (i915_seqno_passed(seqno,
725 p = &parent->rb_left; 729 to_signaler(parent)->signaling.wait.seqno)) {
730 p = &parent->rb_right;
731 first = false;
732 } else {
733 p = &parent->rb_left;
734 }
726 } 735 }
736 rb_link_node(&request->signaling.node, parent, p);
737 rb_insert_color(&request->signaling.node, &b->signals);
738 if (first)
739 rcu_assign_pointer(b->first_signal, request);
740 } else {
741 __intel_engine_remove_wait(engine, &request->signaling.wait);
742 i915_gem_request_put(request);
743 wakeup = false;
727 } 744 }
728 rb_link_node(&request->signaling.node, parent, p);
729 rb_insert_color(&request->signaling.node, &b->signals);
730 if (first)
731 rcu_assign_pointer(b->first_signal, request);
732 745
733 spin_unlock(&b->rb_lock); 746 spin_unlock(&b->rb_lock);
734 747
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 29792972d55d..b8914db7d2e1 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1400,6 +1400,280 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
1400 bxt_set_cdclk(dev_priv, &cdclk_state); 1400 bxt_set_cdclk(dev_priv, &cdclk_state);
1401} 1401}
1402 1402
1403static int cnl_calc_cdclk(int max_pixclk)
1404{
1405 if (max_pixclk > 336000)
1406 return 528000;
1407 else if (max_pixclk > 168000)
1408 return 336000;
1409 else
1410 return 168000;
1411}
1412
1413static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
1414 struct intel_cdclk_state *cdclk_state)
1415{
1416 u32 val;
1417
1418 if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
1419 cdclk_state->ref = 24000;
1420 else
1421 cdclk_state->ref = 19200;
1422
1423 cdclk_state->vco = 0;
1424
1425 val = I915_READ(BXT_DE_PLL_ENABLE);
1426 if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
1427 return;
1428
1429 if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
1430 return;
1431
1432 cdclk_state->vco = (val & CNL_CDCLK_PLL_RATIO_MASK) * cdclk_state->ref;
1433}
1434
1435static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
1436 struct intel_cdclk_state *cdclk_state)
1437{
1438 u32 divider;
1439 int div;
1440
1441 cnl_cdclk_pll_update(dev_priv, cdclk_state);
1442
1443 cdclk_state->cdclk = cdclk_state->ref;
1444
1445 if (cdclk_state->vco == 0)
1446 return;
1447
1448 divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
1449
1450 switch (divider) {
1451 case BXT_CDCLK_CD2X_DIV_SEL_1:
1452 div = 2;
1453 break;
1454 case BXT_CDCLK_CD2X_DIV_SEL_2:
1455 div = 4;
1456 break;
1457 default:
1458 MISSING_CASE(divider);
1459 return;
1460 }
1461
1462 cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
1463}
1464
1465static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
1466{
1467 u32 val;
1468
1469 val = I915_READ(BXT_DE_PLL_ENABLE);
1470 val &= ~BXT_DE_PLL_PLL_ENABLE;
1471 I915_WRITE(BXT_DE_PLL_ENABLE, val);
1472
1473 /* Timeout 200us */
1474 if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
1475 DRM_ERROR("timout waiting for CDCLK PLL unlock\n");
1476
1477 dev_priv->cdclk.hw.vco = 0;
1478}
1479
1480static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
1481{
1482 int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
1483 u32 val;
1484
1485 val = CNL_CDCLK_PLL_RATIO(ratio);
1486 I915_WRITE(BXT_DE_PLL_ENABLE, val);
1487
1488 val |= BXT_DE_PLL_PLL_ENABLE;
1489 I915_WRITE(BXT_DE_PLL_ENABLE, val);
1490
1491 /* Timeout 200us */
1492 if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
1493 DRM_ERROR("timout waiting for CDCLK PLL lock\n");
1494
1495 dev_priv->cdclk.hw.vco = vco;
1496}
1497
1498static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
1499 const struct intel_cdclk_state *cdclk_state)
1500{
1501 int cdclk = cdclk_state->cdclk;
1502 int vco = cdclk_state->vco;
1503 u32 val, divider, pcu_ack;
1504 int ret;
1505
1506 mutex_lock(&dev_priv->rps.hw_lock);
1507 ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
1508 SKL_CDCLK_PREPARE_FOR_CHANGE,
1509 SKL_CDCLK_READY_FOR_CHANGE,
1510 SKL_CDCLK_READY_FOR_CHANGE, 3);
1511 mutex_unlock(&dev_priv->rps.hw_lock);
1512 if (ret) {
1513 DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
1514 ret);
1515 return;
1516 }
1517
1518 /* cdclk = vco / 2 / div{1,2} */
1519 switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
1520 case 4:
1521 divider = BXT_CDCLK_CD2X_DIV_SEL_2;
1522 break;
1523 case 2:
1524 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
1525 break;
1526 default:
1527 WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
1528 WARN_ON(vco != 0);
1529
1530 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
1531 break;
1532 }
1533
1534 switch (cdclk) {
1535 case 528000:
1536 pcu_ack = 2;
1537 break;
1538 case 336000:
1539 pcu_ack = 1;
1540 break;
1541 case 168000:
1542 default:
1543 pcu_ack = 0;
1544 break;
1545 }
1546
1547 if (dev_priv->cdclk.hw.vco != 0 &&
1548 dev_priv->cdclk.hw.vco != vco)
1549 cnl_cdclk_pll_disable(dev_priv);
1550
1551 if (dev_priv->cdclk.hw.vco != vco)
1552 cnl_cdclk_pll_enable(dev_priv, vco);
1553
1554 val = divider | skl_cdclk_decimal(cdclk);
1555 /*
1556 * FIXME if only the cd2x divider needs changing, it could be done
1557 * without shutting off the pipe (if only one pipe is active).
1558 */
1559 val |= BXT_CDCLK_CD2X_PIPE_NONE;
1560 I915_WRITE(CDCLK_CTL, val);
1561
1562 /* inform PCU of the change */
1563 mutex_lock(&dev_priv->rps.hw_lock);
1564 sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
1565 mutex_unlock(&dev_priv->rps.hw_lock);
1566
1567 intel_update_cdclk(dev_priv);
1568}
1569
1570static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
1571{
1572 int ratio;
1573
1574 if (cdclk == dev_priv->cdclk.hw.ref)
1575 return 0;
1576
1577 switch (cdclk) {
1578 default:
1579 MISSING_CASE(cdclk);
1580 case 168000:
1581 case 336000:
1582 ratio = dev_priv->cdclk.hw.ref == 19200 ? 35 : 28;
1583 break;
1584 case 528000:
1585 ratio = dev_priv->cdclk.hw.ref == 19200 ? 55 : 44;
1586 break;
1587 }
1588
1589 return dev_priv->cdclk.hw.ref * ratio;
1590}
1591
1592static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
1593{
1594 u32 cdctl, expected;
1595
1596 intel_update_cdclk(dev_priv);
1597
1598 if (dev_priv->cdclk.hw.vco == 0 ||
1599 dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
1600 goto sanitize;
1601
1602 /* DPLL okay; verify the cdclock
1603 *
1604 * Some BIOS versions leave an incorrect decimal frequency value and
1605 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
1606 * so sanitize this register.
1607 */
1608 cdctl = I915_READ(CDCLK_CTL);
1609 /*
1610 * Let's ignore the pipe field, since BIOS could have configured the
1611 * dividers both synching to an active pipe, or asynchronously
1612 * (PIPE_NONE).
1613 */
1614 cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
1615
1616 expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
1617 skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
1618
1619 if (cdctl == expected)
1620 /* All well; nothing to sanitize */
1621 return;
1622
1623sanitize:
1624 DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
1625
1626 /* force cdclk programming */
1627 dev_priv->cdclk.hw.cdclk = 0;
1628
1629 /* force full PLL disable + enable */
1630 dev_priv->cdclk.hw.vco = -1;
1631}
1632
1633/**
1634 * cnl_init_cdclk - Initialize CDCLK on CNL
1635 * @dev_priv: i915 device
1636 *
1637 * Initialize CDCLK for CNL. This is generally
1638 * done only during the display core initialization sequence,
1639 * after which the DMC will take care of turning CDCLK off/on
1640 * as needed.
1641 */
1642void cnl_init_cdclk(struct drm_i915_private *dev_priv)
1643{
1644 struct intel_cdclk_state cdclk_state;
1645
1646 cnl_sanitize_cdclk(dev_priv);
1647
1648 if (dev_priv->cdclk.hw.cdclk != 0 &&
1649 dev_priv->cdclk.hw.vco != 0)
1650 return;
1651
1652 cdclk_state = dev_priv->cdclk.hw;
1653
1654 cdclk_state.cdclk = cnl_calc_cdclk(0);
1655 cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
1656
1657 cnl_set_cdclk(dev_priv, &cdclk_state);
1658}
1659
1660/**
1661 * cnl_uninit_cdclk - Uninitialize CDCLK on CNL
1662 * @dev_priv: i915 device
1663 *
1664 * Uninitialize CDCLK for CNL. This is done only
1665 * during the display core uninitialization sequence.
1666 */
1667void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
1668{
1669 struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
1670
1671 cdclk_state.cdclk = cdclk_state.ref;
1672 cdclk_state.vco = 0;
1673
1674 cnl_set_cdclk(dev_priv, &cdclk_state);
1675}
1676
1403/** 1677/**
1404 * intel_cdclk_state_compare - Determine if two CDCLK states differ 1678 * intel_cdclk_state_compare - Determine if two CDCLK states differ
1405 * @a: first CDCLK state 1679 * @a: first CDCLK state
@@ -1458,7 +1732,9 @@ static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
1458 crtc_state->has_audio && 1732 crtc_state->has_audio &&
1459 crtc_state->port_clock >= 540000 && 1733 crtc_state->port_clock >= 540000 &&
1460 crtc_state->lane_count == 4) { 1734 crtc_state->lane_count == 4) {
1461 if (IS_GEMINILAKE(dev_priv)) 1735 if (IS_CANNONLAKE(dev_priv))
1736 pixel_rate = max(316800, pixel_rate);
1737 else if (IS_GEMINILAKE(dev_priv))
1462 pixel_rate = max(2 * 316800, pixel_rate); 1738 pixel_rate = max(2 * 316800, pixel_rate);
1463 else 1739 else
1464 pixel_rate = max(432000, pixel_rate); 1740 pixel_rate = max(432000, pixel_rate);
@@ -1504,7 +1780,7 @@ static int intel_max_pixel_rate(struct drm_atomic_state *state)
1504 1780
1505 pixel_rate = crtc_state->pixel_rate; 1781 pixel_rate = crtc_state->pixel_rate;
1506 1782
1507 if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv)) 1783 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
1508 pixel_rate = 1784 pixel_rate =
1509 bdw_adjust_min_pipe_pixel_rate(crtc_state, 1785 bdw_adjust_min_pipe_pixel_rate(crtc_state,
1510 pixel_rate); 1786 pixel_rate);
@@ -1665,6 +1941,40 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
1665 return 0; 1941 return 0;
1666} 1942}
1667 1943
1944static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
1945{
1946 struct drm_i915_private *dev_priv = to_i915(state->dev);
1947 struct intel_atomic_state *intel_state =
1948 to_intel_atomic_state(state);
1949 int max_pixclk = intel_max_pixel_rate(state);
1950 int cdclk, vco;
1951
1952 cdclk = cnl_calc_cdclk(max_pixclk);
1953 vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
1954
1955 if (cdclk > dev_priv->max_cdclk_freq) {
1956 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
1957 cdclk, dev_priv->max_cdclk_freq);
1958 return -EINVAL;
1959 }
1960
1961 intel_state->cdclk.logical.vco = vco;
1962 intel_state->cdclk.logical.cdclk = cdclk;
1963
1964 if (!intel_state->active_crtcs) {
1965 cdclk = cnl_calc_cdclk(0);
1966 vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
1967
1968 intel_state->cdclk.actual.vco = vco;
1969 intel_state->cdclk.actual.cdclk = cdclk;
1970 } else {
1971 intel_state->cdclk.actual =
1972 intel_state->cdclk.logical;
1973 }
1974
1975 return 0;
1976}
1977
1668static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) 1978static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
1669{ 1979{
1670 int max_cdclk_freq = dev_priv->max_cdclk_freq; 1980 int max_cdclk_freq = dev_priv->max_cdclk_freq;
@@ -1696,7 +2006,9 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
1696 */ 2006 */
1697void intel_update_max_cdclk(struct drm_i915_private *dev_priv) 2007void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
1698{ 2008{
1699 if (IS_GEN9_BC(dev_priv)) { 2009 if (IS_CANNONLAKE(dev_priv)) {
2010 dev_priv->max_cdclk_freq = 528000;
2011 } else if (IS_GEN9_BC(dev_priv)) {
1700 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; 2012 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
1701 int max_cdclk, vco; 2013 int max_cdclk, vco;
1702 2014
@@ -1780,6 +2092,30 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
1780 DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000)); 2092 DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
1781} 2093}
1782 2094
2095static int cnp_rawclk(struct drm_i915_private *dev_priv)
2096{
2097 u32 rawclk;
2098 int divider, fraction;
2099
2100 if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
2101 /* 24 MHz */
2102 divider = 24000;
2103 fraction = 0;
2104 } else {
2105 /* 19.2 MHz */
2106 divider = 19000;
2107 fraction = 200;
2108 }
2109
2110 rawclk = CNP_RAWCLK_DIV((divider / 1000) - 1);
2111 if (fraction)
2112 rawclk |= CNP_RAWCLK_FRAC(DIV_ROUND_CLOSEST(1000,
2113 fraction) - 1);
2114
2115 I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
2116 return divider + fraction;
2117}
2118
1783static int pch_rawclk(struct drm_i915_private *dev_priv) 2119static int pch_rawclk(struct drm_i915_private *dev_priv)
1784{ 2120{
1785 return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000; 2121 return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
@@ -1827,7 +2163,10 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
1827 */ 2163 */
1828void intel_update_rawclk(struct drm_i915_private *dev_priv) 2164void intel_update_rawclk(struct drm_i915_private *dev_priv)
1829{ 2165{
1830 if (HAS_PCH_SPLIT(dev_priv)) 2166
2167 if (HAS_PCH_CNP(dev_priv))
2168 dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
2169 else if (HAS_PCH_SPLIT(dev_priv))
1831 dev_priv->rawclk_freq = pch_rawclk(dev_priv); 2170 dev_priv->rawclk_freq = pch_rawclk(dev_priv);
1832 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2171 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1833 dev_priv->rawclk_freq = vlv_hrawclk(dev_priv); 2172 dev_priv->rawclk_freq = vlv_hrawclk(dev_priv);
@@ -1866,9 +2205,15 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
1866 dev_priv->display.set_cdclk = skl_set_cdclk; 2205 dev_priv->display.set_cdclk = skl_set_cdclk;
1867 dev_priv->display.modeset_calc_cdclk = 2206 dev_priv->display.modeset_calc_cdclk =
1868 skl_modeset_calc_cdclk; 2207 skl_modeset_calc_cdclk;
2208 } else if (IS_CANNONLAKE(dev_priv)) {
2209 dev_priv->display.set_cdclk = cnl_set_cdclk;
2210 dev_priv->display.modeset_calc_cdclk =
2211 cnl_modeset_calc_cdclk;
1869 } 2212 }
1870 2213
1871 if (IS_GEN9_BC(dev_priv)) 2214 if (IS_CANNONLAKE(dev_priv))
2215 dev_priv->display.get_cdclk = cnl_get_cdclk;
2216 else if (IS_GEN9_BC(dev_priv))
1872 dev_priv->display.get_cdclk = skl_get_cdclk; 2217 dev_priv->display.get_cdclk = skl_get_cdclk;
1873 else if (IS_GEN9_LP(dev_priv)) 2218 else if (IS_GEN9_LP(dev_priv))
1874 dev_priv->display.get_cdclk = bxt_get_cdclk; 2219 dev_priv->display.get_cdclk = bxt_get_cdclk;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 1575bde0cf90..965988f79a55 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -37,6 +37,9 @@
37#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin" 37#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
38#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) 38#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
39 39
40#define I915_CSR_CNL "i915/cnl_dmc_ver1_04.bin"
41#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
42
40#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin" 43#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
41MODULE_FIRMWARE(I915_CSR_KBL); 44MODULE_FIRMWARE(I915_CSR_KBL);
42#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) 45#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
@@ -238,7 +241,7 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
238 u32 *payload = dev_priv->csr.dmc_payload; 241 u32 *payload = dev_priv->csr.dmc_payload;
239 uint32_t i, fw_size; 242 uint32_t i, fw_size;
240 243
241 if (!IS_GEN9(dev_priv)) { 244 if (!HAS_CSR(dev_priv)) {
242 DRM_ERROR("No CSR support available for this platform\n"); 245 DRM_ERROR("No CSR support available for this platform\n");
243 return; 246 return;
244 } 247 }
@@ -289,9 +292,11 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
289 292
290 csr->version = css_header->version; 293 csr->version = css_header->version;
291 294
292 if (IS_GEMINILAKE(dev_priv)) { 295 if (IS_CANNONLAKE(dev_priv)) {
296 required_version = CNL_CSR_VERSION_REQUIRED;
297 } else if (IS_GEMINILAKE(dev_priv)) {
293 required_version = GLK_CSR_VERSION_REQUIRED; 298 required_version = GLK_CSR_VERSION_REQUIRED;
294 } else if (IS_KABYLAKE(dev_priv)) { 299 } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
295 required_version = KBL_CSR_VERSION_REQUIRED; 300 required_version = KBL_CSR_VERSION_REQUIRED;
296 } else if (IS_SKYLAKE(dev_priv)) { 301 } else if (IS_SKYLAKE(dev_priv)) {
297 required_version = SKL_CSR_VERSION_REQUIRED; 302 required_version = SKL_CSR_VERSION_REQUIRED;
@@ -438,9 +443,11 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
438 if (!HAS_CSR(dev_priv)) 443 if (!HAS_CSR(dev_priv))
439 return; 444 return;
440 445
441 if (IS_GEMINILAKE(dev_priv)) 446 if (IS_CANNONLAKE(dev_priv))
447 csr->fw_path = I915_CSR_CNL;
448 else if (IS_GEMINILAKE(dev_priv))
442 csr->fw_path = I915_CSR_GLK; 449 csr->fw_path = I915_CSR_GLK;
443 else if (IS_KABYLAKE(dev_priv)) 450 else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
444 csr->fw_path = I915_CSR_KBL; 451 csr->fw_path = I915_CSR_KBL;
445 else if (IS_SKYLAKE(dev_priv)) 452 else if (IS_SKYLAKE(dev_priv))
446 csr->fw_path = I915_CSR_SKL; 453 csr->fw_path = I915_CSR_SKL;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 0914ad96a71b..db8093863f0c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -353,6 +353,146 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
353 { 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */ 353 { 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */
354}; 354};
355 355
356struct cnl_ddi_buf_trans {
357 u32 dw2_swing_sel;
358 u32 dw7_n_scalar;
359 u32 dw4_cursor_coeff;
360 u32 dw4_post_cursor_2;
361 u32 dw4_post_cursor_1;
362};
363
364/* Voltage Swing Programming for VccIO 0.85V for DP */
365static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_85V[] = {
366 /* NT mV Trans mV db */
367 { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
368 { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
369 { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
370 { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
371 { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
372 { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
373 { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
374 { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */
375 { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
376 { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
377};
378
379/* Voltage Swing Programming for VccIO 0.85V for HDMI */
380static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_85V[] = {
381 /* NT mV Trans mV db */
382 { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
383 { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
384 { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
385 { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 */
386 { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
387 { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
388 { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
389};
390
391/* Voltage Swing Programming for VccIO 0.85V for eDP */
392static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_85V[] = {
393 /* NT mV Trans mV db */
394 { 0xA, 0x66, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
395 { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
396 { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
397 { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
398 { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
399 { 0xA, 0x66, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
400 { 0xB, 0x70, 0x3C, 0x00, 0x03 }, /* 460 600 2.3 */
401 { 0xC, 0x75, 0x3C, 0x00, 0x03 }, /* 537 700 2.3 */
402 { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
403};
404
405/* Voltage Swing Programming for VccIO 0.95V for DP */
406static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_95V[] = {
407 /* NT mV Trans mV db */
408 { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
409 { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
410 { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
411 { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
412 { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
413 { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
414 { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
415 { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */
416 { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
417 { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
418};
419
420/* Voltage Swing Programming for VccIO 0.95V for HDMI */
421static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_95V[] = {
422 /* NT mV Trans mV db */
423 { 0xA, 0x5C, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
424 { 0xB, 0x69, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
425 { 0x5, 0x76, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
426 { 0xA, 0x5E, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
427 { 0xB, 0x69, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
428 { 0xB, 0x79, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
429 { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */
430 { 0x5, 0x76, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */
431 { 0x6, 0x7D, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */
432 { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */
433 { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
434};
435
436/* Voltage Swing Programming for VccIO 0.95V for eDP */
437static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_95V[] = {
438 /* NT mV Trans mV db */
439 { 0xA, 0x61, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
440 { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
441 { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
442 { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
443 { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
444 { 0xA, 0x61, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
445 { 0xB, 0x68, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */
446 { 0xC, 0x6E, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */
447 { 0x4, 0x7F, 0x3A, 0x00, 0x05 }, /* 460 600 2.3 */
448 { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
449};
450
451/* Voltage Swing Programming for VccIO 1.05V for DP */
452static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_1_05V[] = {
453 /* NT mV Trans mV db */
454 { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
455 { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
456 { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
457 { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 400 1050 8.4 */
458 { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
459 { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
460 { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 550 1050 5.6 */
461 { 0x5, 0x76, 0x3E, 0x00, 0x01 }, /* 850 900 0.5 */
462 { 0x6, 0x7F, 0x36, 0x00, 0x09 }, /* 750 1050 2.9 */
463 { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
464};
465
466/* Voltage Swing Programming for VccIO 1.05V for HDMI */
467static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_1_05V[] = {
468 /* NT mV Trans mV db */
469 { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
470 { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
471 { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
472 { 0xA, 0x5B, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
473 { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
474 { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
475 { 0x6, 0x7C, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */
476 { 0x5, 0x70, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */
477 { 0x6, 0x7C, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */
478 { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */
479 { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
480};
481
482/* Voltage Swing Programming for VccIO 1.05V for eDP */
483static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
484 /* NT mV Trans mV db */
485 { 0xA, 0x5E, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
486 { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
487 { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
488 { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
489 { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
490 { 0xA, 0x5E, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
491 { 0xB, 0x64, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */
492 { 0xE, 0x6A, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */
493 { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
494};
495
356enum port intel_ddi_get_encoder_port(struct intel_encoder *encoder) 496enum port intel_ddi_get_encoder_port(struct intel_encoder *encoder)
357{ 497{
358 switch (encoder->type) { 498 switch (encoder->type) {
@@ -404,7 +544,7 @@ kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
404 if (IS_KBL_ULX(dev_priv)) { 544 if (IS_KBL_ULX(dev_priv)) {
405 *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp); 545 *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
406 return kbl_y_ddi_translations_dp; 546 return kbl_y_ddi_translations_dp;
407 } else if (IS_KBL_ULT(dev_priv)) { 547 } else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
408 *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp); 548 *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
409 return kbl_u_ddi_translations_dp; 549 return kbl_u_ddi_translations_dp;
410 } else { 550 } else {
@@ -420,7 +560,8 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
420 if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) { 560 if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
421 *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); 561 *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
422 return skl_y_ddi_translations_edp; 562 return skl_y_ddi_translations_edp;
423 } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) { 563 } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
564 IS_CFL_ULT(dev_priv)) {
424 *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp); 565 *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
425 return skl_u_ddi_translations_edp; 566 return skl_u_ddi_translations_edp;
426 } else { 567 } else {
@@ -429,7 +570,7 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
429 } 570 }
430 } 571 }
431 572
432 if (IS_KABYLAKE(dev_priv)) 573 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
433 return kbl_get_buf_trans_dp(dev_priv, n_entries); 574 return kbl_get_buf_trans_dp(dev_priv, n_entries);
434 else 575 else
435 return skl_get_buf_trans_dp(dev_priv, n_entries); 576 return skl_get_buf_trans_dp(dev_priv, n_entries);
@@ -485,7 +626,7 @@ static const struct ddi_buf_trans *
485intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv, 626intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
486 int *n_entries) 627 int *n_entries)
487{ 628{
488 if (IS_KABYLAKE(dev_priv)) { 629 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
489 return kbl_get_buf_trans_dp(dev_priv, n_entries); 630 return kbl_get_buf_trans_dp(dev_priv, n_entries);
490 } else if (IS_SKYLAKE(dev_priv)) { 631 } else if (IS_SKYLAKE(dev_priv)) {
491 return skl_get_buf_trans_dp(dev_priv, n_entries); 632 return skl_get_buf_trans_dp(dev_priv, n_entries);
@@ -505,7 +646,7 @@ static const struct ddi_buf_trans *
505intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv, 646intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv,
506 int *n_entries) 647 int *n_entries)
507{ 648{
508 if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv)) { 649 if (IS_GEN9_BC(dev_priv)) {
509 return skl_get_buf_trans_edp(dev_priv, n_entries); 650 return skl_get_buf_trans_edp(dev_priv, n_entries);
510 } else if (IS_BROADWELL(dev_priv)) { 651 } else if (IS_BROADWELL(dev_priv)) {
511 return bdw_get_buf_trans_edp(dev_priv, n_entries); 652 return bdw_get_buf_trans_edp(dev_priv, n_entries);
@@ -1478,7 +1619,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
1478 if (dp_iboost) { 1619 if (dp_iboost) {
1479 iboost = dp_iboost; 1620 iboost = dp_iboost;
1480 } else { 1621 } else {
1481 if (IS_KABYLAKE(dev_priv)) 1622 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
1482 ddi_translations = kbl_get_buf_trans_dp(dev_priv, 1623 ddi_translations = kbl_get_buf_trans_dp(dev_priv,
1483 &n_entries); 1624 &n_entries);
1484 else 1625 else
@@ -1580,6 +1721,200 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
1580 DP_TRAIN_VOLTAGE_SWING_MASK; 1721 DP_TRAIN_VOLTAGE_SWING_MASK;
1581} 1722}
1582 1723
1724static const struct cnl_ddi_buf_trans *
1725cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
1726 u32 voltage, int *n_entries)
1727{
1728 if (voltage == VOLTAGE_INFO_0_85V) {
1729 *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
1730 return cnl_ddi_translations_hdmi_0_85V;
1731 } else if (voltage == VOLTAGE_INFO_0_95V) {
1732 *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V);
1733 return cnl_ddi_translations_hdmi_0_95V;
1734 } else if (voltage == VOLTAGE_INFO_1_05V) {
1735 *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V);
1736 return cnl_ddi_translations_hdmi_1_05V;
1737 }
1738 return NULL;
1739}
1740
1741static const struct cnl_ddi_buf_trans *
1742cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv,
1743 u32 voltage, int *n_entries)
1744{
1745 if (voltage == VOLTAGE_INFO_0_85V) {
1746 *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
1747 return cnl_ddi_translations_dp_0_85V;
1748 } else if (voltage == VOLTAGE_INFO_0_95V) {
1749 *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V);
1750 return cnl_ddi_translations_dp_0_95V;
1751 } else if (voltage == VOLTAGE_INFO_1_05V) {
1752 *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V);
1753 return cnl_ddi_translations_dp_1_05V;
1754 }
1755 return NULL;
1756}
1757
1758static const struct cnl_ddi_buf_trans *
1759cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv,
1760 u32 voltage, int *n_entries)
1761{
1762 if (dev_priv->vbt.edp.low_vswing) {
1763 if (voltage == VOLTAGE_INFO_0_85V) {
1764 *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
1765 return cnl_ddi_translations_dp_0_85V;
1766 } else if (voltage == VOLTAGE_INFO_0_95V) {
1767 *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
1768 return cnl_ddi_translations_edp_0_95V;
1769 } else if (voltage == VOLTAGE_INFO_1_05V) {
1770 *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V);
1771 return cnl_ddi_translations_edp_1_05V;
1772 }
1773 return NULL;
1774 } else {
1775 return cnl_get_buf_trans_dp(dev_priv, voltage, n_entries);
1776 }
1777}
1778
1779static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
1780 u32 level, enum port port, int type)
1781{
1782 const struct cnl_ddi_buf_trans *ddi_translations = NULL;
1783 u32 n_entries, val, voltage;
1784 int ln;
1785
1786 /*
1787 * Values for each port type are listed in
1788 * voltage swing programming tables.
1789 * Vccio voltage found in PORT_COMP_DW3.
1790 */
1791 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
1792
1793 if (type == INTEL_OUTPUT_HDMI) {
1794 ddi_translations = cnl_get_buf_trans_hdmi(dev_priv,
1795 voltage, &n_entries);
1796 } else if (type == INTEL_OUTPUT_DP) {
1797 ddi_translations = cnl_get_buf_trans_dp(dev_priv,
1798 voltage, &n_entries);
1799 } else if (type == INTEL_OUTPUT_EDP) {
1800 ddi_translations = cnl_get_buf_trans_edp(dev_priv,
1801 voltage, &n_entries);
1802 }
1803
1804 if (ddi_translations == NULL) {
1805 MISSING_CASE(voltage);
1806 return;
1807 }
1808
1809 if (level >= n_entries) {
1810 DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", level, n_entries - 1);
1811 level = n_entries - 1;
1812 }
1813
1814 /* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */
1815 val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
1816 val |= SCALING_MODE_SEL(2);
1817 I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
1818
1819 /* Program PORT_TX_DW2 */
1820 val = I915_READ(CNL_PORT_TX_DW2_LN0(port));
1821 val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
1822 val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
1823 /* Rcomp scalar is fixed as 0x98 for every table entry */
1824 val |= RCOMP_SCALAR(0x98);
1825 I915_WRITE(CNL_PORT_TX_DW2_GRP(port), val);
1826
1827 /* Program PORT_TX_DW4 */
1828 /* We cannot write to GRP. It would overrite individual loadgen */
1829 for (ln = 0; ln < 4; ln++) {
1830 val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
1831 val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
1832 val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
1833 val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
1834 I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
1835 }
1836
1837 /* Program PORT_TX_DW5 */
1838 /* All DW5 values are fixed for every table entry */
1839 val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
1840 val |= RTERM_SELECT(6);
1841 val |= TAP3_DISABLE;
1842 I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
1843
1844 /* Program PORT_TX_DW7 */
1845 val = I915_READ(CNL_PORT_TX_DW7_LN0(port));
1846 val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
1847 I915_WRITE(CNL_PORT_TX_DW7_GRP(port), val);
1848}
1849
1850static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
1851{
1852 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1853 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1854 enum port port = intel_ddi_get_encoder_port(encoder);
1855 int type = encoder->type;
1856 int width = 0;
1857 int rate = 0;
1858 u32 val;
1859 int ln = 0;
1860
1861 if ((intel_dp) && (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)) {
1862 width = intel_dp->lane_count;
1863 rate = intel_dp->link_rate;
1864 } else {
1865 width = 4;
1866 /* Rate is always < than 6GHz for HDMI */
1867 }
1868
1869 /*
1870 * 1. If port type is eDP or DP,
1871 * set PORT_PCS_DW1 cmnkeeper_enable to 1b,
1872 * else clear to 0b.
1873 */
1874 val = I915_READ(CNL_PORT_PCS_DW1_LN0(port));
1875 if (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)
1876 val |= COMMON_KEEPER_EN;
1877 else
1878 val &= ~COMMON_KEEPER_EN;
1879 I915_WRITE(CNL_PORT_PCS_DW1_GRP(port), val);
1880
1881 /* 2. Program loadgen select */
1882 /*
1883 * Program PORT_TX_DW4_LN depending on Bit rate and used lanes
1884 * <= 6 GHz and 4 lanes (LN0=0, LN1=1, LN2=1, LN3=1)
1885 * <= 6 GHz and 1,2 lanes (LN0=0, LN1=1, LN2=1, LN3=0)
1886 * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
1887 */
1888 for (ln = 0; ln <= 3; ln++) {
1889 val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
1890 val &= ~LOADGEN_SELECT;
1891
1892 if (((rate < 600000) && (width == 4) && (ln >= 1)) ||
1893 ((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) {
1894 val |= LOADGEN_SELECT;
1895 }
1896 I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
1897 }
1898
1899 /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
1900 val = I915_READ(CNL_PORT_CL1CM_DW5);
1901 val |= SUS_CLOCK_CONFIG;
1902 I915_WRITE(CNL_PORT_CL1CM_DW5, val);
1903
1904 /* 4. Clear training enable to change swing values */
1905 val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
1906 val &= ~TX_TRAINING_EN;
1907 I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
1908
1909 /* 5. Program swing and de-emphasis */
1910 cnl_ddi_vswing_program(dev_priv, level, port, type);
1911
1912 /* 6. Set training enable to trigger update */
1913 val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
1914 val |= TX_TRAINING_EN;
1915 I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
1916}
1917
1583static uint32_t translate_signal_level(int signal_levels) 1918static uint32_t translate_signal_level(int signal_levels)
1584{ 1919{
1585 int i; 1920 int i;
@@ -1612,7 +1947,11 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
1612 skl_ddi_set_iboost(encoder, level); 1947 skl_ddi_set_iboost(encoder, level);
1613 else if (IS_GEN9_LP(dev_priv)) 1948 else if (IS_GEN9_LP(dev_priv))
1614 bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); 1949 bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
1615 1950 else if (IS_CANNONLAKE(dev_priv)) {
1951 cnl_ddi_vswing_sequence(encoder, level);
1952 /* DDI_BUF_CTL bits 27:24 are reserved on CNL */
1953 return 0;
1954 }
1616 return DDI_BUF_TRANS_SELECT(level); 1955 return DDI_BUF_TRANS_SELECT(level);
1617} 1956}
1618 1957
@@ -1621,13 +1960,27 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
1621{ 1960{
1622 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1961 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1623 enum port port = intel_ddi_get_encoder_port(encoder); 1962 enum port port = intel_ddi_get_encoder_port(encoder);
1963 uint32_t val;
1624 1964
1625 if (WARN_ON(!pll)) 1965 if (WARN_ON(!pll))
1626 return; 1966 return;
1627 1967
1628 if (IS_GEN9_BC(dev_priv)) { 1968 if (IS_CANNONLAKE(dev_priv)) {
1629 uint32_t val; 1969 /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
1970 val = I915_READ(DPCLKA_CFGCR0);
1971 val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->id, port);
1972 I915_WRITE(DPCLKA_CFGCR0, val);
1630 1973
1974 /*
1975 * Configure DPCLKA_CFGCR0 to turn on the clock for the DDI.
1976 * This step and the step before must be done with separate
1977 * register writes.
1978 */
1979 val = I915_READ(DPCLKA_CFGCR0);
1980 val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) |
1981 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port));
1982 I915_WRITE(DPCLKA_CFGCR0, val);
1983 } else if (IS_GEN9_BC(dev_priv)) {
1631 /* DDI -> PLL mapping */ 1984 /* DDI -> PLL mapping */
1632 val = I915_READ(DPLL_CTRL2); 1985 val = I915_READ(DPLL_CTRL2);
1633 1986
@@ -1696,6 +2049,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
1696 else if (IS_GEN9_LP(dev_priv)) 2049 else if (IS_GEN9_LP(dev_priv))
1697 bxt_ddi_vswing_sequence(dev_priv, level, port, 2050 bxt_ddi_vswing_sequence(dev_priv, level, port,
1698 INTEL_OUTPUT_HDMI); 2051 INTEL_OUTPUT_HDMI);
2052 else if (IS_CANNONLAKE(dev_priv))
2053 cnl_ddi_vswing_sequence(encoder, level);
1699 2054
1700 intel_hdmi->set_infoframes(drm_encoder, 2055 intel_hdmi->set_infoframes(drm_encoder,
1701 has_hdmi_sink, 2056 has_hdmi_sink,
@@ -1732,12 +2087,18 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
1732 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 2087 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
1733 enum port port = intel_ddi_get_encoder_port(intel_encoder); 2088 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1734 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2089 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
2090 struct intel_dp *intel_dp = NULL;
1735 int type = intel_encoder->type; 2091 int type = intel_encoder->type;
1736 uint32_t val; 2092 uint32_t val;
1737 bool wait = false; 2093 bool wait = false;
1738 2094
1739 /* old_crtc_state and old_conn_state are NULL when called from DP_MST */ 2095 /* old_crtc_state and old_conn_state are NULL when called from DP_MST */
1740 2096
2097 if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
2098 intel_dp = enc_to_intel_dp(encoder);
2099 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2100 }
2101
1741 val = I915_READ(DDI_BUF_CTL(port)); 2102 val = I915_READ(DDI_BUF_CTL(port));
1742 if (val & DDI_BUF_CTL_ENABLE) { 2103 if (val & DDI_BUF_CTL_ENABLE) {
1743 val &= ~DDI_BUF_CTL_ENABLE; 2104 val &= ~DDI_BUF_CTL_ENABLE;
@@ -1753,9 +2114,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
1753 if (wait) 2114 if (wait)
1754 intel_wait_ddi_buf_idle(dev_priv, port); 2115 intel_wait_ddi_buf_idle(dev_priv, port);
1755 2116
1756 if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) { 2117 if (intel_dp) {
1757 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1758 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
1759 intel_edp_panel_vdd_on(intel_dp); 2118 intel_edp_panel_vdd_on(intel_dp);
1760 intel_edp_panel_off(intel_dp); 2119 intel_edp_panel_off(intel_dp);
1761 } 2120 }
@@ -1763,7 +2122,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
1763 if (dig_port) 2122 if (dig_port)
1764 intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); 2123 intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
1765 2124
1766 if (IS_GEN9_BC(dev_priv)) 2125 if (IS_CANNONLAKE(dev_priv))
2126 I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
2127 DPCLKA_CFGCR0_DDI_CLK_OFF(port));
2128 else if (IS_GEN9_BC(dev_priv))
1767 I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) | 2129 I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
1768 DPLL_CTRL2_DDI_CLK_OFF(port))); 2130 DPLL_CTRL2_DDI_CLK_OFF(port)));
1769 else if (INTEL_GEN(dev_priv) < 9) 2131 else if (INTEL_GEN(dev_priv) < 9)
@@ -1841,7 +2203,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
1841 if (port == PORT_A && INTEL_GEN(dev_priv) < 9) 2203 if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
1842 intel_dp_stop_link_train(intel_dp); 2204 intel_dp_stop_link_train(intel_dp);
1843 2205
1844 intel_edp_backlight_on(intel_dp); 2206 intel_edp_backlight_on(pipe_config, conn_state);
1845 intel_psr_enable(intel_dp); 2207 intel_psr_enable(intel_dp);
1846 intel_edp_drrs_enable(intel_dp, pipe_config); 2208 intel_edp_drrs_enable(intel_dp, pipe_config);
1847 } 2209 }
@@ -1871,7 +2233,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
1871 2233
1872 intel_edp_drrs_disable(intel_dp, old_crtc_state); 2234 intel_edp_drrs_disable(intel_dp, old_crtc_state);
1873 intel_psr_disable(intel_dp); 2235 intel_psr_disable(intel_dp);
1874 intel_edp_backlight_off(intel_dp); 2236 intel_edp_backlight_off(old_conn_state);
1875 } 2237 }
1876} 2238}
1877 2239
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 3718341662c2..77d3214e1a77 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -51,6 +51,8 @@ static const char * const platform_names[] = {
51 PLATFORM_NAME(BROXTON), 51 PLATFORM_NAME(BROXTON),
52 PLATFORM_NAME(KABYLAKE), 52 PLATFORM_NAME(KABYLAKE),
53 PLATFORM_NAME(GEMINILAKE), 53 PLATFORM_NAME(GEMINILAKE),
54 PLATFORM_NAME(COFFEELAKE),
55 PLATFORM_NAME(CANNONLAKE),
54}; 56};
55#undef PLATFORM_NAME 57#undef PLATFORM_NAME
56 58
@@ -183,16 +185,15 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
183 DIV_ROUND_UP(sseu->eu_total, 185 DIV_ROUND_UP(sseu->eu_total,
184 sseu_subslice_total(sseu)) : 0; 186 sseu_subslice_total(sseu)) : 0;
185 /* 187 /*
186 * SKL supports slice power gating on devices with more than 188 * SKL+ supports slice power gating on devices with more than
187 * one slice, and supports EU power gating on devices with 189 * one slice, and supports EU power gating on devices with
188 * more than one EU pair per subslice. BXT supports subslice 190 * more than one EU pair per subslice. BXT+ supports subslice
189 * power gating on devices with more than one subslice, and 191 * power gating on devices with more than one subslice, and
190 * supports EU power gating on devices with more than one EU 192 * supports EU power gating on devices with more than one EU
191 * pair per subslice. 193 * pair per subslice.
192 */ 194 */
193 sseu->has_slice_pg = 195 sseu->has_slice_pg =
194 (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && 196 !IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
195 hweight8(sseu->slice_mask) > 1;
196 sseu->has_subslice_pg = 197 sseu->has_subslice_pg =
197 IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1; 198 IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
198 sseu->has_eu_pg = sseu->eu_per_subslice > 2; 199 sseu->has_eu_pg = sseu->eu_per_subslice > 2;
@@ -327,7 +328,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
327 * we don't expose the topmost plane at all to prevent ABI breakage 328 * we don't expose the topmost plane at all to prevent ABI breakage
328 * down the line. 329 * down the line.
329 */ 330 */
330 if (IS_GEMINILAKE(dev_priv)) 331 if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
331 for_each_pipe(dev_priv, pipe) 332 for_each_pipe(dev_priv, pipe)
332 info->num_sprites[pipe] = 3; 333 info->num_sprites[pipe] = 3;
333 else if (IS_BROXTON(dev_priv)) { 334 else if (IS_BROXTON(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ce34fb794cc0..dec9e58545a1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
120static void skylake_pfit_enable(struct intel_crtc *crtc); 120static void skylake_pfit_enable(struct intel_crtc *crtc);
121static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); 121static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
122static void ironlake_pfit_enable(struct intel_crtc *crtc); 122static void ironlake_pfit_enable(struct intel_crtc *crtc);
123static void intel_modeset_setup_hw_state(struct drm_device *dev); 123static void intel_modeset_setup_hw_state(struct drm_device *dev,
124 struct drm_modeset_acquire_ctx *ctx);
124static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); 125static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
125 126
126struct intel_limit { 127struct intel_limit {
@@ -1192,9 +1193,8 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1192 pipe); 1193 pipe);
1193 enum intel_display_power_domain power_domain; 1194 enum intel_display_power_domain power_domain;
1194 1195
1195 /* if we need the pipe quirk it must be always on */ 1196 /* we keep both pipes enabled on 830 */
1196 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1197 if (IS_I830(dev_priv))
1197 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1198 state = true; 1198 state = true;
1199 1199
1200 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 1200 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
@@ -1549,6 +1549,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
1549 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1549 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1550 i915_reg_t reg = DPLL(crtc->pipe); 1550 i915_reg_t reg = DPLL(crtc->pipe);
1551 u32 dpll = crtc->config->dpll_hw_state.dpll; 1551 u32 dpll = crtc->config->dpll_hw_state.dpll;
1552 int i;
1552 1553
1553 assert_pipe_disabled(dev_priv, crtc->pipe); 1554 assert_pipe_disabled(dev_priv, crtc->pipe);
1554 1555
@@ -1595,15 +1596,11 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
1595 } 1596 }
1596 1597
1597 /* We do this three times for luck */ 1598 /* We do this three times for luck */
1598 I915_WRITE(reg, dpll); 1599 for (i = 0; i < 3; i++) {
1599 POSTING_READ(reg); 1600 I915_WRITE(reg, dpll);
1600 udelay(150); /* wait for warmup */ 1601 POSTING_READ(reg);
1601 I915_WRITE(reg, dpll); 1602 udelay(150); /* wait for warmup */
1602 POSTING_READ(reg); 1603 }
1603 udelay(150); /* wait for warmup */
1604 I915_WRITE(reg, dpll);
1605 POSTING_READ(reg);
1606 udelay(150); /* wait for warmup */
1607} 1604}
1608 1605
1609/** 1606/**
@@ -1631,8 +1628,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
1631 } 1628 }
1632 1629
1633 /* Don't disable pipe or pipe PLLs if needed */ 1630 /* Don't disable pipe or pipe PLLs if needed */
1634 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1631 if (IS_I830(dev_priv))
1635 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1636 return; 1632 return;
1637 1633
1638 /* Make sure the pipe isn't still relying on us */ 1634 /* Make sure the pipe isn't still relying on us */
@@ -1915,8 +1911,8 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
1915 reg = PIPECONF(cpu_transcoder); 1911 reg = PIPECONF(cpu_transcoder);
1916 val = I915_READ(reg); 1912 val = I915_READ(reg);
1917 if (val & PIPECONF_ENABLE) { 1913 if (val & PIPECONF_ENABLE) {
1918 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1914 /* we keep both pipes enabled on 830 */
1919 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))); 1915 WARN_ON(!IS_I830(dev_priv));
1920 return; 1916 return;
1921 } 1917 }
1922 1918
@@ -1976,8 +1972,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
1976 val &= ~PIPECONF_DOUBLE_WIDE; 1972 val &= ~PIPECONF_DOUBLE_WIDE;
1977 1973
1978 /* Don't disable pipe or pipe PLLs if needed */ 1974 /* Don't disable pipe or pipe PLLs if needed */
1979 if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) && 1975 if (!IS_I830(dev_priv))
1980 !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1981 val &= ~PIPECONF_ENABLE; 1976 val &= ~PIPECONF_ENABLE;
1982 1977
1983 I915_WRITE(reg, val); 1978 I915_WRITE(reg, val);
@@ -3461,7 +3456,7 @@ __intel_display_resume(struct drm_device *dev,
3461 struct drm_crtc *crtc; 3456 struct drm_crtc *crtc;
3462 int i, ret; 3457 int i, ret;
3463 3458
3464 intel_modeset_setup_hw_state(dev); 3459 intel_modeset_setup_hw_state(dev, ctx);
3465 i915_redisable_vga(to_i915(dev)); 3460 i915_redisable_vga(to_i915(dev));
3466 3461
3467 if (!state) 3462 if (!state)
@@ -5838,9 +5833,14 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
5838 5833
5839 if (!dev_priv->display.initial_watermarks) 5834 if (!dev_priv->display.initial_watermarks)
5840 intel_update_watermarks(intel_crtc); 5835 intel_update_watermarks(intel_crtc);
5836
5837 /* clock the pipe down to 640x480@60 to potentially save power */
5838 if (IS_I830(dev_priv))
5839 i830_enable_pipe(dev_priv, pipe);
5841} 5840}
5842 5841
5843static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) 5842static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
5843 struct drm_modeset_acquire_ctx *ctx)
5844{ 5844{
5845 struct intel_encoder *encoder; 5845 struct intel_encoder *encoder;
5846 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5846 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5870,7 +5870,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
5870 return; 5870 return;
5871 } 5871 }
5872 5872
5873 state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; 5873 state->acquire_ctx = ctx;
5874 5874
5875 /* Everything's already locked, -EDEADLK can't happen. */ 5875 /* Everything's already locked, -EDEADLK can't happen. */
5876 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 5876 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
@@ -5976,11 +5976,21 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
5976 5976
5977int intel_connector_init(struct intel_connector *connector) 5977int intel_connector_init(struct intel_connector *connector)
5978{ 5978{
5979 drm_atomic_helper_connector_reset(&connector->base); 5979 struct intel_digital_connector_state *conn_state;
5980 5980
5981 if (!connector->base.state) 5981 /*
5982 * Allocate enough memory to hold intel_digital_connector_state,
5983 * This might be a few bytes too many, but for connectors that don't
5984 * need it we'll free the state and allocate a smaller one on the first
5985 * succesful commit anyway.
5986 */
5987 conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
5988 if (!conn_state)
5982 return -ENOMEM; 5989 return -ENOMEM;
5983 5990
5991 __drm_atomic_helper_connector_reset(&connector->base,
5992 &conn_state->base);
5993
5984 return 0; 5994 return 0;
5985} 5995}
5986 5996
@@ -7038,8 +7048,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7038 7048
7039 pipeconf = 0; 7049 pipeconf = 0;
7040 7050
7041 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 7051 /* we keep both pipes enabled on 830 */
7042 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 7052 if (IS_I830(dev_priv))
7043 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE; 7053 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
7044 7054
7045 if (intel_crtc->config->double_wide) 7055 if (intel_crtc->config->double_wide)
@@ -8864,6 +8874,22 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
8864 return 0; 8874 return 0;
8865} 8875}
8866 8876
8877static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
8878 enum port port,
8879 struct intel_crtc_state *pipe_config)
8880{
8881 enum intel_dpll_id id;
8882 u32 temp;
8883
8884 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
8885 id = temp >> (port * 2);
8886
8887 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
8888 return;
8889
8890 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
8891}
8892
8867static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, 8893static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
8868 enum port port, 8894 enum port port,
8869 struct intel_crtc_state *pipe_config) 8895 struct intel_crtc_state *pipe_config)
@@ -9051,7 +9077,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9051 9077
9052 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; 9078 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9053 9079
9054 if (IS_GEN9_BC(dev_priv)) 9080 if (IS_CANNONLAKE(dev_priv))
9081 cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
9082 else if (IS_GEN9_BC(dev_priv))
9055 skylake_get_ddi_pll(dev_priv, port, pipe_config); 9083 skylake_get_ddi_pll(dev_priv, port, pipe_config);
9056 else if (IS_GEN9_LP(dev_priv)) 9084 else if (IS_GEN9_LP(dev_priv))
9057 bxt_get_ddi_pll(dev_priv, port, pipe_config); 9085 bxt_get_ddi_pll(dev_priv, port, pipe_config);
@@ -11185,6 +11213,9 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11185 ret = skl_update_scaler_crtc(pipe_config); 11213 ret = skl_update_scaler_crtc(pipe_config);
11186 11214
11187 if (!ret) 11215 if (!ret)
11216 ret = skl_check_pipe_max_pixel_rate(intel_crtc,
11217 pipe_config);
11218 if (!ret)
11188 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc, 11219 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
11189 pipe_config); 11220 pipe_config);
11190 } 11221 }
@@ -12206,9 +12237,8 @@ verify_crtc_state(struct drm_crtc *crtc,
12206 12237
12207 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); 12238 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12208 12239
12209 /* hw state is inconsistent with the pipe quirk */ 12240 /* we keep both pipes enabled on 830 */
12210 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 12241 if (IS_I830(dev_priv))
12211 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
12212 active = new_crtc_state->active; 12242 active = new_crtc_state->active;
12213 12243
12214 I915_STATE_WARN(new_crtc_state->active != active, 12244 I915_STATE_WARN(new_crtc_state->active != active,
@@ -13117,8 +13147,16 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13117 13147
13118 drm_atomic_helper_commit_hw_done(state); 13148 drm_atomic_helper_commit_hw_done(state);
13119 13149
13120 if (intel_state->modeset) 13150 if (intel_state->modeset) {
13151 /* As one of the primary mmio accessors, KMS has a high
13152 * likelihood of triggering bugs in unclaimed access. After we
13153 * finish modesetting, see if an error has been flagged, and if
13154 * so enable debugging for the next modeset - and hope we catch
13155 * the culprit.
13156 */
13157 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
13121 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); 13158 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13159 }
13122 13160
13123 mutex_lock(&dev->struct_mutex); 13161 mutex_lock(&dev->struct_mutex);
13124 drm_atomic_helper_cleanup_planes(dev, state); 13162 drm_atomic_helper_cleanup_planes(dev, state);
@@ -13128,19 +13166,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13128 13166
13129 drm_atomic_state_put(state); 13167 drm_atomic_state_put(state);
13130 13168
13131 /* As one of the primary mmio accessors, KMS has a high likelihood
13132 * of triggering bugs in unclaimed access. After we finish
13133 * modesetting, see if an error has been flagged, and if so
13134 * enable debugging for the next modeset - and hope we catch
13135 * the culprit.
13136 *
13137 * XXX note that we assume display power is on at this point.
13138 * This might hold true now but we need to add pm helper to check
13139 * unclaimed only when the hardware is on, as atomic commits
13140 * can happen also when the device is completely off.
13141 */
13142 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
13143
13144 intel_atomic_helper_free_state(dev_priv); 13169 intel_atomic_helper_free_state(dev_priv);
13145} 13170}
13146 13171
@@ -13272,43 +13297,6 @@ static int intel_atomic_commit(struct drm_device *dev,
13272 return 0; 13297 return 0;
13273} 13298}
13274 13299
13275void intel_crtc_restore_mode(struct drm_crtc *crtc)
13276{
13277 struct drm_device *dev = crtc->dev;
13278 struct drm_atomic_state *state;
13279 struct drm_crtc_state *crtc_state;
13280 int ret;
13281
13282 state = drm_atomic_state_alloc(dev);
13283 if (!state) {
13284 DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
13285 crtc->base.id, crtc->name);
13286 return;
13287 }
13288
13289 state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
13290
13291retry:
13292 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13293 ret = PTR_ERR_OR_ZERO(crtc_state);
13294 if (!ret) {
13295 if (!crtc_state->active)
13296 goto out;
13297
13298 crtc_state->mode_changed = true;
13299 ret = drm_atomic_commit(state);
13300 }
13301
13302 if (ret == -EDEADLK) {
13303 drm_atomic_state_clear(state);
13304 drm_modeset_backoff(state->acquire_ctx);
13305 goto retry;
13306 }
13307
13308out:
13309 drm_atomic_state_put(state);
13310}
13311
13312static const struct drm_crtc_funcs intel_crtc_funcs = { 13300static const struct drm_crtc_funcs intel_crtc_funcs = {
13313 .gamma_set = drm_atomic_helper_legacy_gamma_set, 13301 .gamma_set = drm_atomic_helper_legacy_gamma_set,
13314 .set_config = drm_atomic_helper_set_config, 13302 .set_config = drm_atomic_helper_set_config,
@@ -14749,27 +14737,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14749} 14737}
14750 14738
14751/* 14739/*
14752 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
14753 * resume, or other times. This quirk makes sure that's the case for
14754 * affected systems.
14755 */
14756static void quirk_pipea_force(struct drm_device *dev)
14757{
14758 struct drm_i915_private *dev_priv = to_i915(dev);
14759
14760 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
14761 DRM_INFO("applying pipe a force quirk\n");
14762}
14763
14764static void quirk_pipeb_force(struct drm_device *dev)
14765{
14766 struct drm_i915_private *dev_priv = to_i915(dev);
14767
14768 dev_priv->quirks |= QUIRK_PIPEB_FORCE;
14769 DRM_INFO("applying pipe b force quirk\n");
14770}
14771
14772/*
14773 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason 14740 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14774 */ 14741 */
14775static void quirk_ssc_force_disable(struct drm_device *dev) 14742static void quirk_ssc_force_disable(struct drm_device *dev)
@@ -14834,18 +14801,6 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
14834}; 14801};
14835 14802
14836static struct intel_quirk intel_quirks[] = { 14803static struct intel_quirk intel_quirks[] = {
14837 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
14838 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
14839
14840 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
14841 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
14842
14843 /* 830 needs to leave pipe A & dpll A up */
14844 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
14845
14846 /* 830 needs to leave pipe B & dpll B up */
14847 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
14848
14849 /* Lenovo U160 cannot use SSC on LVDS */ 14804 /* Lenovo U160 cannot use SSC on LVDS */
14850 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 14805 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
14851 14806
@@ -15129,7 +15084,7 @@ int intel_modeset_init(struct drm_device *dev)
15129 intel_setup_outputs(dev_priv); 15084 intel_setup_outputs(dev_priv);
15130 15085
15131 drm_modeset_lock_all(dev); 15086 drm_modeset_lock_all(dev);
15132 intel_modeset_setup_hw_state(dev); 15087 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
15133 drm_modeset_unlock_all(dev); 15088 drm_modeset_unlock_all(dev);
15134 15089
15135 for_each_intel_crtc(dev, crtc) { 15090 for_each_intel_crtc(dev, crtc) {
@@ -15166,35 +15121,89 @@ int intel_modeset_init(struct drm_device *dev)
15166 return 0; 15121 return 0;
15167} 15122}
15168 15123
15169static void intel_enable_pipe_a(struct drm_device *dev) 15124void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
15170{ 15125{
15171 struct intel_connector *connector; 15126 /* 640x480@60Hz, ~25175 kHz */
15172 struct drm_connector_list_iter conn_iter; 15127 struct dpll clock = {
15173 struct drm_connector *crt = NULL; 15128 .m1 = 18,
15174 struct intel_load_detect_pipe load_detect_temp; 15129 .m2 = 7,
15175 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; 15130 .p1 = 13,
15176 int ret; 15131 .p2 = 4,
15132 .n = 2,
15133 };
15134 u32 dpll, fp;
15135 int i;
15177 15136
15178 /* We can't just switch on the pipe A, we need to set things up with a 15137 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
15179 * proper mode and output configuration. As a gross hack, enable pipe A 15138
15180 * by enabling the load detect pipe once. */ 15139 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
15181 drm_connector_list_iter_begin(dev, &conn_iter); 15140 pipe_name(pipe), clock.vco, clock.dot);
15182 for_each_intel_connector_iter(connector, &conn_iter) { 15141
15183 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { 15142 fp = i9xx_dpll_compute_fp(&clock);
15184 crt = &connector->base; 15143 dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
15185 break; 15144 DPLL_VGA_MODE_DIS |
15186 } 15145 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
15146 PLL_P2_DIVIDE_BY_4 |
15147 PLL_REF_INPUT_DREFCLK |
15148 DPLL_VCO_ENABLE;
15149
15150 I915_WRITE(FP0(pipe), fp);
15151 I915_WRITE(FP1(pipe), fp);
15152
15153 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
15154 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
15155 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
15156 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
15157 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
15158 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
15159 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
15160
15161 /*
15162 * Apparently we need to have VGA mode enabled prior to changing
15163 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
15164 * dividers, even though the register value does change.
15165 */
15166 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
15167 I915_WRITE(DPLL(pipe), dpll);
15168
15169 /* Wait for the clocks to stabilize. */
15170 POSTING_READ(DPLL(pipe));
15171 udelay(150);
15172
15173 /* The pixel multiplier can only be updated once the
15174 * DPLL is enabled and the clocks are stable.
15175 *
15176 * So write it again.
15177 */
15178 I915_WRITE(DPLL(pipe), dpll);
15179
15180 /* We do this three times for luck */
15181 for (i = 0; i < 3 ; i++) {
15182 I915_WRITE(DPLL(pipe), dpll);
15183 POSTING_READ(DPLL(pipe));
15184 udelay(150); /* wait for warmup */
15187 } 15185 }
15188 drm_connector_list_iter_end(&conn_iter);
15189 15186
15190 if (!crt) 15187 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
15191 return; 15188 POSTING_READ(PIPECONF(pipe));
15189}
15190
15191void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
15192{
15193 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
15194 pipe_name(pipe));
15192 15195
15193 ret = intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx); 15196 assert_plane_disabled(dev_priv, PLANE_A);
15194 WARN(ret < 0, "All modeset mutexes are locked, but intel_get_load_detect_pipe failed\n"); 15197 assert_plane_disabled(dev_priv, PLANE_B);
15195 15198
15196 if (ret > 0) 15199 I915_WRITE(PIPECONF(pipe), 0);
15197 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx); 15200 POSTING_READ(PIPECONF(pipe));
15201
15202 if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
15203 DRM_ERROR("pipe %c off wait timed out\n", pipe_name(pipe));
15204
15205 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
15206 POSTING_READ(DPLL(pipe));
15198} 15207}
15199 15208
15200static bool 15209static bool
@@ -15244,7 +15253,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
15244 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A); 15253 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
15245} 15254}
15246 15255
15247static void intel_sanitize_crtc(struct intel_crtc *crtc) 15256static void intel_sanitize_crtc(struct intel_crtc *crtc,
15257 struct drm_modeset_acquire_ctx *ctx)
15248{ 15258{
15249 struct drm_device *dev = crtc->base.dev; 15259 struct drm_device *dev = crtc->base.dev;
15250 struct drm_i915_private *dev_priv = to_i915(dev); 15260 struct drm_i915_private *dev_priv = to_i915(dev);
@@ -15290,23 +15300,14 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
15290 plane = crtc->plane; 15300 plane = crtc->plane;
15291 crtc->base.primary->state->visible = true; 15301 crtc->base.primary->state->visible = true;
15292 crtc->plane = !plane; 15302 crtc->plane = !plane;
15293 intel_crtc_disable_noatomic(&crtc->base); 15303 intel_crtc_disable_noatomic(&crtc->base, ctx);
15294 crtc->plane = plane; 15304 crtc->plane = plane;
15295 } 15305 }
15296 15306
15297 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
15298 crtc->pipe == PIPE_A && !crtc->active) {
15299 /* BIOS forgot to enable pipe A, this mostly happens after
15300 * resume. Force-enable the pipe to fix this, the update_dpms
15301 * call below we restore the pipe to the right state, but leave
15302 * the required bits on. */
15303 intel_enable_pipe_a(dev);
15304 }
15305
15306 /* Adjust the state of the output pipe according to whether we 15307 /* Adjust the state of the output pipe according to whether we
15307 * have active connectors/encoders. */ 15308 * have active connectors/encoders. */
15308 if (crtc->active && !intel_crtc_has_encoders(crtc)) 15309 if (crtc->active && !intel_crtc_has_encoders(crtc))
15309 intel_crtc_disable_noatomic(&crtc->base); 15310 intel_crtc_disable_noatomic(&crtc->base, ctx);
15310 15311
15311 if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { 15312 if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
15312 /* 15313 /*
@@ -15603,7 +15604,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
15603 * and sanitizes it to the current state 15604 * and sanitizes it to the current state
15604 */ 15605 */
15605static void 15606static void
15606intel_modeset_setup_hw_state(struct drm_device *dev) 15607intel_modeset_setup_hw_state(struct drm_device *dev,
15608 struct drm_modeset_acquire_ctx *ctx)
15607{ 15609{
15608 struct drm_i915_private *dev_priv = to_i915(dev); 15610 struct drm_i915_private *dev_priv = to_i915(dev);
15609 enum pipe pipe; 15611 enum pipe pipe;
@@ -15623,7 +15625,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
15623 for_each_pipe(dev_priv, pipe) { 15625 for_each_pipe(dev_priv, pipe) {
15624 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15626 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15625 15627
15626 intel_sanitize_crtc(crtc); 15628 intel_sanitize_crtc(crtc, ctx);
15627 intel_dump_pipe_config(crtc, crtc->config, 15629 intel_dump_pipe_config(crtc, crtc->config,
15628 "[setup_hw_state]"); 15630 "[setup_hw_state]");
15629 } 15631 }
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index de4b1f2d367d..64fa774c855b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -798,7 +798,7 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
798 regs->pp_stat = PP_STATUS(pps_idx); 798 regs->pp_stat = PP_STATUS(pps_idx);
799 regs->pp_on = PP_ON_DELAYS(pps_idx); 799 regs->pp_on = PP_ON_DELAYS(pps_idx);
800 regs->pp_off = PP_OFF_DELAYS(pps_idx); 800 regs->pp_off = PP_OFF_DELAYS(pps_idx);
801 if (!IS_GEN9_LP(dev_priv)) 801 if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv))
802 regs->pp_div = PP_DIVISOR(pps_idx); 802 regs->pp_div = PP_DIVISOR(pps_idx);
803} 803}
804 804
@@ -1548,17 +1548,6 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
1548 DRM_DEBUG_KMS("common rates: %s\n", str); 1548 DRM_DEBUG_KMS("common rates: %s\n", str);
1549} 1549}
1550 1550
1551static int rate_to_index(int find, const int *rates)
1552{
1553 int i = 0;
1554
1555 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1556 if (find == rates[i])
1557 break;
1558
1559 return i;
1560}
1561
1562int 1551int
1563intel_dp_max_link_rate(struct intel_dp *intel_dp) 1552intel_dp_max_link_rate(struct intel_dp *intel_dp)
1564{ 1553{
@@ -1628,6 +1617,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
1628 enum port port = dp_to_dig_port(intel_dp)->port; 1617 enum port port = dp_to_dig_port(intel_dp)->port;
1629 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); 1618 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1630 struct intel_connector *intel_connector = intel_dp->attached_connector; 1619 struct intel_connector *intel_connector = intel_dp->attached_connector;
1620 struct intel_digital_connector_state *intel_conn_state =
1621 to_intel_digital_connector_state(conn_state);
1631 int lane_count, clock; 1622 int lane_count, clock;
1632 int min_lane_count = 1; 1623 int min_lane_count = 1;
1633 int max_lane_count = intel_dp_max_lane_count(intel_dp); 1624 int max_lane_count = intel_dp_max_lane_count(intel_dp);
@@ -1653,7 +1644,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
1653 pipe_config->has_pch_encoder = true; 1644 pipe_config->has_pch_encoder = true;
1654 1645
1655 pipe_config->has_drrs = false; 1646 pipe_config->has_drrs = false;
1656 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A; 1647 if (port == PORT_A)
1648 pipe_config->has_audio = false;
1649 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
1650 pipe_config->has_audio = intel_dp->has_audio;
1651 else
1652 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
1657 1653
1658 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 1654 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1659 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 1655 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
@@ -1668,10 +1664,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
1668 1664
1669 if (HAS_GMCH_DISPLAY(dev_priv)) 1665 if (HAS_GMCH_DISPLAY(dev_priv))
1670 intel_gmch_panel_fitting(intel_crtc, pipe_config, 1666 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1671 intel_connector->panel.fitting_mode); 1667 conn_state->scaling_mode);
1672 else 1668 else
1673 intel_pch_panel_fitting(intel_crtc, pipe_config, 1669 intel_pch_panel_fitting(intel_crtc, pipe_config,
1674 intel_connector->panel.fitting_mode); 1670 conn_state->scaling_mode);
1675 } 1671 }
1676 1672
1677 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 1673 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
@@ -1740,7 +1736,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
1740 return false; 1736 return false;
1741 1737
1742found: 1738found:
1743 if (intel_dp->color_range_auto) { 1739 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
1744 /* 1740 /*
1745 * See: 1741 * See:
1746 * CEA-861-E - 5.1 Default Encoding Parameters 1742 * CEA-861-E - 5.1 Default Encoding Parameters
@@ -1752,7 +1748,7 @@ found:
1752 HDMI_QUANTIZATION_RANGE_LIMITED; 1748 HDMI_QUANTIZATION_RANGE_LIMITED;
1753 } else { 1749 } else {
1754 pipe_config->limited_color_range = 1750 pipe_config->limited_color_range =
1755 intel_dp->limited_color_range; 1751 intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
1756 } 1752 }
1757 1753
1758 pipe_config->lane_count = lane_count; 1754 pipe_config->lane_count = lane_count;
@@ -2315,14 +2311,17 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2315} 2311}
2316 2312
2317/* Enable backlight PWM and backlight PP control. */ 2313/* Enable backlight PWM and backlight PP control. */
2318void intel_edp_backlight_on(struct intel_dp *intel_dp) 2314void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2315 const struct drm_connector_state *conn_state)
2319{ 2316{
2317 struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2318
2320 if (!is_edp(intel_dp)) 2319 if (!is_edp(intel_dp))
2321 return; 2320 return;
2322 2321
2323 DRM_DEBUG_KMS("\n"); 2322 DRM_DEBUG_KMS("\n");
2324 2323
2325 intel_panel_enable_backlight(intel_dp->attached_connector); 2324 intel_panel_enable_backlight(crtc_state, conn_state);
2326 _intel_edp_backlight_on(intel_dp); 2325 _intel_edp_backlight_on(intel_dp);
2327} 2326}
2328 2327
@@ -2354,15 +2353,17 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2354} 2353}
2355 2354
2356/* Disable backlight PP control and backlight PWM. */ 2355/* Disable backlight PP control and backlight PWM. */
2357void intel_edp_backlight_off(struct intel_dp *intel_dp) 2356void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2358{ 2357{
2358 struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2359
2359 if (!is_edp(intel_dp)) 2360 if (!is_edp(intel_dp))
2360 return; 2361 return;
2361 2362
2362 DRM_DEBUG_KMS("\n"); 2363 DRM_DEBUG_KMS("\n");
2363 2364
2364 _intel_edp_backlight_off(intel_dp); 2365 _intel_edp_backlight_off(intel_dp);
2365 intel_panel_disable_backlight(intel_dp->attached_connector); 2366 intel_panel_disable_backlight(old_conn_state);
2366} 2367}
2367 2368
2368/* 2369/*
@@ -2658,7 +2659,7 @@ static void intel_disable_dp(struct intel_encoder *encoder,
2658 /* Make sure the panel is off before trying to change the mode. But also 2659 /* Make sure the panel is off before trying to change the mode. But also
2659 * ensure that we have vdd while we switch off the panel. */ 2660 * ensure that we have vdd while we switch off the panel. */
2660 intel_edp_panel_vdd_on(intel_dp); 2661 intel_edp_panel_vdd_on(intel_dp);
2661 intel_edp_backlight_off(intel_dp); 2662 intel_edp_backlight_off(old_conn_state);
2662 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 2663 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2663 intel_edp_panel_off(intel_dp); 2664 intel_edp_panel_off(intel_dp);
2664 2665
@@ -2872,10 +2873,8 @@ static void g4x_enable_dp(struct intel_encoder *encoder,
2872 struct intel_crtc_state *pipe_config, 2873 struct intel_crtc_state *pipe_config,
2873 struct drm_connector_state *conn_state) 2874 struct drm_connector_state *conn_state)
2874{ 2875{
2875 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2876
2877 intel_enable_dp(encoder, pipe_config, conn_state); 2876 intel_enable_dp(encoder, pipe_config, conn_state);
2878 intel_edp_backlight_on(intel_dp); 2877 intel_edp_backlight_on(pipe_config, conn_state);
2879} 2878}
2880 2879
2881static void vlv_enable_dp(struct intel_encoder *encoder, 2880static void vlv_enable_dp(struct intel_encoder *encoder,
@@ -2884,7 +2883,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder,
2884{ 2883{
2885 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2884 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2886 2885
2887 intel_edp_backlight_on(intel_dp); 2886 intel_edp_backlight_on(pipe_config, conn_state);
2888 intel_psr_enable(intel_dp); 2887 intel_psr_enable(intel_dp);
2889} 2888}
2890 2889
@@ -3466,7 +3465,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3466 if (HAS_DDI(dev_priv)) { 3465 if (HAS_DDI(dev_priv)) {
3467 signal_levels = ddi_signal_levels(intel_dp); 3466 signal_levels = ddi_signal_levels(intel_dp);
3468 3467
3469 if (IS_GEN9_LP(dev_priv)) 3468 if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv))
3470 signal_levels = 0; 3469 signal_levels = 0;
3471 else 3470 else
3472 mask = DDI_BUF_EMP_MASK; 3471 mask = DDI_BUF_EMP_MASK;
@@ -4587,10 +4586,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
4587 edid = intel_dp_get_edid(intel_dp); 4586 edid = intel_dp_get_edid(intel_dp);
4588 intel_connector->detect_edid = edid; 4587 intel_connector->detect_edid = edid;
4589 4588
4590 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) 4589 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4591 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4592 else
4593 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4594} 4590}
4595 4591
4596static void 4592static void
@@ -4799,112 +4795,6 @@ static int intel_dp_get_modes(struct drm_connector *connector)
4799 return 0; 4795 return 0;
4800} 4796}
4801 4797
4802static bool
4803intel_dp_detect_audio(struct drm_connector *connector)
4804{
4805 bool has_audio = false;
4806 struct edid *edid;
4807
4808 edid = to_intel_connector(connector)->detect_edid;
4809 if (edid)
4810 has_audio = drm_detect_monitor_audio(edid);
4811
4812 return has_audio;
4813}
4814
4815static int
4816intel_dp_set_property(struct drm_connector *connector,
4817 struct drm_property *property,
4818 uint64_t val)
4819{
4820 struct drm_i915_private *dev_priv = to_i915(connector->dev);
4821 struct intel_connector *intel_connector = to_intel_connector(connector);
4822 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4823 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4824 int ret;
4825
4826 ret = drm_object_property_set_value(&connector->base, property, val);
4827 if (ret)
4828 return ret;
4829
4830 if (property == dev_priv->force_audio_property) {
4831 int i = val;
4832 bool has_audio;
4833
4834 if (i == intel_dp->force_audio)
4835 return 0;
4836
4837 intel_dp->force_audio = i;
4838
4839 if (i == HDMI_AUDIO_AUTO)
4840 has_audio = intel_dp_detect_audio(connector);
4841 else
4842 has_audio = (i == HDMI_AUDIO_ON);
4843
4844 if (has_audio == intel_dp->has_audio)
4845 return 0;
4846
4847 intel_dp->has_audio = has_audio;
4848 goto done;
4849 }
4850
4851 if (property == dev_priv->broadcast_rgb_property) {
4852 bool old_auto = intel_dp->color_range_auto;
4853 bool old_range = intel_dp->limited_color_range;
4854
4855 switch (val) {
4856 case INTEL_BROADCAST_RGB_AUTO:
4857 intel_dp->color_range_auto = true;
4858 break;
4859 case INTEL_BROADCAST_RGB_FULL:
4860 intel_dp->color_range_auto = false;
4861 intel_dp->limited_color_range = false;
4862 break;
4863 case INTEL_BROADCAST_RGB_LIMITED:
4864 intel_dp->color_range_auto = false;
4865 intel_dp->limited_color_range = true;
4866 break;
4867 default:
4868 return -EINVAL;
4869 }
4870
4871 if (old_auto == intel_dp->color_range_auto &&
4872 old_range == intel_dp->limited_color_range)
4873 return 0;
4874
4875 goto done;
4876 }
4877
4878 if (is_edp(intel_dp) &&
4879 property == connector->dev->mode_config.scaling_mode_property) {
4880 if (val == DRM_MODE_SCALE_NONE) {
4881 DRM_DEBUG_KMS("no scaling not supported\n");
4882 return -EINVAL;
4883 }
4884 if (HAS_GMCH_DISPLAY(dev_priv) &&
4885 val == DRM_MODE_SCALE_CENTER) {
4886 DRM_DEBUG_KMS("centering not supported\n");
4887 return -EINVAL;
4888 }
4889
4890 if (intel_connector->panel.fitting_mode == val) {
4891 /* the eDP scaling property is not changed */
4892 return 0;
4893 }
4894 intel_connector->panel.fitting_mode = val;
4895
4896 goto done;
4897 }
4898
4899 return -EINVAL;
4900
4901done:
4902 if (intel_encoder->base.crtc)
4903 intel_crtc_restore_mode(intel_encoder->base.crtc);
4904
4905 return 0;
4906}
4907
4908static int 4798static int
4909intel_dp_connector_register(struct drm_connector *connector) 4799intel_dp_connector_register(struct drm_connector *connector)
4910{ 4800{
@@ -5063,19 +4953,21 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
5063 .dpms = drm_atomic_helper_connector_dpms, 4953 .dpms = drm_atomic_helper_connector_dpms,
5064 .force = intel_dp_force, 4954 .force = intel_dp_force,
5065 .fill_modes = drm_helper_probe_single_connector_modes, 4955 .fill_modes = drm_helper_probe_single_connector_modes,
5066 .set_property = intel_dp_set_property, 4956 .set_property = drm_atomic_helper_connector_set_property,
5067 .atomic_get_property = intel_connector_atomic_get_property, 4957 .atomic_get_property = intel_digital_connector_atomic_get_property,
4958 .atomic_set_property = intel_digital_connector_atomic_set_property,
5068 .late_register = intel_dp_connector_register, 4959 .late_register = intel_dp_connector_register,
5069 .early_unregister = intel_dp_connector_unregister, 4960 .early_unregister = intel_dp_connector_unregister,
5070 .destroy = intel_dp_connector_destroy, 4961 .destroy = intel_dp_connector_destroy,
5071 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 4962 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5072 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 4963 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
5073}; 4964};
5074 4965
5075static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 4966static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5076 .detect_ctx = intel_dp_detect, 4967 .detect_ctx = intel_dp_detect,
5077 .get_modes = intel_dp_get_modes, 4968 .get_modes = intel_dp_get_modes,
5078 .mode_valid = intel_dp_mode_valid, 4969 .mode_valid = intel_dp_mode_valid,
4970 .atomic_check = intel_digital_connector_atomic_check,
5079}; 4971};
5080 4972
5081static const struct drm_encoder_funcs intel_dp_enc_funcs = { 4973static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -5169,19 +5061,22 @@ bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
5169static void 5061static void
5170intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 5062intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5171{ 5063{
5172 struct intel_connector *intel_connector = to_intel_connector(connector); 5064 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5173 5065
5174 intel_attach_force_audio_property(connector); 5066 intel_attach_force_audio_property(connector);
5175 intel_attach_broadcast_rgb_property(connector); 5067 intel_attach_broadcast_rgb_property(connector);
5176 intel_dp->color_range_auto = true;
5177 5068
5178 if (is_edp(intel_dp)) { 5069 if (is_edp(intel_dp)) {
5179 drm_mode_create_scaling_mode_property(connector->dev); 5070 u32 allowed_scalers;
5180 drm_object_attach_property( 5071
5181 &connector->base, 5072 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
5182 connector->dev->mode_config.scaling_mode_property, 5073 if (!HAS_GMCH_DISPLAY(dev_priv))
5183 DRM_MODE_SCALE_ASPECT); 5074 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
5184 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; 5075
5076 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
5077
5078 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
5079
5185 } 5080 }
5186} 5081}
5187 5082
@@ -5207,7 +5102,7 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
5207 5102
5208 pp_on = I915_READ(regs.pp_on); 5103 pp_on = I915_READ(regs.pp_on);
5209 pp_off = I915_READ(regs.pp_off); 5104 pp_off = I915_READ(regs.pp_off);
5210 if (!IS_GEN9_LP(dev_priv)) { 5105 if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv)) {
5211 I915_WRITE(regs.pp_ctrl, pp_ctl); 5106 I915_WRITE(regs.pp_ctrl, pp_ctl);
5212 pp_div = I915_READ(regs.pp_div); 5107 pp_div = I915_READ(regs.pp_div);
5213 } 5108 }
@@ -5225,7 +5120,7 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
5225 seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> 5120 seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5226 PANEL_POWER_DOWN_DELAY_SHIFT; 5121 PANEL_POWER_DOWN_DELAY_SHIFT;
5227 5122
5228 if (IS_GEN9_LP(dev_priv)) { 5123 if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) {
5229 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> 5124 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5230 BXT_POWER_CYCLE_DELAY_SHIFT; 5125 BXT_POWER_CYCLE_DELAY_SHIFT;
5231 if (tmp > 0) 5126 if (tmp > 0)
@@ -5382,7 +5277,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5382 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 5277 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5383 /* Compute the divisor for the pp clock, simply match the Bspec 5278 /* Compute the divisor for the pp clock, simply match the Bspec
5384 * formula. */ 5279 * formula. */
5385 if (IS_GEN9_LP(dev_priv)) { 5280 if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) {
5386 pp_div = I915_READ(regs.pp_ctrl); 5281 pp_div = I915_READ(regs.pp_ctrl);
5387 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK; 5282 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5388 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000) 5283 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
@@ -5408,7 +5303,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5408 5303
5409 I915_WRITE(regs.pp_on, pp_on); 5304 I915_WRITE(regs.pp_on, pp_on);
5410 I915_WRITE(regs.pp_off, pp_off); 5305 I915_WRITE(regs.pp_off, pp_off);
5411 if (IS_GEN9_LP(dev_priv)) 5306 if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv))
5412 I915_WRITE(regs.pp_ctrl, pp_div); 5307 I915_WRITE(regs.pp_ctrl, pp_div);
5413 else 5308 else
5414 I915_WRITE(regs.pp_div, pp_div); 5309 I915_WRITE(regs.pp_div, pp_div);
@@ -5416,7 +5311,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5416 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 5311 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5417 I915_READ(regs.pp_on), 5312 I915_READ(regs.pp_on),
5418 I915_READ(regs.pp_off), 5313 I915_READ(regs.pp_off),
5419 IS_GEN9_LP(dev_priv) ? 5314 (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) ?
5420 (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) : 5315 (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
5421 I915_READ(regs.pp_div)); 5316 I915_READ(regs.pp_div));
5422} 5317}
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index a0995c00fc84..228ca06d9f0b 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -78,8 +78,9 @@ static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
78 * 8-bit or 16 bit value (MSB and LSB) 78 * 8-bit or 16 bit value (MSB and LSB)
79 */ 79 */
80static void 80static void
81intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level) 81intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
82{ 82{
83 struct intel_connector *connector = to_intel_connector(conn_state->connector);
83 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); 84 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
84 uint8_t vals[2] = { 0x0 }; 85 uint8_t vals[2] = { 0x0 };
85 86
@@ -97,8 +98,10 @@ intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level)
97 } 98 }
98} 99}
99 100
100static void intel_dp_aux_enable_backlight(struct intel_connector *connector) 101static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state,
102 const struct drm_connector_state *conn_state)
101{ 103{
104 struct intel_connector *connector = to_intel_connector(conn_state->connector);
102 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); 105 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
103 uint8_t dpcd_buf = 0; 106 uint8_t dpcd_buf = 0;
104 uint8_t edp_backlight_mode = 0; 107 uint8_t edp_backlight_mode = 0;
@@ -131,12 +134,12 @@ static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
131 } 134 }
132 135
133 set_aux_backlight_enable(intel_dp, true); 136 set_aux_backlight_enable(intel_dp, true);
134 intel_dp_aux_set_backlight(connector, connector->panel.backlight.level); 137 intel_dp_aux_set_backlight(conn_state, connector->panel.backlight.level);
135} 138}
136 139
137static void intel_dp_aux_disable_backlight(struct intel_connector *connector) 140static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
138{ 141{
139 set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false); 142 set_aux_backlight_enable(enc_to_intel_dp(old_conn_state->best_encoder), false);
140} 143}
141 144
142static int intel_dp_aux_setup_backlight(struct intel_connector *connector, 145static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
@@ -145,8 +148,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
145 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); 148 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
146 struct intel_panel *panel = &connector->panel; 149 struct intel_panel *panel = &connector->panel;
147 150
148 intel_dp_aux_enable_backlight(connector);
149
150 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) 151 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
151 panel->backlight.max = 0xFFFF; 152 panel->backlight.max = 0xFFFF;
152 else 153 else
@@ -165,7 +166,7 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
165{ 166{
166 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); 167 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
167 168
168 /* Check the eDP Display control capabilities registers to determine if 169 /* Check the eDP Display control capabilities registers to determine if
169 * the panel can support backlight control over the aux channel 170 * the panel can support backlight control over the aux channel
170 */ 171 */
171 if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && 172 if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index b4de632f1158..2f7b0e64f628 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1321,7 +1321,6 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
1321 return true; 1321 return true;
1322} 1322}
1323 1323
1324
1325static bool 1324static bool
1326skl_ddi_dp_set_dpll_hw_state(int clock, 1325skl_ddi_dp_set_dpll_hw_state(int clock,
1327 struct intel_dpll_hw_state *dpll_hw_state) 1326 struct intel_dpll_hw_state *dpll_hw_state)
@@ -1967,6 +1966,438 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
1967 .dump_hw_state = bxt_dump_hw_state, 1966 .dump_hw_state = bxt_dump_hw_state,
1968}; 1967};
1969 1968
1969static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1970 struct intel_shared_dpll *pll)
1971{
1972 uint32_t val;
1973
1974 /* 1. Enable DPLL power in DPLL_ENABLE. */
1975 val = I915_READ(CNL_DPLL_ENABLE(pll->id));
1976 val |= PLL_POWER_ENABLE;
1977 I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
1978
1979 /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
1980 if (intel_wait_for_register(dev_priv,
1981 CNL_DPLL_ENABLE(pll->id),
1982 PLL_POWER_STATE,
1983 PLL_POWER_STATE,
1984 5))
1985 DRM_ERROR("PLL %d Power not enabled\n", pll->id);
1986
1987 /*
1988 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
1989 * select DP mode, and set DP link rate.
1990 */
1991 val = pll->state.hw_state.cfgcr0;
1992 I915_WRITE(CNL_DPLL_CFGCR0(pll->id), val);
1993
1994 /* 4. Reab back to ensure writes completed */
1995 POSTING_READ(CNL_DPLL_CFGCR0(pll->id));
1996
1997 /* 3. Configure DPLL_CFGCR0 */
1998 /* Avoid touch CFGCR1 if HDMI mode is not enabled */
1999 if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) {
2000 val = pll->state.hw_state.cfgcr1;
2001 I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
2002 /* 4. Reab back to ensure writes completed */
2003 POSTING_READ(CNL_DPLL_CFGCR1(pll->id));
2004 }
2005
2006 /*
2007 * 5. If the frequency will result in a change to the voltage
2008 * requirement, follow the Display Voltage Frequency Switching
2009 * Sequence Before Frequency Change
2010 *
2011 * FIXME: (DVFS) is used to adjust the display voltage to match the
2012 * display clock frequencies
2013 */
2014
2015 /* 6. Enable DPLL in DPLL_ENABLE. */
2016 val = I915_READ(CNL_DPLL_ENABLE(pll->id));
2017 val |= PLL_ENABLE;
2018 I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
2019
2020 /* 7. Wait for PLL lock status in DPLL_ENABLE. */
2021 if (intel_wait_for_register(dev_priv,
2022 CNL_DPLL_ENABLE(pll->id),
2023 PLL_LOCK,
2024 PLL_LOCK,
2025 5))
2026 DRM_ERROR("PLL %d not locked\n", pll->id);
2027
2028 /*
2029 * 8. If the frequency will result in a change to the voltage
2030 * requirement, follow the Display Voltage Frequency Switching
2031 * Sequence After Frequency Change
2032 *
2033 * FIXME: (DVFS) is used to adjust the display voltage to match the
2034 * display clock frequencies
2035 */
2036
2037 /*
2038 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2039 * Done at intel_ddi_clk_select
2040 */
2041}
2042
2043static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2044 struct intel_shared_dpll *pll)
2045{
2046 uint32_t val;
2047
2048 /*
2049 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2050 * Done at intel_ddi_post_disable
2051 */
2052
2053 /*
2054 * 2. If the frequency will result in a change to the voltage
2055 * requirement, follow the Display Voltage Frequency Switching
2056 * Sequence Before Frequency Change
2057 *
2058 * FIXME: (DVFS) is used to adjust the display voltage to match the
2059 * display clock frequencies
2060 */
2061
2062 /* 3. Disable DPLL through DPLL_ENABLE. */
2063 val = I915_READ(CNL_DPLL_ENABLE(pll->id));
2064 val &= ~PLL_ENABLE;
2065 I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
2066
2067 /* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2068 if (intel_wait_for_register(dev_priv,
2069 CNL_DPLL_ENABLE(pll->id),
2070 PLL_LOCK,
2071 0,
2072 5))
2073 DRM_ERROR("PLL %d locked\n", pll->id);
2074
2075 /*
2076 * 5. If the frequency will result in a change to the voltage
2077 * requirement, follow the Display Voltage Frequency Switching
2078 * Sequence After Frequency Change
2079 *
2080 * FIXME: (DVFS) is used to adjust the display voltage to match the
2081 * display clock frequencies
2082 */
2083
2084 /* 6. Disable DPLL power in DPLL_ENABLE. */
2085 val = I915_READ(CNL_DPLL_ENABLE(pll->id));
2086 val &= ~PLL_POWER_ENABLE;
2087 I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
2088
2089 /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2090 if (intel_wait_for_register(dev_priv,
2091 CNL_DPLL_ENABLE(pll->id),
2092 PLL_POWER_STATE,
2093 0,
2094 5))
2095 DRM_ERROR("PLL %d Power not disabled\n", pll->id);
2096}
2097
2098static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2099 struct intel_shared_dpll *pll,
2100 struct intel_dpll_hw_state *hw_state)
2101{
2102 uint32_t val;
2103 bool ret;
2104
2105 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2106 return false;
2107
2108 ret = false;
2109
2110 val = I915_READ(CNL_DPLL_ENABLE(pll->id));
2111 if (!(val & PLL_ENABLE))
2112 goto out;
2113
2114 val = I915_READ(CNL_DPLL_CFGCR0(pll->id));
2115 hw_state->cfgcr0 = val;
2116
2117 /* avoid reading back stale values if HDMI mode is not enabled */
2118 if (val & DPLL_CFGCR0_HDMI_MODE) {
2119 hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll->id));
2120 }
2121 ret = true;
2122
2123out:
2124 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2125
2126 return ret;
2127}
2128
2129static void cnl_wrpll_get_multipliers(unsigned int bestdiv,
2130 unsigned int *pdiv,
2131 unsigned int *qdiv,
2132 unsigned int *kdiv)
2133{
2134 /* even dividers */
2135 if (bestdiv % 2 == 0) {
2136 if (bestdiv == 2) {
2137 *pdiv = 2;
2138 *qdiv = 1;
2139 *kdiv = 1;
2140 } else if (bestdiv % 4 == 0) {
2141 *pdiv = 2;
2142 *qdiv = bestdiv / 4;
2143 *kdiv = 2;
2144 } else if (bestdiv % 6 == 0) {
2145 *pdiv = 3;
2146 *qdiv = bestdiv / 6;
2147 *kdiv = 2;
2148 } else if (bestdiv % 5 == 0) {
2149 *pdiv = 5;
2150 *qdiv = bestdiv / 10;
2151 *kdiv = 2;
2152 } else if (bestdiv % 14 == 0) {
2153 *pdiv = 7;
2154 *qdiv = bestdiv / 14;
2155 *kdiv = 2;
2156 }
2157 } else {
2158 if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2159 *pdiv = bestdiv;
2160 *qdiv = 1;
2161 *kdiv = 1;
2162 } else { /* 9, 15, 21 */
2163 *pdiv = bestdiv / 3;
2164 *qdiv = 1;
2165 *kdiv = 3;
2166 }
2167 }
2168}
2169
2170static void cnl_wrpll_params_populate(struct skl_wrpll_params *params, uint32_t dco_freq,
2171 uint32_t ref_freq, uint32_t pdiv, uint32_t qdiv,
2172 uint32_t kdiv)
2173{
2174 switch (kdiv) {
2175 case 1:
2176 params->kdiv = 1;
2177 break;
2178 case 2:
2179 params->kdiv = 2;
2180 break;
2181 case 3:
2182 params->kdiv = 4;
2183 break;
2184 default:
2185 WARN(1, "Incorrect KDiv\n");
2186 }
2187
2188 switch (pdiv) {
2189 case 2:
2190 params->pdiv = 1;
2191 break;
2192 case 3:
2193 params->pdiv = 2;
2194 break;
2195 case 5:
2196 params->pdiv = 4;
2197 break;
2198 case 7:
2199 params->pdiv = 8;
2200 break;
2201 default:
2202 WARN(1, "Incorrect PDiv\n");
2203 }
2204
2205 if (kdiv != 2)
2206 qdiv = 1;
2207
2208 params->qdiv_ratio = qdiv;
2209 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2210
2211 params->dco_integer = div_u64(dco_freq, ref_freq);
2212 params->dco_fraction = div_u64((div_u64((uint64_t)dco_freq<<15, (uint64_t)ref_freq) -
2213 ((uint64_t)params->dco_integer<<15)) * 0x8000, 0x8000);
2214}
2215
2216static bool
2217cnl_ddi_calculate_wrpll(int clock /* in Hz */,
2218 struct drm_i915_private *dev_priv,
2219 struct skl_wrpll_params *wrpll_params)
2220{
2221 uint64_t afe_clock = clock * 5 / KHz(1); /* clocks in kHz */
2222 unsigned int dco_min = 7998 * KHz(1);
2223 unsigned int dco_max = 10000 * KHz(1);
2224 unsigned int dco_mid = (dco_min + dco_max) / 2;
2225
2226 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2227 18, 20, 24, 28, 30, 32, 36, 40,
2228 42, 44, 48, 50, 52, 54, 56, 60,
2229 64, 66, 68, 70, 72, 76, 78, 80,
2230 84, 88, 90, 92, 96, 98, 100, 102,
2231 3, 5, 7, 9, 15, 21 };
2232 unsigned int d, dco;
2233 unsigned int dco_centrality = 0;
2234 unsigned int best_dco_centrality = 999999;
2235 unsigned int best_div = 0;
2236 unsigned int best_dco = 0;
2237 unsigned int pdiv = 0, qdiv = 0, kdiv = 0;
2238
2239 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2240 dco = afe_clock * dividers[d];
2241
2242 if ((dco <= dco_max) && (dco >= dco_min)) {
2243 dco_centrality = abs(dco - dco_mid);
2244
2245 if (dco_centrality < best_dco_centrality) {
2246 best_dco_centrality = dco_centrality;
2247 best_div = dividers[d];
2248 best_dco = dco;
2249 }
2250 }
2251 }
2252
2253 if (best_div == 0)
2254 return false;
2255
2256 cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2257
2258 cnl_wrpll_params_populate(wrpll_params, best_dco,
2259 dev_priv->cdclk.hw.ref, pdiv, qdiv, kdiv);
2260
2261 return true;
2262}
2263
2264static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
2265 struct intel_crtc_state *crtc_state,
2266 int clock)
2267{
2268 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2269 uint32_t cfgcr0, cfgcr1;
2270 struct skl_wrpll_params wrpll_params = { 0, };
2271
2272 cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2273
2274 if (!cnl_ddi_calculate_wrpll(clock * 1000, dev_priv, &wrpll_params))
2275 return false;
2276
2277 cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2278 wrpll_params.dco_integer;
2279
2280 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2281 DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2282 DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2283 DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2284 wrpll_params.central_freq |
2285 DPLL_CFGCR1_CENTRAL_FREQ;
2286
2287 memset(&crtc_state->dpll_hw_state, 0,
2288 sizeof(crtc_state->dpll_hw_state));
2289
2290 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2291 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2292 return true;
2293}
2294
2295static bool
2296cnl_ddi_dp_set_dpll_hw_state(int clock,
2297 struct intel_dpll_hw_state *dpll_hw_state)
2298{
2299 uint32_t cfgcr0;
2300
2301 cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2302
2303 switch (clock / 2) {
2304 case 81000:
2305 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2306 break;
2307 case 135000:
2308 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2309 break;
2310 case 270000:
2311 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2312 break;
2313 /* eDP 1.4 rates */
2314 case 162000:
2315 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2316 break;
2317 case 108000:
2318 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2319 break;
2320 case 216000:
2321 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2322 break;
2323 case 324000:
2324 /* Some SKUs may require elevated I/O voltage to support this */
2325 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2326 break;
2327 case 405000:
2328 /* Some SKUs may require elevated I/O voltage to support this */
2329 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2330 break;
2331 }
2332
2333 dpll_hw_state->cfgcr0 = cfgcr0;
2334 return true;
2335}
2336
2337static struct intel_shared_dpll *
2338cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
2339 struct intel_encoder *encoder)
2340{
2341 struct intel_shared_dpll *pll;
2342 int clock = crtc_state->port_clock;
2343 bool bret;
2344 struct intel_dpll_hw_state dpll_hw_state;
2345
2346 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
2347
2348 if (encoder->type == INTEL_OUTPUT_HDMI) {
2349 bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
2350 if (!bret) {
2351 DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
2352 return NULL;
2353 }
2354 } else if (encoder->type == INTEL_OUTPUT_DP ||
2355 encoder->type == INTEL_OUTPUT_DP_MST ||
2356 encoder->type == INTEL_OUTPUT_EDP) {
2357 bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
2358 if (!bret) {
2359 DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
2360 return NULL;
2361 }
2362 crtc_state->dpll_hw_state = dpll_hw_state;
2363 } else {
2364 DRM_DEBUG_KMS("Skip DPLL setup for encoder %d\n",
2365 encoder->type);
2366 return NULL;
2367 }
2368
2369 pll = intel_find_shared_dpll(crtc, crtc_state,
2370 DPLL_ID_SKL_DPLL0,
2371 DPLL_ID_SKL_DPLL2);
2372 if (!pll) {
2373 DRM_DEBUG_KMS("No PLL selected\n");
2374 return NULL;
2375 }
2376
2377 intel_reference_shared_dpll(pll, crtc_state);
2378
2379 return pll;
2380}
2381
2382static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2383 .enable = cnl_ddi_pll_enable,
2384 .disable = cnl_ddi_pll_disable,
2385 .get_hw_state = cnl_ddi_pll_get_hw_state,
2386};
2387
2388static const struct dpll_info cnl_plls[] = {
2389 { "DPLL 0", DPLL_ID_SKL_DPLL0, &cnl_ddi_pll_funcs, 0 },
2390 { "DPLL 1", DPLL_ID_SKL_DPLL1, &cnl_ddi_pll_funcs, 0 },
2391 { "DPLL 2", DPLL_ID_SKL_DPLL2, &cnl_ddi_pll_funcs, 0 },
2392 { NULL, -1, NULL, },
2393};
2394
2395static const struct intel_dpll_mgr cnl_pll_mgr = {
2396 .dpll_info = cnl_plls,
2397 .get_dpll = cnl_get_dpll,
2398 .dump_hw_state = skl_dump_hw_state,
2399};
2400
1970/** 2401/**
1971 * intel_shared_dpll_init - Initialize shared DPLLs 2402 * intel_shared_dpll_init - Initialize shared DPLLs
1972 * @dev: drm device 2403 * @dev: drm device
@@ -1980,7 +2411,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
1980 const struct dpll_info *dpll_info; 2411 const struct dpll_info *dpll_info;
1981 int i; 2412 int i;
1982 2413
1983 if (IS_GEN9_BC(dev_priv)) 2414 if (IS_CANNONLAKE(dev_priv))
2415 dpll_mgr = &cnl_pll_mgr;
2416 else if (IS_GEN9_BC(dev_priv))
1984 dpll_mgr = &skl_pll_mgr; 2417 dpll_mgr = &skl_pll_mgr;
1985 else if (IS_GEN9_LP(dev_priv)) 2418 else if (IS_GEN9_LP(dev_priv))
1986 dpll_mgr = &bxt_pll_mgr; 2419 dpll_mgr = &bxt_pll_mgr;
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index f8d13a947c13..f24ccf443d25 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -128,6 +128,10 @@ struct intel_dpll_hw_state {
128 /* HDMI only, 0 when used for DP */ 128 /* HDMI only, 0 when used for DP */
129 uint32_t cfgcr1, cfgcr2; 129 uint32_t cfgcr1, cfgcr2;
130 130
131 /* cnl */
132 uint32_t cfgcr0;
133 /* CNL also uses cfgcr1 */
134
131 /* bxt */ 135 /* bxt */
132 uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, 136 uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
133 pcsdw12; 137 pcsdw12;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 71f94a01aedd..d93efb49a2e2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -266,7 +266,6 @@ struct intel_encoder {
266struct intel_panel { 266struct intel_panel {
267 struct drm_display_mode *fixed_mode; 267 struct drm_display_mode *fixed_mode;
268 struct drm_display_mode *downclock_mode; 268 struct drm_display_mode *downclock_mode;
269 int fitting_mode;
270 269
271 /* backlight */ 270 /* backlight */
272 struct { 271 struct {
@@ -289,9 +288,10 @@ struct intel_panel {
289 /* Connector and platform specific backlight functions */ 288 /* Connector and platform specific backlight functions */
290 int (*setup)(struct intel_connector *connector, enum pipe pipe); 289 int (*setup)(struct intel_connector *connector, enum pipe pipe);
291 uint32_t (*get)(struct intel_connector *connector); 290 uint32_t (*get)(struct intel_connector *connector);
292 void (*set)(struct intel_connector *connector, uint32_t level); 291 void (*set)(const struct drm_connector_state *conn_state, uint32_t level);
293 void (*disable)(struct intel_connector *connector); 292 void (*disable)(const struct drm_connector_state *conn_state);
294 void (*enable)(struct intel_connector *connector); 293 void (*enable)(const struct intel_crtc_state *crtc_state,
294 const struct drm_connector_state *conn_state);
295 uint32_t (*hz_to_pwm)(struct intel_connector *connector, 295 uint32_t (*hz_to_pwm)(struct intel_connector *connector,
296 uint32_t hz); 296 uint32_t hz);
297 void (*power)(struct intel_connector *, bool enable); 297 void (*power)(struct intel_connector *, bool enable);
@@ -331,6 +331,15 @@ struct intel_connector {
331 struct work_struct modeset_retry_work; 331 struct work_struct modeset_retry_work;
332}; 332};
333 333
334struct intel_digital_connector_state {
335 struct drm_connector_state base;
336
337 enum hdmi_force_audio force_audio;
338 int broadcast_rgb;
339};
340
341#define to_intel_digital_connector_state(x) container_of(x, struct intel_digital_connector_state, base)
342
334struct dpll { 343struct dpll {
335 /* given values */ 344 /* given values */
336 int n; 345 int n;
@@ -896,11 +905,8 @@ struct intel_hdmi {
896 enum drm_dp_dual_mode_type type; 905 enum drm_dp_dual_mode_type type;
897 int max_tmds_clock; 906 int max_tmds_clock;
898 } dp_dual_mode; 907 } dp_dual_mode;
899 bool limited_color_range;
900 bool color_range_auto;
901 bool has_hdmi_sink; 908 bool has_hdmi_sink;
902 bool has_audio; 909 bool has_audio;
903 enum hdmi_force_audio force_audio;
904 bool rgb_quant_range_selectable; 910 bool rgb_quant_range_selectable;
905 struct intel_connector *attached_connector; 911 struct intel_connector *attached_connector;
906 void (*write_infoframe)(struct drm_encoder *encoder, 912 void (*write_infoframe)(struct drm_encoder *encoder,
@@ -966,9 +972,6 @@ struct intel_dp {
966 bool detect_done; 972 bool detect_done;
967 bool channel_eq_status; 973 bool channel_eq_status;
968 bool reset_link_params; 974 bool reset_link_params;
969 enum hdmi_force_audio force_audio;
970 bool limited_color_range;
971 bool color_range_auto;
972 uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; 975 uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
973 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; 976 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
974 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; 977 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
@@ -1307,6 +1310,8 @@ void intel_audio_deinit(struct drm_i915_private *dev_priv);
1307/* intel_cdclk.c */ 1310/* intel_cdclk.c */
1308void skl_init_cdclk(struct drm_i915_private *dev_priv); 1311void skl_init_cdclk(struct drm_i915_private *dev_priv);
1309void skl_uninit_cdclk(struct drm_i915_private *dev_priv); 1312void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
1313void cnl_init_cdclk(struct drm_i915_private *dev_priv);
1314void cnl_uninit_cdclk(struct drm_i915_private *dev_priv);
1310void bxt_init_cdclk(struct drm_i915_private *dev_priv); 1315void bxt_init_cdclk(struct drm_i915_private *dev_priv);
1311void bxt_uninit_cdclk(struct drm_i915_private *dev_priv); 1316void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
1312void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv); 1317void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
@@ -1319,6 +1324,8 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
1319 const struct intel_cdclk_state *cdclk_state); 1324 const struct intel_cdclk_state *cdclk_state);
1320 1325
1321/* intel_display.c */ 1326/* intel_display.c */
1327void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
1328void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
1322enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc); 1329enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc);
1323void intel_update_rawclk(struct drm_i915_private *dev_priv); 1330void intel_update_rawclk(struct drm_i915_private *dev_priv);
1324int vlv_get_hpll_vco(struct drm_i915_private *dev_priv); 1331int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
@@ -1339,7 +1346,6 @@ unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info
1339bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv); 1346bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
1340void intel_mark_busy(struct drm_i915_private *dev_priv); 1347void intel_mark_busy(struct drm_i915_private *dev_priv);
1341void intel_mark_idle(struct drm_i915_private *dev_priv); 1348void intel_mark_idle(struct drm_i915_private *dev_priv);
1342void intel_crtc_restore_mode(struct drm_crtc *crtc);
1343int intel_display_suspend(struct drm_device *dev); 1349int intel_display_suspend(struct drm_device *dev);
1344void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv); 1350void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
1345void intel_encoder_destroy(struct drm_encoder *encoder); 1351void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -1518,8 +1524,9 @@ bool intel_dp_compute_config(struct intel_encoder *encoder,
1518bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port); 1524bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port);
1519enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, 1525enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
1520 bool long_hpd); 1526 bool long_hpd);
1521void intel_edp_backlight_on(struct intel_dp *intel_dp); 1527void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
1522void intel_edp_backlight_off(struct intel_dp *intel_dp); 1528 const struct drm_connector_state *conn_state);
1529void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
1523void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); 1530void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
1524void intel_edp_panel_on(struct intel_dp *intel_dp); 1531void intel_edp_panel_on(struct intel_dp *intel_dp);
1525void intel_edp_panel_off(struct intel_dp *intel_dp); 1532void intel_edp_panel_off(struct intel_dp *intel_dp);
@@ -1699,12 +1706,13 @@ void intel_pch_panel_fitting(struct intel_crtc *crtc,
1699void intel_gmch_panel_fitting(struct intel_crtc *crtc, 1706void intel_gmch_panel_fitting(struct intel_crtc *crtc,
1700 struct intel_crtc_state *pipe_config, 1707 struct intel_crtc_state *pipe_config,
1701 int fitting_mode); 1708 int fitting_mode);
1702void intel_panel_set_backlight_acpi(struct intel_connector *connector, 1709void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
1703 u32 level, u32 max); 1710 u32 level, u32 max);
1704int intel_panel_setup_backlight(struct drm_connector *connector, 1711int intel_panel_setup_backlight(struct drm_connector *connector,
1705 enum pipe pipe); 1712 enum pipe pipe);
1706void intel_panel_enable_backlight(struct intel_connector *connector); 1713void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
1707void intel_panel_disable_backlight(struct intel_connector *connector); 1714 const struct drm_connector_state *conn_state);
1715void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
1708void intel_panel_destroy_backlight(struct drm_connector *connector); 1716void intel_panel_destroy_backlight(struct drm_connector *connector);
1709enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv); 1717enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv);
1710extern struct drm_display_mode *intel_find_panel_downclock( 1718extern struct drm_display_mode *intel_find_panel_downclock(
@@ -1874,6 +1882,8 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
1874 int ignore); 1882 int ignore);
1875bool ilk_disable_lp_wm(struct drm_device *dev); 1883bool ilk_disable_lp_wm(struct drm_device *dev);
1876int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6); 1884int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
1885int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
1886 struct intel_crtc_state *cstate);
1877static inline int intel_enable_rc6(void) 1887static inline int intel_enable_rc6(void)
1878{ 1888{
1879 return i915.enable_rc6; 1889 return i915.enable_rc6;
@@ -1898,10 +1908,19 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
1898void intel_tv_init(struct drm_i915_private *dev_priv); 1908void intel_tv_init(struct drm_i915_private *dev_priv);
1899 1909
1900/* intel_atomic.c */ 1910/* intel_atomic.c */
1901int intel_connector_atomic_get_property(struct drm_connector *connector, 1911int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
1902 const struct drm_connector_state *state, 1912 const struct drm_connector_state *state,
1903 struct drm_property *property, 1913 struct drm_property *property,
1904 uint64_t *val); 1914 uint64_t *val);
1915int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
1916 struct drm_connector_state *state,
1917 struct drm_property *property,
1918 uint64_t val);
1919int intel_digital_connector_atomic_check(struct drm_connector *conn,
1920 struct drm_connector_state *new_state);
1921struct drm_connector_state *
1922intel_digital_connector_duplicate_state(struct drm_connector *connector);
1923
1905struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc); 1924struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
1906void intel_crtc_destroy_state(struct drm_crtc *crtc, 1925void intel_crtc_destroy_state(struct drm_crtc *crtc,
1907 struct drm_crtc_state *state); 1926 struct drm_crtc_state *state);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index fc0ef492252a..50ec836da8b1 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -320,10 +320,10 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
320 320
321 if (HAS_GMCH_DISPLAY(dev_priv)) 321 if (HAS_GMCH_DISPLAY(dev_priv))
322 intel_gmch_panel_fitting(crtc, pipe_config, 322 intel_gmch_panel_fitting(crtc, pipe_config,
323 intel_connector->panel.fitting_mode); 323 conn_state->scaling_mode);
324 else 324 else
325 intel_pch_panel_fitting(crtc, pipe_config, 325 intel_pch_panel_fitting(crtc, pipe_config,
326 intel_connector->panel.fitting_mode); 326 conn_state->scaling_mode);
327 } 327 }
328 328
329 /* DSI uses short packets for sync events, so clear mode flags for DSI */ 329 /* DSI uses short packets for sync events, so clear mode flags for DSI */
@@ -346,12 +346,13 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
346 return true; 346 return true;
347} 347}
348 348
349static void glk_dsi_device_ready(struct intel_encoder *encoder) 349static bool glk_dsi_enable_io(struct intel_encoder *encoder)
350{ 350{
351 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 351 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
352 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 352 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
353 enum port port; 353 enum port port;
354 u32 tmp, val; 354 u32 tmp;
355 bool cold_boot = false;
355 356
356 /* Set the MIPI mode 357 /* Set the MIPI mode
357 * If MIPI_Mode is off, then writing to LP_Wake bit is not reflecting. 358 * If MIPI_Mode is off, then writing to LP_Wake bit is not reflecting.
@@ -370,7 +371,10 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
370 /* Program LP Wake */ 371 /* Program LP Wake */
371 for_each_dsi_port(port, intel_dsi->ports) { 372 for_each_dsi_port(port, intel_dsi->ports) {
372 tmp = I915_READ(MIPI_CTRL(port)); 373 tmp = I915_READ(MIPI_CTRL(port));
373 tmp |= GLK_LP_WAKE; 374 if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
375 tmp &= ~GLK_LP_WAKE;
376 else
377 tmp |= GLK_LP_WAKE;
374 I915_WRITE(MIPI_CTRL(port), tmp); 378 I915_WRITE(MIPI_CTRL(port), tmp);
375 } 379 }
376 380
@@ -382,6 +386,22 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
382 DRM_ERROR("MIPIO port is powergated\n"); 386 DRM_ERROR("MIPIO port is powergated\n");
383 } 387 }
384 388
389 /* Check for cold boot scenario */
390 for_each_dsi_port(port, intel_dsi->ports) {
391 cold_boot |= !(I915_READ(MIPI_DEVICE_READY(port)) &
392 DEVICE_READY);
393 }
394
395 return cold_boot;
396}
397
398static void glk_dsi_device_ready(struct intel_encoder *encoder)
399{
400 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
401 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
402 enum port port;
403 u32 val;
404
385 /* Wait for MIPI PHY status bit to set */ 405 /* Wait for MIPI PHY status bit to set */
386 for_each_dsi_port(port, intel_dsi->ports) { 406 for_each_dsi_port(port, intel_dsi->ports) {
387 if (intel_wait_for_register(dev_priv, 407 if (intel_wait_for_register(dev_priv,
@@ -391,8 +411,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
391 } 411 }
392 412
393 /* Get IO out of reset */ 413 /* Get IO out of reset */
394 tmp = I915_READ(MIPI_CTRL(PORT_A)); 414 val = I915_READ(MIPI_CTRL(PORT_A));
395 I915_WRITE(MIPI_CTRL(PORT_A), tmp | GLK_MIPIIO_RESET_RELEASED); 415 I915_WRITE(MIPI_CTRL(PORT_A), val | GLK_MIPIIO_RESET_RELEASED);
396 416
397 /* Get IO out of Low power state*/ 417 /* Get IO out of Low power state*/
398 for_each_dsi_port(port, intel_dsi->ports) { 418 for_each_dsi_port(port, intel_dsi->ports) {
@@ -402,34 +422,34 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
402 val |= DEVICE_READY; 422 val |= DEVICE_READY;
403 I915_WRITE(MIPI_DEVICE_READY(port), val); 423 I915_WRITE(MIPI_DEVICE_READY(port), val);
404 usleep_range(10, 15); 424 usleep_range(10, 15);
405 } 425 } else {
406 426 /* Enter ULPS */
407 /* Enter ULPS */ 427 val = I915_READ(MIPI_DEVICE_READY(port));
408 val = I915_READ(MIPI_DEVICE_READY(port)); 428 val &= ~ULPS_STATE_MASK;
409 val &= ~ULPS_STATE_MASK; 429 val |= (ULPS_STATE_ENTER | DEVICE_READY);
410 val |= (ULPS_STATE_ENTER | DEVICE_READY); 430 I915_WRITE(MIPI_DEVICE_READY(port), val);
411 I915_WRITE(MIPI_DEVICE_READY(port), val);
412 431
413 /* Wait for ULPS active */ 432 /* Wait for ULPS active */
414 if (intel_wait_for_register(dev_priv, 433 if (intel_wait_for_register(dev_priv,
415 MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20)) 434 MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20))
416 DRM_ERROR("ULPS not active\n"); 435 DRM_ERROR("ULPS not active\n");
417 436
418 /* Exit ULPS */ 437 /* Exit ULPS */
419 val = I915_READ(MIPI_DEVICE_READY(port)); 438 val = I915_READ(MIPI_DEVICE_READY(port));
420 val &= ~ULPS_STATE_MASK; 439 val &= ~ULPS_STATE_MASK;
421 val |= (ULPS_STATE_EXIT | DEVICE_READY); 440 val |= (ULPS_STATE_EXIT | DEVICE_READY);
422 I915_WRITE(MIPI_DEVICE_READY(port), val); 441 I915_WRITE(MIPI_DEVICE_READY(port), val);
423 442
424 /* Enter Normal Mode */ 443 /* Enter Normal Mode */
425 val = I915_READ(MIPI_DEVICE_READY(port)); 444 val = I915_READ(MIPI_DEVICE_READY(port));
426 val &= ~ULPS_STATE_MASK; 445 val &= ~ULPS_STATE_MASK;
427 val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY); 446 val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
428 I915_WRITE(MIPI_DEVICE_READY(port), val); 447 I915_WRITE(MIPI_DEVICE_READY(port), val);
429 448
430 tmp = I915_READ(MIPI_CTRL(port)); 449 val = I915_READ(MIPI_CTRL(port));
431 tmp &= ~GLK_LP_WAKE; 450 val &= ~GLK_LP_WAKE;
432 I915_WRITE(MIPI_CTRL(port), tmp); 451 I915_WRITE(MIPI_CTRL(port), val);
452 }
433 } 453 }
434 454
435 /* Wait for Stop state */ 455 /* Wait for Stop state */
@@ -770,6 +790,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
770 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 790 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
771 enum port port; 791 enum port port;
772 u32 val; 792 u32 val;
793 bool glk_cold_boot = false;
773 794
774 DRM_DEBUG_KMS("\n"); 795 DRM_DEBUG_KMS("\n");
775 796
@@ -800,7 +821,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
800 I915_WRITE(DSPCLK_GATE_D, val); 821 I915_WRITE(DSPCLK_GATE_D, val);
801 } 822 }
802 823
803 intel_dsi_prepare(encoder, pipe_config); 824 if (!IS_GEMINILAKE(dev_priv))
825 intel_dsi_prepare(encoder, pipe_config);
804 826
805 /* Power on, try both CRC pmic gpio and VBT */ 827 /* Power on, try both CRC pmic gpio and VBT */
806 if (intel_dsi->gpio_panel) 828 if (intel_dsi->gpio_panel)
@@ -811,9 +833,21 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
811 /* Deassert reset */ 833 /* Deassert reset */
812 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); 834 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
813 835
836 if (IS_GEMINILAKE(dev_priv)) {
837 glk_cold_boot = glk_dsi_enable_io(encoder);
838
839 /* Prepare port in cold boot(s3/s4) scenario */
840 if (glk_cold_boot)
841 intel_dsi_prepare(encoder, pipe_config);
842 }
843
814 /* Put device in ready state (LP-11) */ 844 /* Put device in ready state (LP-11) */
815 intel_dsi_device_ready(encoder); 845 intel_dsi_device_ready(encoder);
816 846
847 /* Prepare port in normal boot scenario */
848 if (IS_GEMINILAKE(dev_priv) && !glk_cold_boot)
849 intel_dsi_prepare(encoder, pipe_config);
850
817 /* Send initialization commands in LP mode */ 851 /* Send initialization commands in LP mode */
818 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP); 852 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
819 853
@@ -835,7 +869,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
835 intel_dsi_port_enable(encoder); 869 intel_dsi_port_enable(encoder);
836 } 870 }
837 871
838 intel_panel_enable_backlight(intel_dsi->attached_connector); 872 intel_panel_enable_backlight(pipe_config, conn_state);
839 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON); 873 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
840} 874}
841 875
@@ -866,7 +900,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
866 DRM_DEBUG_KMS("\n"); 900 DRM_DEBUG_KMS("\n");
867 901
868 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); 902 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
869 intel_panel_disable_backlight(intel_dsi->attached_connector); 903 intel_panel_disable_backlight(old_conn_state);
870 904
871 /* 905 /*
872 * Disable Device ready before the port shutdown in order 906 * Disable Device ready before the port shutdown in order
@@ -1587,48 +1621,6 @@ static int intel_dsi_get_modes(struct drm_connector *connector)
1587 return 1; 1621 return 1;
1588} 1622}
1589 1623
1590static int intel_dsi_set_property(struct drm_connector *connector,
1591 struct drm_property *property,
1592 uint64_t val)
1593{
1594 struct drm_device *dev = connector->dev;
1595 struct intel_connector *intel_connector = to_intel_connector(connector);
1596 struct drm_crtc *crtc;
1597 int ret;
1598
1599 ret = drm_object_property_set_value(&connector->base, property, val);
1600 if (ret)
1601 return ret;
1602
1603 if (property == dev->mode_config.scaling_mode_property) {
1604 if (val == DRM_MODE_SCALE_NONE) {
1605 DRM_DEBUG_KMS("no scaling not supported\n");
1606 return -EINVAL;
1607 }
1608 if (HAS_GMCH_DISPLAY(to_i915(dev)) &&
1609 val == DRM_MODE_SCALE_CENTER) {
1610 DRM_DEBUG_KMS("centering not supported\n");
1611 return -EINVAL;
1612 }
1613
1614 if (intel_connector->panel.fitting_mode == val)
1615 return 0;
1616
1617 intel_connector->panel.fitting_mode = val;
1618 }
1619
1620 crtc = connector->state->crtc;
1621 if (crtc && crtc->state->enable) {
1622 /*
1623 * If the CRTC is enabled, the display will be changed
1624 * according to the new panel fitting mode.
1625 */
1626 intel_crtc_restore_mode(crtc);
1627 }
1628
1629 return 0;
1630}
1631
1632static void intel_dsi_connector_destroy(struct drm_connector *connector) 1624static void intel_dsi_connector_destroy(struct drm_connector *connector)
1633{ 1625{
1634 struct intel_connector *intel_connector = to_intel_connector(connector); 1626 struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -1657,6 +1649,7 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
1657static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = { 1649static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
1658 .get_modes = intel_dsi_get_modes, 1650 .get_modes = intel_dsi_get_modes,
1659 .mode_valid = intel_dsi_mode_valid, 1651 .mode_valid = intel_dsi_mode_valid,
1652 .atomic_check = intel_digital_connector_atomic_check,
1660}; 1653};
1661 1654
1662static const struct drm_connector_funcs intel_dsi_connector_funcs = { 1655static const struct drm_connector_funcs intel_dsi_connector_funcs = {
@@ -1665,22 +1658,28 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
1665 .early_unregister = intel_connector_unregister, 1658 .early_unregister = intel_connector_unregister,
1666 .destroy = intel_dsi_connector_destroy, 1659 .destroy = intel_dsi_connector_destroy,
1667 .fill_modes = drm_helper_probe_single_connector_modes, 1660 .fill_modes = drm_helper_probe_single_connector_modes,
1668 .set_property = intel_dsi_set_property, 1661 .set_property = drm_atomic_helper_connector_set_property,
1669 .atomic_get_property = intel_connector_atomic_get_property, 1662 .atomic_get_property = intel_digital_connector_atomic_get_property,
1663 .atomic_set_property = intel_digital_connector_atomic_set_property,
1670 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 1664 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1671 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 1665 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
1672}; 1666};
1673 1667
1674static void intel_dsi_add_properties(struct intel_connector *connector) 1668static void intel_dsi_add_properties(struct intel_connector *connector)
1675{ 1669{
1676 struct drm_device *dev = connector->base.dev; 1670 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1677 1671
1678 if (connector->panel.fixed_mode) { 1672 if (connector->panel.fixed_mode) {
1679 drm_mode_create_scaling_mode_property(dev); 1673 u32 allowed_scalers;
1680 drm_object_attach_property(&connector->base.base, 1674
1681 dev->mode_config.scaling_mode_property, 1675 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
1682 DRM_MODE_SCALE_ASPECT); 1676 if (!HAS_GMCH_DISPLAY(dev_priv))
1683 connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; 1677 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
1678
1679 drm_connector_attach_scaling_mode_property(&connector->base,
1680 allowed_scalers);
1681
1682 connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
1684 } 1683 }
1685} 1684}
1686 1685
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
index ac7c6020c443..6e09ceb71500 100644
--- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -60,10 +60,9 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
60 return data; 60 return data;
61} 61}
62 62
63static void dcs_set_backlight(struct intel_connector *connector, u32 level) 63static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 level)
64{ 64{
65 struct intel_encoder *encoder = connector->encoder; 65 struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
66 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
67 struct mipi_dsi_device *dsi_device; 66 struct mipi_dsi_device *dsi_device;
68 u8 data = level; 67 u8 data = level;
69 enum port port; 68 enum port port;
@@ -76,14 +75,13 @@ static void dcs_set_backlight(struct intel_connector *connector, u32 level)
76 } 75 }
77} 76}
78 77
79static void dcs_disable_backlight(struct intel_connector *connector) 78static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
80{ 79{
81 struct intel_encoder *encoder = connector->encoder; 80 struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
82 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
83 struct mipi_dsi_device *dsi_device; 81 struct mipi_dsi_device *dsi_device;
84 enum port port; 82 enum port port;
85 83
86 dcs_set_backlight(connector, 0); 84 dcs_set_backlight(conn_state, 0);
87 85
88 for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) { 86 for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
89 u8 cabc = POWER_SAVE_OFF; 87 u8 cabc = POWER_SAVE_OFF;
@@ -110,11 +108,11 @@ static void dcs_disable_backlight(struct intel_connector *connector)
110 } 108 }
111} 109}
112 110
113static void dcs_enable_backlight(struct intel_connector *connector) 111static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
112 const struct drm_connector_state *conn_state)
114{ 113{
115 struct intel_encoder *encoder = connector->encoder; 114 struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
116 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 115 struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
117 struct intel_panel *panel = &connector->panel;
118 struct mipi_dsi_device *dsi_device; 116 struct mipi_dsi_device *dsi_device;
119 enum port port; 117 enum port port;
120 118
@@ -142,7 +140,7 @@ static void dcs_enable_backlight(struct intel_connector *connector)
142 &cabc, sizeof(cabc)); 140 &cabc, sizeof(cabc));
143 } 141 }
144 142
145 dcs_set_backlight(connector, panel->backlight.level); 143 dcs_set_backlight(conn_state, panel->backlight.level);
146} 144}
147 145
148static int dcs_setup_backlight(struct intel_connector *connector, 146static int dcs_setup_backlight(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 699f2d3861c7..a4487c5b7e37 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -814,26 +814,27 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
814 struct drm_i915_private *dev_priv = engine->i915; 814 struct drm_i915_private *dev_priv = engine->i915;
815 int ret; 815 int ret;
816 816
817 /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk */ 817 /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
818 I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE)); 818 I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
819 819
820 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk */ 820 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
821 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | 821 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
822 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); 822 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
823 823
824 /* WaDisableKillLogic:bxt,skl,kbl */ 824 /* WaDisableKillLogic:bxt,skl,kbl,cfl */
825 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 825 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
826 ECOCHK_DIS_TLB); 826 ECOCHK_DIS_TLB);
827 827
828 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk */ 828 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
829 /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk */ 829 /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
830 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 830 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
831 FLOW_CONTROL_ENABLE | 831 FLOW_CONTROL_ENABLE |
832 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); 832 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
833 833
834 /* Syncing dependencies between camera and graphics:skl,bxt,kbl */ 834 /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
835 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 835 if (!IS_COFFEELAKE(dev_priv))
836 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); 836 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
837 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
837 838
838 /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */ 839 /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
839 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) 840 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
@@ -851,18 +852,18 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
851 */ 852 */
852 } 853 }
853 854
854 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk */ 855 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
855 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */ 856 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
856 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, 857 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
857 GEN9_ENABLE_YV12_BUGFIX | 858 GEN9_ENABLE_YV12_BUGFIX |
858 GEN9_ENABLE_GPGPU_PREEMPTION); 859 GEN9_ENABLE_GPGPU_PREEMPTION);
859 860
860 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk */ 861 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
861 /* WaDisablePartialResolveInVc:skl,bxt,kbl */ 862 /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
862 WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE | 863 WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
863 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE)); 864 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
864 865
865 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk */ 866 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
866 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, 867 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
867 GEN9_CCS_TLB_PREFETCH_ENABLE); 868 GEN9_CCS_TLB_PREFETCH_ENABLE);
868 869
@@ -871,7 +872,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
871 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, 872 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
872 PIXEL_MASK_CAMMING_DISABLE); 873 PIXEL_MASK_CAMMING_DISABLE);
873 874
874 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */ 875 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
875 WA_SET_BIT_MASKED(HDC_CHICKEN0, 876 WA_SET_BIT_MASKED(HDC_CHICKEN0,
876 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | 877 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
877 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE); 878 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
@@ -889,39 +890,41 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
889 * a TLB invalidation occurs during a PSD flush. 890 * a TLB invalidation occurs during a PSD flush.
890 */ 891 */
891 892
892 /* WaForceEnableNonCoherent:skl,bxt,kbl */ 893 /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
893 WA_SET_BIT_MASKED(HDC_CHICKEN0, 894 WA_SET_BIT_MASKED(HDC_CHICKEN0,
894 HDC_FORCE_NON_COHERENT); 895 HDC_FORCE_NON_COHERENT);
895 896
896 /* WaDisableHDCInvalidation:skl,bxt,kbl */ 897 /* WaDisableHDCInvalidation:skl,bxt,kbl */
897 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 898 if (!IS_COFFEELAKE(dev_priv))
898 BDW_DISABLE_HDC_INVALIDATION); 899 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
900 BDW_DISABLE_HDC_INVALIDATION);
899 901
900 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */ 902 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
901 if (IS_SKYLAKE(dev_priv) || 903 if (IS_SKYLAKE(dev_priv) ||
902 IS_KABYLAKE(dev_priv) || 904 IS_KABYLAKE(dev_priv) ||
905 IS_COFFEELAKE(dev_priv) ||
903 IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) 906 IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
904 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 907 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
905 GEN8_SAMPLER_POWER_BYPASS_DIS); 908 GEN8_SAMPLER_POWER_BYPASS_DIS);
906 909
907 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk */ 910 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
908 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); 911 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
909 912
910 /* WaOCLCoherentLineFlush:skl,bxt,kbl */ 913 /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
911 I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) | 914 I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
912 GEN8_LQSC_FLUSH_COHERENT_LINES)); 915 GEN8_LQSC_FLUSH_COHERENT_LINES));
913 916
914 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk */ 917 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
915 ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG); 918 ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
916 if (ret) 919 if (ret)
917 return ret; 920 return ret;
918 921
919 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */ 922 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl */
920 ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); 923 ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
921 if (ret) 924 if (ret)
922 return ret; 925 return ret;
923 926
924 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk */ 927 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
925 ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1); 928 ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
926 if (ret) 929 if (ret)
927 return ret; 930 return ret;
@@ -1140,6 +1143,38 @@ static int glk_init_workarounds(struct intel_engine_cs *engine)
1140 return 0; 1143 return 0;
1141} 1144}
1142 1145
1146static int cfl_init_workarounds(struct intel_engine_cs *engine)
1147{
1148 struct drm_i915_private *dev_priv = engine->i915;
1149 int ret;
1150
1151 ret = gen9_init_workarounds(engine);
1152 if (ret)
1153 return ret;
1154
1155 /* WaEnableGapsTsvCreditFix:cfl */
1156 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1157 GEN9_GAPS_TSV_CREDIT_DISABLE));
1158
1159 /* WaToEnableHwFixForPushConstHWBug:cfl */
1160 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1161 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1162
1163 /* WaDisableGafsUnitClkGating:cfl */
1164 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1165
1166 /* WaDisableSbeCacheDispatchPortSharing:cfl */
1167 WA_SET_BIT_MASKED(
1168 GEN7_HALF_SLICE_CHICKEN1,
1169 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1170
1171 /* WaInPlaceDecompressionHang:cfl */
1172 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1173 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1174
1175 return 0;
1176}
1177
1143int init_workarounds_ring(struct intel_engine_cs *engine) 1178int init_workarounds_ring(struct intel_engine_cs *engine)
1144{ 1179{
1145 struct drm_i915_private *dev_priv = engine->i915; 1180 struct drm_i915_private *dev_priv = engine->i915;
@@ -1162,6 +1197,8 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
1162 err = kbl_init_workarounds(engine); 1197 err = kbl_init_workarounds(engine);
1163 else if (IS_GEMINILAKE(dev_priv)) 1198 else if (IS_GEMINILAKE(dev_priv))
1164 err = glk_init_workarounds(engine); 1199 err = glk_init_workarounds(engine);
1200 else if (IS_COFFEELAKE(dev_priv))
1201 err = cfl_init_workarounds(engine);
1165 else 1202 else
1166 err = 0; 1203 err = 0;
1167 if (err) 1204 if (err)
@@ -1212,6 +1249,11 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
1212 1249
1213 intel_runtime_pm_get(dev_priv); 1250 intel_runtime_pm_get(dev_priv);
1214 1251
1252 /* First check that no commands are left in the ring */
1253 if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
1254 (I915_READ_TAIL(engine) & TAIL_ADDR))
1255 idle = false;
1256
1215 /* No bit for gen2, so assume the CS parser is idle */ 1257 /* No bit for gen2, so assume the CS parser is idle */
1216 if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE)) 1258 if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
1217 idle = false; 1259 idle = false;
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 966e255ca053..d484862cc7df 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -262,7 +262,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
262 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 262 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
263 else if (IS_GEN7(dev_priv)) 263 else if (IS_GEN7(dev_priv))
264 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old); 264 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
265 else if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) 265 else if (INTEL_GEN(dev_priv) >= 8)
266 broadwell_set_fifo_underrun_reporting(dev, pipe, enable); 266 broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
267 267
268 return old; 268 return old;
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index d9045b6e897b..8b0ae7fce7f2 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -402,7 +402,7 @@ int intel_guc_select_fw(struct intel_guc *guc)
402 guc->fw.path = I915_BXT_GUC_UCODE; 402 guc->fw.path = I915_BXT_GUC_UCODE;
403 guc->fw.major_ver_wanted = BXT_FW_MAJOR; 403 guc->fw.major_ver_wanted = BXT_FW_MAJOR;
404 guc->fw.minor_ver_wanted = BXT_FW_MINOR; 404 guc->fw.minor_ver_wanted = BXT_FW_MINOR;
405 } else if (IS_KABYLAKE(dev_priv)) { 405 } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
406 guc->fw.path = I915_KBL_GUC_UCODE; 406 guc->fw.path = I915_KBL_GUC_UCODE;
407 guc->fw.major_ver_wanted = KBL_FW_MAJOR; 407 guc->fw.major_ver_wanted = KBL_FW_MAJOR;
408 guc->fw.minor_ver_wanted = KBL_FW_MINOR; 408 guc->fw.minor_ver_wanted = KBL_FW_MINOR;
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index e1ab6432a914..52d5b82790d9 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -51,6 +51,32 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
51} 51}
52 52
53/** 53/**
54 * intel_gvt_sanitize_options - sanitize GVT related options
55 * @dev_priv: drm i915 private data
56 *
57 * This function is called at the i915 options sanitize stage.
58 */
59void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
60{
61 if (!i915.enable_gvt)
62 return;
63
64 if (intel_vgpu_active(dev_priv)) {
65 DRM_INFO("GVT-g is disabled for guest\n");
66 goto bail;
67 }
68
69 if (!is_supported_device(dev_priv)) {
70 DRM_INFO("Unsupported device. GVT-g is disabled\n");
71 goto bail;
72 }
73
74 return;
75bail:
76 i915.enable_gvt = 0;
77}
78
79/**
54 * intel_gvt_init - initialize GVT components 80 * intel_gvt_init - initialize GVT components
55 * @dev_priv: drm i915 private data 81 * @dev_priv: drm i915 private data
56 * 82 *
@@ -69,19 +95,14 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
69 return 0; 95 return 0;
70 } 96 }
71 97
72 if (intel_vgpu_active(dev_priv)) { 98 if (!i915.enable_execlists) {
73 DRM_DEBUG_DRIVER("GVT-g is disabled for guest\n"); 99 DRM_ERROR("i915 GVT-g loading failed due to disabled execlists mode\n");
74 goto bail; 100 return -EIO;
75 }
76
77 if (!is_supported_device(dev_priv)) {
78 DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n");
79 goto bail;
80 } 101 }
81 102
82 if (!i915.enable_execlists) { 103 if (i915.enable_guc_submission) {
83 DRM_INFO("GPU guest virtualisation [GVT-g] disabled due to disabled execlist submission [i915.enable_execlists module parameter]\n"); 104 DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
84 goto bail; 105 return -EIO;
85 } 106 }
86 107
87 /* 108 /*
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 25df2d65b985..61b246470282 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -32,6 +32,7 @@ void intel_gvt_cleanup(struct drm_i915_private *dev_priv);
32int intel_gvt_init_device(struct drm_i915_private *dev_priv); 32int intel_gvt_init_device(struct drm_i915_private *dev_priv);
33void intel_gvt_clean_device(struct drm_i915_private *dev_priv); 33void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
34int intel_gvt_init_host(void); 34int intel_gvt_init_host(void);
35void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
35#else 36#else
36static inline int intel_gvt_init(struct drm_i915_private *dev_priv) 37static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
37{ 38{
@@ -40,6 +41,10 @@ static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
40static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv) 41static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
41{ 42{
42} 43}
44
45static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
46{
47}
43#endif 48#endif
44 49
45#endif /* _INTEL_GVT_H_ */ 50#endif /* _INTEL_GVT_H_ */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 58d690393b29..ec0779a52d53 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1218,7 +1218,8 @@ static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
1218} 1218}
1219 1219
1220static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, 1220static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
1221 bool respect_downstream_limits) 1221 bool respect_downstream_limits,
1222 bool force_dvi)
1222{ 1223{
1223 struct drm_device *dev = intel_hdmi_to_dev(hdmi); 1224 struct drm_device *dev = intel_hdmi_to_dev(hdmi);
1224 int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev)); 1225 int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev));
@@ -1234,7 +1235,7 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
1234 if (info->max_tmds_clock) 1235 if (info->max_tmds_clock)
1235 max_tmds_clock = min(max_tmds_clock, 1236 max_tmds_clock = min(max_tmds_clock,
1236 info->max_tmds_clock); 1237 info->max_tmds_clock);
1237 else if (!hdmi->has_hdmi_sink) 1238 else if (!hdmi->has_hdmi_sink || force_dvi)
1238 max_tmds_clock = min(max_tmds_clock, 165000); 1239 max_tmds_clock = min(max_tmds_clock, 165000);
1239 } 1240 }
1240 1241
@@ -1243,13 +1244,14 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
1243 1244
1244static enum drm_mode_status 1245static enum drm_mode_status
1245hdmi_port_clock_valid(struct intel_hdmi *hdmi, 1246hdmi_port_clock_valid(struct intel_hdmi *hdmi,
1246 int clock, bool respect_downstream_limits) 1247 int clock, bool respect_downstream_limits,
1248 bool force_dvi)
1247{ 1249{
1248 struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi)); 1250 struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
1249 1251
1250 if (clock < 25000) 1252 if (clock < 25000)
1251 return MODE_CLOCK_LOW; 1253 return MODE_CLOCK_LOW;
1252 if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits)) 1254 if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits, force_dvi))
1253 return MODE_CLOCK_HIGH; 1255 return MODE_CLOCK_HIGH;
1254 1256
1255 /* BXT DPLL can't generate 223-240 MHz */ 1257 /* BXT DPLL can't generate 223-240 MHz */
@@ -1273,6 +1275,8 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
1273 enum drm_mode_status status; 1275 enum drm_mode_status status;
1274 int clock; 1276 int clock;
1275 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; 1277 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
1278 bool force_dvi =
1279 READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
1276 1280
1277 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 1281 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1278 return MODE_NO_DBLESCAN; 1282 return MODE_NO_DBLESCAN;
@@ -1289,11 +1293,11 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
1289 clock *= 2; 1293 clock *= 2;
1290 1294
1291 /* check if we can do 8bpc */ 1295 /* check if we can do 8bpc */
1292 status = hdmi_port_clock_valid(hdmi, clock, true); 1296 status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi);
1293 1297
1294 /* if we can't do 8bpc we may still be able to do 12bpc */ 1298 /* if we can't do 8bpc we may still be able to do 12bpc */
1295 if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK) 1299 if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK && hdmi->has_hdmi_sink && !force_dvi)
1296 status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true); 1300 status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true, force_dvi);
1297 1301
1298 return status; 1302 return status;
1299} 1303}
@@ -1343,16 +1347,19 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1343 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1347 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1344 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 1348 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1345 struct drm_scdc *scdc = &conn_state->connector->display_info.hdmi.scdc; 1349 struct drm_scdc *scdc = &conn_state->connector->display_info.hdmi.scdc;
1350 struct intel_digital_connector_state *intel_conn_state =
1351 to_intel_digital_connector_state(conn_state);
1346 int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock; 1352 int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
1347 int clock_12bpc = clock_8bpc * 3 / 2; 1353 int clock_12bpc = clock_8bpc * 3 / 2;
1348 int desired_bpp; 1354 int desired_bpp;
1355 bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
1349 1356
1350 pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink; 1357 pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
1351 1358
1352 if (pipe_config->has_hdmi_sink) 1359 if (pipe_config->has_hdmi_sink)
1353 pipe_config->has_infoframe = true; 1360 pipe_config->has_infoframe = true;
1354 1361
1355 if (intel_hdmi->color_range_auto) { 1362 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
1356 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 1363 /* See CEA-861-E - 5.1 Default Encoding Parameters */
1357 pipe_config->limited_color_range = 1364 pipe_config->limited_color_range =
1358 pipe_config->has_hdmi_sink && 1365 pipe_config->has_hdmi_sink &&
@@ -1360,7 +1367,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1360 HDMI_QUANTIZATION_RANGE_LIMITED; 1367 HDMI_QUANTIZATION_RANGE_LIMITED;
1361 } else { 1368 } else {
1362 pipe_config->limited_color_range = 1369 pipe_config->limited_color_range =
1363 intel_hdmi->limited_color_range; 1370 intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
1364 } 1371 }
1365 1372
1366 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) { 1373 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
@@ -1372,8 +1379,13 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1372 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv)) 1379 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
1373 pipe_config->has_pch_encoder = true; 1380 pipe_config->has_pch_encoder = true;
1374 1381
1375 if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio) 1382 if (pipe_config->has_hdmi_sink) {
1376 pipe_config->has_audio = true; 1383 if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
1384 pipe_config->has_audio = intel_hdmi->has_audio;
1385 else
1386 pipe_config->has_audio =
1387 intel_conn_state->force_audio == HDMI_AUDIO_ON;
1388 }
1377 1389
1378 /* 1390 /*
1379 * HDMI is either 12 or 8, so if the display lets 10bpc sneak 1391 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
@@ -1381,8 +1393,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1381 * outputs. We also need to check that the higher clock still fits 1393 * outputs. We also need to check that the higher clock still fits
1382 * within limits. 1394 * within limits.
1383 */ 1395 */
1384 if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && 1396 if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && !force_dvi &&
1385 hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true) == MODE_OK && 1397 hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK &&
1386 hdmi_12bpc_possible(pipe_config)) { 1398 hdmi_12bpc_possible(pipe_config)) {
1387 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); 1399 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
1388 desired_bpp = 12*3; 1400 desired_bpp = 12*3;
@@ -1402,7 +1414,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1402 } 1414 }
1403 1415
1404 if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock, 1416 if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
1405 false) != MODE_OK) { 1417 false, force_dvi) != MODE_OK) {
1406 DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n"); 1418 DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
1407 return false; 1419 return false;
1408 } 1420 }
@@ -1509,13 +1521,7 @@ intel_hdmi_set_edid(struct drm_connector *connector)
1509 drm_rgb_quant_range_selectable(edid); 1521 drm_rgb_quant_range_selectable(edid);
1510 1522
1511 intel_hdmi->has_audio = drm_detect_monitor_audio(edid); 1523 intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
1512 if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) 1524 intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
1513 intel_hdmi->has_audio =
1514 intel_hdmi->force_audio == HDMI_AUDIO_ON;
1515
1516 if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
1517 intel_hdmi->has_hdmi_sink =
1518 drm_detect_hdmi_monitor(edid);
1519 1525
1520 connected = true; 1526 connected = true;
1521 } 1527 }
@@ -1577,96 +1583,6 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
1577 return intel_connector_update_modes(connector, edid); 1583 return intel_connector_update_modes(connector, edid);
1578} 1584}
1579 1585
1580static bool
1581intel_hdmi_detect_audio(struct drm_connector *connector)
1582{
1583 bool has_audio = false;
1584 struct edid *edid;
1585
1586 edid = to_intel_connector(connector)->detect_edid;
1587 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL)
1588 has_audio = drm_detect_monitor_audio(edid);
1589
1590 return has_audio;
1591}
1592
1593static int
1594intel_hdmi_set_property(struct drm_connector *connector,
1595 struct drm_property *property,
1596 uint64_t val)
1597{
1598 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1599 struct intel_digital_port *intel_dig_port =
1600 hdmi_to_dig_port(intel_hdmi);
1601 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1602 int ret;
1603
1604 ret = drm_object_property_set_value(&connector->base, property, val);
1605 if (ret)
1606 return ret;
1607
1608 if (property == dev_priv->force_audio_property) {
1609 enum hdmi_force_audio i = val;
1610 bool has_audio;
1611
1612 if (i == intel_hdmi->force_audio)
1613 return 0;
1614
1615 intel_hdmi->force_audio = i;
1616
1617 if (i == HDMI_AUDIO_AUTO)
1618 has_audio = intel_hdmi_detect_audio(connector);
1619 else
1620 has_audio = (i == HDMI_AUDIO_ON);
1621
1622 if (i == HDMI_AUDIO_OFF_DVI)
1623 intel_hdmi->has_hdmi_sink = 0;
1624
1625 intel_hdmi->has_audio = has_audio;
1626 goto done;
1627 }
1628
1629 if (property == dev_priv->broadcast_rgb_property) {
1630 bool old_auto = intel_hdmi->color_range_auto;
1631 bool old_range = intel_hdmi->limited_color_range;
1632
1633 switch (val) {
1634 case INTEL_BROADCAST_RGB_AUTO:
1635 intel_hdmi->color_range_auto = true;
1636 break;
1637 case INTEL_BROADCAST_RGB_FULL:
1638 intel_hdmi->color_range_auto = false;
1639 intel_hdmi->limited_color_range = false;
1640 break;
1641 case INTEL_BROADCAST_RGB_LIMITED:
1642 intel_hdmi->color_range_auto = false;
1643 intel_hdmi->limited_color_range = true;
1644 break;
1645 default:
1646 return -EINVAL;
1647 }
1648
1649 if (old_auto == intel_hdmi->color_range_auto &&
1650 old_range == intel_hdmi->limited_color_range)
1651 return 0;
1652
1653 goto done;
1654 }
1655
1656 if (property == connector->dev->mode_config.aspect_ratio_property) {
1657 connector->state->picture_aspect_ratio = val;
1658 goto done;
1659 }
1660
1661 return -EINVAL;
1662
1663done:
1664 if (intel_dig_port->base.base.crtc)
1665 intel_crtc_restore_mode(intel_dig_port->base.base.crtc);
1666
1667 return 0;
1668}
1669
1670static void intel_hdmi_pre_enable(struct intel_encoder *encoder, 1586static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
1671 struct intel_crtc_state *pipe_config, 1587 struct intel_crtc_state *pipe_config,
1672 struct drm_connector_state *conn_state) 1588 struct drm_connector_state *conn_state)
@@ -1791,18 +1707,20 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
1791 .detect = intel_hdmi_detect, 1707 .detect = intel_hdmi_detect,
1792 .force = intel_hdmi_force, 1708 .force = intel_hdmi_force,
1793 .fill_modes = drm_helper_probe_single_connector_modes, 1709 .fill_modes = drm_helper_probe_single_connector_modes,
1794 .set_property = intel_hdmi_set_property, 1710 .set_property = drm_atomic_helper_connector_set_property,
1795 .atomic_get_property = intel_connector_atomic_get_property, 1711 .atomic_get_property = intel_digital_connector_atomic_get_property,
1712 .atomic_set_property = intel_digital_connector_atomic_set_property,
1796 .late_register = intel_connector_register, 1713 .late_register = intel_connector_register,
1797 .early_unregister = intel_connector_unregister, 1714 .early_unregister = intel_connector_unregister,
1798 .destroy = intel_hdmi_destroy, 1715 .destroy = intel_hdmi_destroy,
1799 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 1716 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1800 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 1717 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
1801}; 1718};
1802 1719
1803static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { 1720static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
1804 .get_modes = intel_hdmi_get_modes, 1721 .get_modes = intel_hdmi_get_modes,
1805 .mode_valid = intel_hdmi_mode_valid, 1722 .mode_valid = intel_hdmi_mode_valid,
1723 .atomic_check = intel_digital_connector_atomic_check,
1806}; 1724};
1807 1725
1808static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { 1726static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -1814,7 +1732,6 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
1814{ 1732{
1815 intel_attach_force_audio_property(connector); 1733 intel_attach_force_audio_property(connector);
1816 intel_attach_broadcast_rgb_property(connector); 1734 intel_attach_broadcast_rgb_property(connector);
1817 intel_hdmi->color_range_auto = true;
1818 intel_attach_aspect_ratio_property(connector); 1735 intel_attach_aspect_ratio_property(connector);
1819 connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; 1736 connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
1820} 1737}
@@ -1885,19 +1802,21 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
1885 1802
1886 switch (port) { 1803 switch (port) {
1887 case PORT_B: 1804 case PORT_B:
1888 if (IS_GEN9_LP(dev_priv)) 1805 if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv))
1889 ddc_pin = GMBUS_PIN_1_BXT; 1806 ddc_pin = GMBUS_PIN_1_BXT;
1890 else 1807 else
1891 ddc_pin = GMBUS_PIN_DPB; 1808 ddc_pin = GMBUS_PIN_DPB;
1892 break; 1809 break;
1893 case PORT_C: 1810 case PORT_C:
1894 if (IS_GEN9_LP(dev_priv)) 1811 if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv))
1895 ddc_pin = GMBUS_PIN_2_BXT; 1812 ddc_pin = GMBUS_PIN_2_BXT;
1896 else 1813 else
1897 ddc_pin = GMBUS_PIN_DPC; 1814 ddc_pin = GMBUS_PIN_DPC;
1898 break; 1815 break;
1899 case PORT_D: 1816 case PORT_D:
1900 if (IS_CHERRYVIEW(dev_priv)) 1817 if (HAS_PCH_CNP(dev_priv))
1818 ddc_pin = GMBUS_PIN_4_CNP;
1819 else if (IS_CHERRYVIEW(dev_priv))
1901 ddc_pin = GMBUS_PIN_DPD_CHV; 1820 ddc_pin = GMBUS_PIN_DPD_CHV;
1902 else 1821 else
1903 ddc_pin = GMBUS_PIN_DPD; 1822 ddc_pin = GMBUS_PIN_DPD;
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index f5eb18d0e2d1..6145fa0d6773 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -167,7 +167,7 @@ void intel_huc_select_fw(struct intel_huc *huc)
167 huc->fw.path = I915_BXT_HUC_UCODE; 167 huc->fw.path = I915_BXT_HUC_UCODE;
168 huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR; 168 huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
169 huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR; 169 huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
170 } else if (IS_KABYLAKE(dev_priv)) { 170 } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
171 huc->fw.path = I915_KBL_HUC_UCODE; 171 huc->fw.path = I915_KBL_HUC_UCODE;
172 huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR; 172 huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
173 huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR; 173 huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index b6401e8f1bd6..3c9e00d4ba5a 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -68,11 +68,20 @@ static const struct gmbus_pin gmbus_pins_bxt[] = {
68 [GMBUS_PIN_3_BXT] = { "misc", GPIOD }, 68 [GMBUS_PIN_3_BXT] = { "misc", GPIOD },
69}; 69};
70 70
71static const struct gmbus_pin gmbus_pins_cnp[] = {
72 [GMBUS_PIN_1_BXT] = { "dpb", GPIOB },
73 [GMBUS_PIN_2_BXT] = { "dpc", GPIOC },
74 [GMBUS_PIN_3_BXT] = { "misc", GPIOD },
75 [GMBUS_PIN_4_CNP] = { "dpd", GPIOE },
76};
77
71/* pin is expected to be valid */ 78/* pin is expected to be valid */
72static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv, 79static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
73 unsigned int pin) 80 unsigned int pin)
74{ 81{
75 if (IS_GEN9_LP(dev_priv)) 82 if (HAS_PCH_CNP(dev_priv))
83 return &gmbus_pins_cnp[pin];
84 else if (IS_GEN9_LP(dev_priv))
76 return &gmbus_pins_bxt[pin]; 85 return &gmbus_pins_bxt[pin];
77 else if (IS_GEN9_BC(dev_priv)) 86 else if (IS_GEN9_BC(dev_priv))
78 return &gmbus_pins_skl[pin]; 87 return &gmbus_pins_skl[pin];
@@ -87,7 +96,9 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
87{ 96{
88 unsigned int size; 97 unsigned int size;
89 98
90 if (IS_GEN9_LP(dev_priv)) 99 if (HAS_PCH_CNP(dev_priv))
100 size = ARRAY_SIZE(gmbus_pins_cnp);
101 else if (IS_GEN9_LP(dev_priv))
91 size = ARRAY_SIZE(gmbus_pins_bxt); 102 size = ARRAY_SIZE(gmbus_pins_bxt);
92 else if (IS_GEN9_BC(dev_priv)) 103 else if (IS_GEN9_BC(dev_priv))
93 size = ARRAY_SIZE(gmbus_pins_skl); 104 size = ARRAY_SIZE(gmbus_pins_skl);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 014b30ace8a0..7404cf2aac28 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -204,6 +204,7 @@
204 204
205#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 205#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
206#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 206#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
207#define GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x19
207 208
208/* Typical size of the average request (2 pipecontrols and a MI_BB) */ 209/* Typical size of the average request (2 pipecontrols and a MI_BB) */
209#define EXECLISTS_REQUEST_SIZE 64 /* bytes */ 210#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
@@ -1861,6 +1862,10 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
1861 default: 1862 default:
1862 MISSING_CASE(INTEL_GEN(engine->i915)); 1863 MISSING_CASE(INTEL_GEN(engine->i915));
1863 /* fall through */ 1864 /* fall through */
1865 case 10:
1866 indirect_ctx_offset =
1867 GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
1868 break;
1864 case 9: 1869 case 9:
1865 indirect_ctx_offset = 1870 indirect_ctx_offset =
1866 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 1871 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
@@ -1957,6 +1962,8 @@ static void execlists_init_reg_state(u32 *regs,
1957 regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); 1962 regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
1958 CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 1963 CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
1959 make_rpcs(dev_priv)); 1964 make_rpcs(dev_priv));
1965
1966 i915_oa_init_reg_state(engine, ctx, regs);
1960 } 1967 }
1961} 1968}
1962 1969
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8b942ef2b3ec..6fe5d7c3bc23 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -311,8 +311,6 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
311{ 311{
312 struct drm_device *dev = encoder->base.dev; 312 struct drm_device *dev = encoder->base.dev;
313 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 313 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
314 struct intel_connector *intel_connector =
315 &lvds_encoder->attached_connector->base;
316 struct drm_i915_private *dev_priv = to_i915(dev); 314 struct drm_i915_private *dev_priv = to_i915(dev);
317 315
318 I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN); 316 I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
@@ -322,7 +320,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
322 if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000)) 320 if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
323 DRM_ERROR("timed out waiting for panel to power on\n"); 321 DRM_ERROR("timed out waiting for panel to power on\n");
324 322
325 intel_panel_enable_backlight(intel_connector); 323 intel_panel_enable_backlight(pipe_config, conn_state);
326} 324}
327 325
328static void intel_disable_lvds(struct intel_encoder *encoder, 326static void intel_disable_lvds(struct intel_encoder *encoder,
@@ -345,11 +343,7 @@ static void gmch_disable_lvds(struct intel_encoder *encoder,
345 struct drm_connector_state *old_conn_state) 343 struct drm_connector_state *old_conn_state)
346 344
347{ 345{
348 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 346 intel_panel_disable_backlight(old_conn_state);
349 struct intel_connector *intel_connector =
350 &lvds_encoder->attached_connector->base;
351
352 intel_panel_disable_backlight(intel_connector);
353 347
354 intel_disable_lvds(encoder, old_crtc_state, old_conn_state); 348 intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
355} 349}
@@ -358,11 +352,7 @@ static void pch_disable_lvds(struct intel_encoder *encoder,
358 struct intel_crtc_state *old_crtc_state, 352 struct intel_crtc_state *old_crtc_state,
359 struct drm_connector_state *old_conn_state) 353 struct drm_connector_state *old_conn_state)
360{ 354{
361 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 355 intel_panel_disable_backlight(old_conn_state);
362 struct intel_connector *intel_connector =
363 &lvds_encoder->attached_connector->base;
364
365 intel_panel_disable_backlight(intel_connector);
366} 356}
367 357
368static void pch_post_disable_lvds(struct intel_encoder *encoder, 358static void pch_post_disable_lvds(struct intel_encoder *encoder,
@@ -433,10 +423,10 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
433 pipe_config->has_pch_encoder = true; 423 pipe_config->has_pch_encoder = true;
434 424
435 intel_pch_panel_fitting(intel_crtc, pipe_config, 425 intel_pch_panel_fitting(intel_crtc, pipe_config,
436 intel_connector->panel.fitting_mode); 426 conn_state->scaling_mode);
437 } else { 427 } else {
438 intel_gmch_panel_fitting(intel_crtc, pipe_config, 428 intel_gmch_panel_fitting(intel_crtc, pipe_config,
439 intel_connector->panel.fitting_mode); 429 conn_state->scaling_mode);
440 430
441 } 431 }
442 432
@@ -598,56 +588,24 @@ static void intel_lvds_destroy(struct drm_connector *connector)
598 kfree(connector); 588 kfree(connector);
599} 589}
600 590
601static int intel_lvds_set_property(struct drm_connector *connector,
602 struct drm_property *property,
603 uint64_t value)
604{
605 struct intel_connector *intel_connector = to_intel_connector(connector);
606 struct drm_device *dev = connector->dev;
607
608 if (property == dev->mode_config.scaling_mode_property) {
609 struct drm_crtc *crtc;
610
611 if (value == DRM_MODE_SCALE_NONE) {
612 DRM_DEBUG_KMS("no scaling not supported\n");
613 return -EINVAL;
614 }
615
616 if (intel_connector->panel.fitting_mode == value) {
617 /* the LVDS scaling property is not changed */
618 return 0;
619 }
620 intel_connector->panel.fitting_mode = value;
621
622 crtc = intel_attached_encoder(connector)->base.crtc;
623 if (crtc && crtc->state->enable) {
624 /*
625 * If the CRTC is enabled, the display will be changed
626 * according to the new panel fitting mode.
627 */
628 intel_crtc_restore_mode(crtc);
629 }
630 }
631
632 return 0;
633}
634
635static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { 591static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
636 .get_modes = intel_lvds_get_modes, 592 .get_modes = intel_lvds_get_modes,
637 .mode_valid = intel_lvds_mode_valid, 593 .mode_valid = intel_lvds_mode_valid,
594 .atomic_check = intel_digital_connector_atomic_check,
638}; 595};
639 596
640static const struct drm_connector_funcs intel_lvds_connector_funcs = { 597static const struct drm_connector_funcs intel_lvds_connector_funcs = {
641 .dpms = drm_atomic_helper_connector_dpms, 598 .dpms = drm_atomic_helper_connector_dpms,
642 .detect = intel_lvds_detect, 599 .detect = intel_lvds_detect,
643 .fill_modes = drm_helper_probe_single_connector_modes, 600 .fill_modes = drm_helper_probe_single_connector_modes,
644 .set_property = intel_lvds_set_property, 601 .set_property = drm_atomic_helper_connector_set_property,
645 .atomic_get_property = intel_connector_atomic_get_property, 602 .atomic_get_property = intel_digital_connector_atomic_get_property,
603 .atomic_set_property = intel_digital_connector_atomic_set_property,
646 .late_register = intel_connector_register, 604 .late_register = intel_connector_register,
647 .early_unregister = intel_connector_unregister, 605 .early_unregister = intel_connector_unregister,
648 .destroy = intel_lvds_destroy, 606 .destroy = intel_lvds_destroy,
649 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 607 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
650 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 608 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
651}; 609};
652 610
653static const struct drm_encoder_funcs intel_lvds_enc_funcs = { 611static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@@ -988,6 +946,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
988 u32 lvds; 946 u32 lvds;
989 int pipe; 947 int pipe;
990 u8 pin; 948 u8 pin;
949 u32 allowed_scalers;
991 950
992 if (!intel_lvds_supported(dev_priv)) 951 if (!intel_lvds_supported(dev_priv))
993 return; 952 return;
@@ -1083,11 +1042,11 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
1083 lvds_encoder->reg = lvds_reg; 1042 lvds_encoder->reg = lvds_reg;
1084 1043
1085 /* create the scaling mode property */ 1044 /* create the scaling mode property */
1086 drm_mode_create_scaling_mode_property(dev); 1045 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT);
1087 drm_object_attach_property(&connector->base, 1046 allowed_scalers |= BIT(DRM_MODE_SCALE_FULLSCREEN);
1088 dev->mode_config.scaling_mode_property, 1047 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
1089 DRM_MODE_SCALE_ASPECT); 1048 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
1090 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; 1049 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
1091 1050
1092 intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps); 1051 intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps);
1093 lvds_encoder->init_lvds_val = lvds; 1052 lvds_encoder->init_lvds_val = lvds;
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 92e461c68385..f4c46b0b8f0a 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -178,7 +178,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
178{ 178{
179 bool result = false; 179 bool result = false;
180 180
181 if (IS_GEN9_BC(dev_priv)) { 181 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
182 table->size = ARRAY_SIZE(skylake_mocs_table); 182 table->size = ARRAY_SIZE(skylake_mocs_table);
183 table->table = skylake_mocs_table; 183 table->table = skylake_mocs_table;
184 result = true; 184 result = true;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index d44465190dc1..2bd03001cc70 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -461,7 +461,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
461 DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); 461 DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
462 drm_connector_list_iter_begin(dev, &conn_iter); 462 drm_connector_list_iter_begin(dev, &conn_iter);
463 for_each_intel_connector_iter(connector, &conn_iter) 463 for_each_intel_connector_iter(connector, &conn_iter)
464 intel_panel_set_backlight_acpi(connector, bclp, 255); 464 intel_panel_set_backlight_acpi(connector->base.state, bclp, 255);
465 drm_connector_list_iter_end(&conn_iter); 465 drm_connector_list_iter_end(&conn_iter);
466 asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID; 466 asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
467 467
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 2e0c56ed22bb..b96aed941b97 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -270,7 +270,6 @@ static int intel_overlay_on(struct intel_overlay *overlay)
270 u32 *cs; 270 u32 *cs;
271 271
272 WARN_ON(overlay->active); 272 WARN_ON(overlay->active);
273 WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
274 273
275 req = alloc_request(overlay); 274 req = alloc_request(overlay);
276 if (IS_ERR(req)) 275 if (IS_ERR(req))
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c8103f8d4dfa..96c2cbd81869 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -561,15 +561,18 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
561 return val; 561 return val;
562} 562}
563 563
564static void lpt_set_backlight(struct intel_connector *connector, u32 level) 564static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
565{ 565{
566 struct intel_connector *connector = to_intel_connector(conn_state->connector);
566 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 567 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
568
567 u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; 569 u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
568 I915_WRITE(BLC_PWM_PCH_CTL2, val | level); 570 I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
569} 571}
570 572
571static void pch_set_backlight(struct intel_connector *connector, u32 level) 573static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
572{ 574{
575 struct intel_connector *connector = to_intel_connector(conn_state->connector);
573 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 576 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
574 u32 tmp; 577 u32 tmp;
575 578
@@ -577,8 +580,9 @@ static void pch_set_backlight(struct intel_connector *connector, u32 level)
577 I915_WRITE(BLC_PWM_CPU_CTL, tmp | level); 580 I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
578} 581}
579 582
580static void i9xx_set_backlight(struct intel_connector *connector, u32 level) 583static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
581{ 584{
585 struct intel_connector *connector = to_intel_connector(conn_state->connector);
582 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 586 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
583 struct intel_panel *panel = &connector->panel; 587 struct intel_panel *panel = &connector->panel;
584 u32 tmp, mask; 588 u32 tmp, mask;
@@ -604,50 +608,51 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
604 I915_WRITE(BLC_PWM_CTL, tmp | level); 608 I915_WRITE(BLC_PWM_CTL, tmp | level);
605} 609}
606 610
607static void vlv_set_backlight(struct intel_connector *connector, u32 level) 611static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
608{ 612{
613 struct intel_connector *connector = to_intel_connector(conn_state->connector);
609 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 614 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
610 enum pipe pipe = intel_get_pipe_from_connector(connector); 615 enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
611 u32 tmp; 616 u32 tmp;
612 617
613 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
614 return;
615
616 tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK; 618 tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
617 I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level); 619 I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
618} 620}
619 621
620static void bxt_set_backlight(struct intel_connector *connector, u32 level) 622static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
621{ 623{
624 struct intel_connector *connector = to_intel_connector(conn_state->connector);
622 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 625 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
623 struct intel_panel *panel = &connector->panel; 626 struct intel_panel *panel = &connector->panel;
624 627
625 I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level); 628 I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
626} 629}
627 630
628static void pwm_set_backlight(struct intel_connector *connector, u32 level) 631static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
629{ 632{
630 struct intel_panel *panel = &connector->panel; 633 struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
631 int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100); 634 int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
632 635
633 pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS); 636 pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS);
634} 637}
635 638
636static void 639static void
637intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level) 640intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level)
638{ 641{
642 struct intel_connector *connector = to_intel_connector(conn_state->connector);
639 struct intel_panel *panel = &connector->panel; 643 struct intel_panel *panel = &connector->panel;
640 644
641 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); 645 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
642 646
643 level = intel_panel_compute_brightness(connector, level); 647 level = intel_panel_compute_brightness(connector, level);
644 panel->backlight.set(connector, level); 648 panel->backlight.set(conn_state, level);
645} 649}
646 650
647/* set backlight brightness to level in range [0..max], scaling wrt hw min */ 651/* set backlight brightness to level in range [0..max], scaling wrt hw min */
648static void intel_panel_set_backlight(struct intel_connector *connector, 652static void intel_panel_set_backlight(const struct drm_connector_state *conn_state,
649 u32 user_level, u32 user_max) 653 u32 user_level, u32 user_max)
650{ 654{
655 struct intel_connector *connector = to_intel_connector(conn_state->connector);
651 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 656 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
652 struct intel_panel *panel = &connector->panel; 657 struct intel_panel *panel = &connector->panel;
653 u32 hw_level; 658 u32 hw_level;
@@ -663,7 +668,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
663 panel->backlight.level = hw_level; 668 panel->backlight.level = hw_level;
664 669
665 if (panel->backlight.enabled) 670 if (panel->backlight.enabled)
666 intel_panel_actually_set_backlight(connector, hw_level); 671 intel_panel_actually_set_backlight(conn_state, hw_level);
667 672
668 mutex_unlock(&dev_priv->backlight_lock); 673 mutex_unlock(&dev_priv->backlight_lock);
669} 674}
@@ -671,21 +676,21 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
671/* set backlight brightness to level in range [0..max], assuming hw min is 676/* set backlight brightness to level in range [0..max], assuming hw min is
672 * respected. 677 * respected.
673 */ 678 */
674void intel_panel_set_backlight_acpi(struct intel_connector *connector, 679void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
675 u32 user_level, u32 user_max) 680 u32 user_level, u32 user_max)
676{ 681{
682 struct intel_connector *connector = to_intel_connector(conn_state->connector);
677 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 683 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
678 struct intel_panel *panel = &connector->panel; 684 struct intel_panel *panel = &connector->panel;
679 enum pipe pipe = intel_get_pipe_from_connector(connector);
680 u32 hw_level; 685 u32 hw_level;
681 686
682 /* 687 /*
683 * INVALID_PIPE may occur during driver init because 688 * Lack of crtc may occur during driver init because
684 * connection_mutex isn't held across the entire backlight 689 * connection_mutex isn't held across the entire backlight
685 * setup + modeset readout, and the BIOS can issue the 690 * setup + modeset readout, and the BIOS can issue the
686 * requests at any time. 691 * requests at any time.
687 */ 692 */
688 if (!panel->backlight.present || pipe == INVALID_PIPE) 693 if (!panel->backlight.present || !conn_state->crtc)
689 return; 694 return;
690 695
691 mutex_lock(&dev_priv->backlight_lock); 696 mutex_lock(&dev_priv->backlight_lock);
@@ -702,17 +707,18 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
702 panel->backlight.device->props.max_brightness); 707 panel->backlight.device->props.max_brightness);
703 708
704 if (panel->backlight.enabled) 709 if (panel->backlight.enabled)
705 intel_panel_actually_set_backlight(connector, hw_level); 710 intel_panel_actually_set_backlight(conn_state, hw_level);
706 711
707 mutex_unlock(&dev_priv->backlight_lock); 712 mutex_unlock(&dev_priv->backlight_lock);
708} 713}
709 714
710static void lpt_disable_backlight(struct intel_connector *connector) 715static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state)
711{ 716{
717 struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
712 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 718 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
713 u32 tmp; 719 u32 tmp;
714 720
715 intel_panel_actually_set_backlight(connector, 0); 721 intel_panel_actually_set_backlight(old_conn_state, 0);
716 722
717 /* 723 /*
718 * Although we don't support or enable CPU PWM with LPT/SPT based 724 * Although we don't support or enable CPU PWM with LPT/SPT based
@@ -732,12 +738,13 @@ static void lpt_disable_backlight(struct intel_connector *connector)
732 I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); 738 I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
733} 739}
734 740
735static void pch_disable_backlight(struct intel_connector *connector) 741static void pch_disable_backlight(const struct drm_connector_state *old_conn_state)
736{ 742{
743 struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
737 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 744 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
738 u32 tmp; 745 u32 tmp;
739 746
740 intel_panel_actually_set_backlight(connector, 0); 747 intel_panel_actually_set_backlight(old_conn_state, 0);
741 748
742 tmp = I915_READ(BLC_PWM_CPU_CTL2); 749 tmp = I915_READ(BLC_PWM_CPU_CTL2);
743 I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); 750 I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
@@ -746,44 +753,43 @@ static void pch_disable_backlight(struct intel_connector *connector)
746 I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); 753 I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
747} 754}
748 755
749static void i9xx_disable_backlight(struct intel_connector *connector) 756static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state)
750{ 757{
751 intel_panel_actually_set_backlight(connector, 0); 758 intel_panel_actually_set_backlight(old_conn_state, 0);
752} 759}
753 760
754static void i965_disable_backlight(struct intel_connector *connector) 761static void i965_disable_backlight(const struct drm_connector_state *old_conn_state)
755{ 762{
756 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 763 struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
757 u32 tmp; 764 u32 tmp;
758 765
759 intel_panel_actually_set_backlight(connector, 0); 766 intel_panel_actually_set_backlight(old_conn_state, 0);
760 767
761 tmp = I915_READ(BLC_PWM_CTL2); 768 tmp = I915_READ(BLC_PWM_CTL2);
762 I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE); 769 I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
763} 770}
764 771
765static void vlv_disable_backlight(struct intel_connector *connector) 772static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state)
766{ 773{
774 struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
767 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 775 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
768 enum pipe pipe = intel_get_pipe_from_connector(connector); 776 enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
769 u32 tmp; 777 u32 tmp;
770 778
771 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) 779 intel_panel_actually_set_backlight(old_conn_state, 0);
772 return;
773
774 intel_panel_actually_set_backlight(connector, 0);
775 780
776 tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe)); 781 tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
777 I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE); 782 I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
778} 783}
779 784
780static void bxt_disable_backlight(struct intel_connector *connector) 785static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state)
781{ 786{
787 struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
782 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 788 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
783 struct intel_panel *panel = &connector->panel; 789 struct intel_panel *panel = &connector->panel;
784 u32 tmp, val; 790 u32 tmp, val;
785 791
786 intel_panel_actually_set_backlight(connector, 0); 792 intel_panel_actually_set_backlight(old_conn_state, 0);
787 793
788 tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); 794 tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
789 I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), 795 I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
@@ -796,8 +802,23 @@ static void bxt_disable_backlight(struct intel_connector *connector)
796 } 802 }
797} 803}
798 804
799static void pwm_disable_backlight(struct intel_connector *connector) 805static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state)
800{ 806{
807 struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
808 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
809 struct intel_panel *panel = &connector->panel;
810 u32 tmp;
811
812 intel_panel_actually_set_backlight(old_conn_state, 0);
813
814 tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
815 I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
816 tmp & ~BXT_BLC_PWM_ENABLE);
817}
818
819static void pwm_disable_backlight(const struct drm_connector_state *old_conn_state)
820{
821 struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
801 struct intel_panel *panel = &connector->panel; 822 struct intel_panel *panel = &connector->panel;
802 823
803 /* Disable the backlight */ 824 /* Disable the backlight */
@@ -806,8 +827,9 @@ static void pwm_disable_backlight(struct intel_connector *connector)
806 pwm_disable(panel->backlight.pwm); 827 pwm_disable(panel->backlight.pwm);
807} 828}
808 829
809void intel_panel_disable_backlight(struct intel_connector *connector) 830void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state)
810{ 831{
832 struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
811 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 833 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
812 struct intel_panel *panel = &connector->panel; 834 struct intel_panel *panel = &connector->panel;
813 835
@@ -830,13 +852,15 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
830 if (panel->backlight.device) 852 if (panel->backlight.device)
831 panel->backlight.device->props.power = FB_BLANK_POWERDOWN; 853 panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
832 panel->backlight.enabled = false; 854 panel->backlight.enabled = false;
833 panel->backlight.disable(connector); 855 panel->backlight.disable(old_conn_state);
834 856
835 mutex_unlock(&dev_priv->backlight_lock); 857 mutex_unlock(&dev_priv->backlight_lock);
836} 858}
837 859
838static void lpt_enable_backlight(struct intel_connector *connector) 860static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
861 const struct drm_connector_state *conn_state)
839{ 862{
863 struct intel_connector *connector = to_intel_connector(conn_state->connector);
840 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 864 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
841 struct intel_panel *panel = &connector->panel; 865 struct intel_panel *panel = &connector->panel;
842 u32 pch_ctl1, pch_ctl2, schicken; 866 u32 pch_ctl1, pch_ctl2, schicken;
@@ -880,22 +904,18 @@ static void lpt_enable_backlight(struct intel_connector *connector)
880 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE); 904 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
881 905
882 /* This won't stick until the above enable. */ 906 /* This won't stick until the above enable. */
883 intel_panel_actually_set_backlight(connector, panel->backlight.level); 907 intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
884} 908}
885 909
886static void pch_enable_backlight(struct intel_connector *connector) 910static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
911 const struct drm_connector_state *conn_state)
887{ 912{
913 struct intel_connector *connector = to_intel_connector(conn_state->connector);
888 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 914 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
889 struct intel_panel *panel = &connector->panel; 915 struct intel_panel *panel = &connector->panel;
890 enum pipe pipe = intel_get_pipe_from_connector(connector); 916 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
891 enum transcoder cpu_transcoder;
892 u32 cpu_ctl2, pch_ctl1, pch_ctl2; 917 u32 cpu_ctl2, pch_ctl1, pch_ctl2;
893 918
894 if (!WARN_ON_ONCE(pipe == INVALID_PIPE))
895 cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, pipe);
896 else
897 cpu_transcoder = TRANSCODER_EDP;
898
899 cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2); 919 cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
900 if (cpu_ctl2 & BLM_PWM_ENABLE) { 920 if (cpu_ctl2 & BLM_PWM_ENABLE) {
901 DRM_DEBUG_KMS("cpu backlight already enabled\n"); 921 DRM_DEBUG_KMS("cpu backlight already enabled\n");
@@ -919,7 +939,7 @@ static void pch_enable_backlight(struct intel_connector *connector)
919 I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE); 939 I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
920 940
921 /* This won't stick until the above enable. */ 941 /* This won't stick until the above enable. */
922 intel_panel_actually_set_backlight(connector, panel->backlight.level); 942 intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
923 943
924 pch_ctl2 = panel->backlight.max << 16; 944 pch_ctl2 = panel->backlight.max << 16;
925 I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2); 945 I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
@@ -933,8 +953,10 @@ static void pch_enable_backlight(struct intel_connector *connector)
933 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE); 953 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
934} 954}
935 955
936static void i9xx_enable_backlight(struct intel_connector *connector) 956static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
957 const struct drm_connector_state *conn_state)
937{ 958{
959 struct intel_connector *connector = to_intel_connector(conn_state->connector);
938 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 960 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
939 struct intel_panel *panel = &connector->panel; 961 struct intel_panel *panel = &connector->panel;
940 u32 ctl, freq; 962 u32 ctl, freq;
@@ -959,7 +981,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
959 POSTING_READ(BLC_PWM_CTL); 981 POSTING_READ(BLC_PWM_CTL);
960 982
961 /* XXX: combine this into above write? */ 983 /* XXX: combine this into above write? */
962 intel_panel_actually_set_backlight(connector, panel->backlight.level); 984 intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
963 985
964 /* 986 /*
965 * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is 987 * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
@@ -970,16 +992,15 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
970 I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE); 992 I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
971} 993}
972 994
973static void i965_enable_backlight(struct intel_connector *connector) 995static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
996 const struct drm_connector_state *conn_state)
974{ 997{
998 struct intel_connector *connector = to_intel_connector(conn_state->connector);
975 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 999 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
976 struct intel_panel *panel = &connector->panel; 1000 struct intel_panel *panel = &connector->panel;
977 enum pipe pipe = intel_get_pipe_from_connector(connector); 1001 enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
978 u32 ctl, ctl2, freq; 1002 u32 ctl, ctl2, freq;
979 1003
980 if (WARN_ON_ONCE(pipe == INVALID_PIPE))
981 pipe = PIPE_A;
982
983 ctl2 = I915_READ(BLC_PWM_CTL2); 1004 ctl2 = I915_READ(BLC_PWM_CTL2);
984 if (ctl2 & BLM_PWM_ENABLE) { 1005 if (ctl2 & BLM_PWM_ENABLE) {
985 DRM_DEBUG_KMS("backlight already enabled\n"); 1006 DRM_DEBUG_KMS("backlight already enabled\n");
@@ -1003,19 +1024,18 @@ static void i965_enable_backlight(struct intel_connector *connector)
1003 POSTING_READ(BLC_PWM_CTL2); 1024 POSTING_READ(BLC_PWM_CTL2);
1004 I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE); 1025 I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
1005 1026
1006 intel_panel_actually_set_backlight(connector, panel->backlight.level); 1027 intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
1007} 1028}
1008 1029
1009static void vlv_enable_backlight(struct intel_connector *connector) 1030static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
1031 const struct drm_connector_state *conn_state)
1010{ 1032{
1033 struct intel_connector *connector = to_intel_connector(conn_state->connector);
1011 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1034 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1012 struct intel_panel *panel = &connector->panel; 1035 struct intel_panel *panel = &connector->panel;
1013 enum pipe pipe = intel_get_pipe_from_connector(connector); 1036 enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
1014 u32 ctl, ctl2; 1037 u32 ctl, ctl2;
1015 1038
1016 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
1017 return;
1018
1019 ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe)); 1039 ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
1020 if (ctl2 & BLM_PWM_ENABLE) { 1040 if (ctl2 & BLM_PWM_ENABLE) {
1021 DRM_DEBUG_KMS("backlight already enabled\n"); 1041 DRM_DEBUG_KMS("backlight already enabled\n");
@@ -1027,7 +1047,7 @@ static void vlv_enable_backlight(struct intel_connector *connector)
1027 I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl); 1047 I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
1028 1048
1029 /* XXX: combine this into above write? */ 1049 /* XXX: combine this into above write? */
1030 intel_panel_actually_set_backlight(connector, panel->backlight.level); 1050 intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
1031 1051
1032 ctl2 = 0; 1052 ctl2 = 0;
1033 if (panel->backlight.active_low_pwm) 1053 if (panel->backlight.active_low_pwm)
@@ -1037,16 +1057,15 @@ static void vlv_enable_backlight(struct intel_connector *connector)
1037 I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE); 1057 I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
1038} 1058}
1039 1059
1040static void bxt_enable_backlight(struct intel_connector *connector) 1060static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
1061 const struct drm_connector_state *conn_state)
1041{ 1062{
1063 struct intel_connector *connector = to_intel_connector(conn_state->connector);
1042 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1064 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1043 struct intel_panel *panel = &connector->panel; 1065 struct intel_panel *panel = &connector->panel;
1044 enum pipe pipe = intel_get_pipe_from_connector(connector); 1066 enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
1045 u32 pwm_ctl, val; 1067 u32 pwm_ctl, val;
1046 1068
1047 if (WARN_ON_ONCE(pipe == INVALID_PIPE))
1048 pipe = PIPE_A;
1049
1050 /* Controller 1 uses the utility pin. */ 1069 /* Controller 1 uses the utility pin. */
1051 if (panel->backlight.controller == 1) { 1070 if (panel->backlight.controller == 1) {
1052 val = I915_READ(UTIL_PIN_CTL); 1071 val = I915_READ(UTIL_PIN_CTL);
@@ -1074,7 +1093,7 @@ static void bxt_enable_backlight(struct intel_connector *connector)
1074 I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller), 1093 I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
1075 panel->backlight.max); 1094 panel->backlight.max);
1076 1095
1077 intel_panel_actually_set_backlight(connector, panel->backlight.level); 1096 intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
1078 1097
1079 pwm_ctl = 0; 1098 pwm_ctl = 0;
1080 if (panel->backlight.active_low_pwm) 1099 if (panel->backlight.active_low_pwm)
@@ -1086,25 +1105,59 @@ static void bxt_enable_backlight(struct intel_connector *connector)
1086 pwm_ctl | BXT_BLC_PWM_ENABLE); 1105 pwm_ctl | BXT_BLC_PWM_ENABLE);
1087} 1106}
1088 1107
1089static void pwm_enable_backlight(struct intel_connector *connector) 1108static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
1109 const struct drm_connector_state *conn_state)
1090{ 1110{
1111 struct intel_connector *connector = to_intel_connector(conn_state->connector);
1112 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1113 struct intel_panel *panel = &connector->panel;
1114 u32 pwm_ctl;
1115
1116 pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
1117 if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
1118 DRM_DEBUG_KMS("backlight already enabled\n");
1119 pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
1120 I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
1121 pwm_ctl);
1122 }
1123
1124 I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
1125 panel->backlight.max);
1126
1127 intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
1128
1129 pwm_ctl = 0;
1130 if (panel->backlight.active_low_pwm)
1131 pwm_ctl |= BXT_BLC_PWM_POLARITY;
1132
1133 I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
1134 POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
1135 I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
1136 pwm_ctl | BXT_BLC_PWM_ENABLE);
1137}
1138
1139static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
1140 const struct drm_connector_state *conn_state)
1141{
1142 struct intel_connector *connector = to_intel_connector(conn_state->connector);
1091 struct intel_panel *panel = &connector->panel; 1143 struct intel_panel *panel = &connector->panel;
1092 1144
1093 pwm_enable(panel->backlight.pwm); 1145 pwm_enable(panel->backlight.pwm);
1094 intel_panel_actually_set_backlight(connector, panel->backlight.level); 1146 intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
1095} 1147}
1096 1148
1097void intel_panel_enable_backlight(struct intel_connector *connector) 1149void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
1150 const struct drm_connector_state *conn_state)
1098{ 1151{
1152 struct intel_connector *connector = to_intel_connector(conn_state->connector);
1099 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1153 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1100 struct intel_panel *panel = &connector->panel; 1154 struct intel_panel *panel = &connector->panel;
1101 enum pipe pipe = intel_get_pipe_from_connector(connector); 1155 enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
1102 1156
1103 if (!panel->backlight.present) 1157 if (!panel->backlight.present)
1104 return; 1158 return;
1105 1159
1106 if (!WARN_ON_ONCE(pipe == INVALID_PIPE)) 1160 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
1107 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
1108 1161
1109 mutex_lock(&dev_priv->backlight_lock); 1162 mutex_lock(&dev_priv->backlight_lock);
1110 1163
@@ -1119,7 +1172,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
1119 panel->backlight.device->props.max_brightness); 1172 panel->backlight.device->props.max_brightness);
1120 } 1173 }
1121 1174
1122 panel->backlight.enable(connector); 1175 panel->backlight.enable(crtc_state, conn_state);
1123 panel->backlight.enabled = true; 1176 panel->backlight.enabled = true;
1124 if (panel->backlight.device) 1177 if (panel->backlight.device)
1125 panel->backlight.device->props.power = FB_BLANK_UNBLANK; 1178 panel->backlight.device->props.power = FB_BLANK_UNBLANK;
@@ -1137,7 +1190,7 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
1137 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 1190 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1138 DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n", 1191 DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
1139 bd->props.brightness, bd->props.max_brightness); 1192 bd->props.brightness, bd->props.max_brightness);
1140 intel_panel_set_backlight(connector, bd->props.brightness, 1193 intel_panel_set_backlight(connector->base.state, bd->props.brightness,
1141 bd->props.max_brightness); 1194 bd->props.max_brightness);
1142 1195
1143 /* 1196 /*
@@ -1250,6 +1303,17 @@ void intel_backlight_device_unregister(struct intel_connector *connector)
1250#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ 1303#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
1251 1304
1252/* 1305/*
1306 * CNP: PWM clock frequency is 19.2 MHz or 24 MHz.
1307 * PWM increment = 1
1308 */
1309static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
1310{
1311 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1312
1313 return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz);
1314}
1315
1316/*
1253 * BXT: PWM clock frequency = 19.2 MHz. 1317 * BXT: PWM clock frequency = 19.2 MHz.
1254 */ 1318 */
1255static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) 1319static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
@@ -1644,6 +1708,42 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
1644 return 0; 1708 return 0;
1645} 1709}
1646 1710
1711static int
1712cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
1713{
1714 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1715 struct intel_panel *panel = &connector->panel;
1716 u32 pwm_ctl, val;
1717
1718 /*
1719 * CNP has the BXT implementation of backlight, but with only
1720 * one controller. Future platforms could have multiple controllers
1721 * so let's make this extensible and prepared for the future.
1722 */
1723 panel->backlight.controller = 0;
1724
1725 pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
1726
1727 panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
1728 panel->backlight.max =
1729 I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller));
1730
1731 if (!panel->backlight.max)
1732 panel->backlight.max = get_backlight_max_vbt(connector);
1733
1734 if (!panel->backlight.max)
1735 return -ENODEV;
1736
1737 val = bxt_get_backlight(connector);
1738 val = intel_panel_compute_brightness(connector, val);
1739 panel->backlight.level = clamp(val, panel->backlight.min,
1740 panel->backlight.max);
1741
1742 panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
1743
1744 return 0;
1745}
1746
1647static int pwm_setup_backlight(struct intel_connector *connector, 1747static int pwm_setup_backlight(struct intel_connector *connector,
1648 enum pipe pipe) 1748 enum pipe pipe)
1649{ 1749{
@@ -1760,6 +1860,13 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
1760 panel->backlight.set = bxt_set_backlight; 1860 panel->backlight.set = bxt_set_backlight;
1761 panel->backlight.get = bxt_get_backlight; 1861 panel->backlight.get = bxt_get_backlight;
1762 panel->backlight.hz_to_pwm = bxt_hz_to_pwm; 1862 panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
1863 } else if (HAS_PCH_CNP(dev_priv)) {
1864 panel->backlight.setup = cnp_setup_backlight;
1865 panel->backlight.enable = cnp_enable_backlight;
1866 panel->backlight.disable = cnp_disable_backlight;
1867 panel->backlight.set = bxt_set_backlight;
1868 panel->backlight.get = bxt_get_backlight;
1869 panel->backlight.hz_to_pwm = cnp_hz_to_pwm;
1763 } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) || 1870 } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
1764 HAS_PCH_KBP(dev_priv)) { 1871 HAS_PCH_KBP(dev_priv)) {
1765 panel->backlight.setup = lpt_setup_backlight; 1872 panel->backlight.setup = lpt_setup_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f2c1d030f7f9..48ea0fca1f72 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -58,24 +58,24 @@
58 58
59static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) 59static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
60{ 60{
61 /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */ 61 /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
62 I915_WRITE(CHICKEN_PAR1_1, 62 I915_WRITE(CHICKEN_PAR1_1,
63 I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); 63 I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
64 64
65 I915_WRITE(GEN8_CONFIG0, 65 I915_WRITE(GEN8_CONFIG0,
66 I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES); 66 I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
67 67
68 /* WaEnableChickenDCPR:skl,bxt,kbl,glk */ 68 /* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
69 I915_WRITE(GEN8_CHICKEN_DCPR_1, 69 I915_WRITE(GEN8_CHICKEN_DCPR_1,
70 I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); 70 I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
71 71
72 /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */ 72 /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
73 /* WaFbcWakeMemOn:skl,bxt,kbl,glk */ 73 /* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
74 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 74 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
75 DISP_FBC_WM_DIS | 75 DISP_FBC_WM_DIS |
76 DISP_FBC_MEMORY_WAKE); 76 DISP_FBC_MEMORY_WAKE);
77 77
78 /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */ 78 /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
79 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 79 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
80 ILK_DPFC_DISABLE_DUMMY0); 80 ILK_DPFC_DISABLE_DUMMY0);
81} 81}
@@ -3549,7 +3549,7 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
3549static bool 3549static bool
3550intel_has_sagv(struct drm_i915_private *dev_priv) 3550intel_has_sagv(struct drm_i915_private *dev_priv)
3551{ 3551{
3552 if (IS_KABYLAKE(dev_priv)) 3552 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
3553 return true; 3553 return true;
3554 3554
3555 if (IS_SKYLAKE(dev_priv) && 3555 if (IS_SKYLAKE(dev_priv) &&
@@ -3869,6 +3869,97 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
3869 return mul_fixed16(downscale_w, downscale_h); 3869 return mul_fixed16(downscale_w, downscale_h);
3870} 3870}
3871 3871
3872static uint_fixed_16_16_t
3873skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
3874{
3875 uint_fixed_16_16_t pipe_downscale = u32_to_fixed_16_16(1);
3876
3877 if (!crtc_state->base.enable)
3878 return pipe_downscale;
3879
3880 if (crtc_state->pch_pfit.enabled) {
3881 uint32_t src_w, src_h, dst_w, dst_h;
3882 uint32_t pfit_size = crtc_state->pch_pfit.size;
3883 uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
3884 uint_fixed_16_16_t downscale_h, downscale_w;
3885
3886 src_w = crtc_state->pipe_src_w;
3887 src_h = crtc_state->pipe_src_h;
3888 dst_w = pfit_size >> 16;
3889 dst_h = pfit_size & 0xffff;
3890
3891 if (!dst_w || !dst_h)
3892 return pipe_downscale;
3893
3894 fp_w_ratio = fixed_16_16_div(src_w, dst_w);
3895 fp_h_ratio = fixed_16_16_div(src_h, dst_h);
3896 downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1));
3897 downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1));
3898
3899 pipe_downscale = mul_fixed16(downscale_w, downscale_h);
3900 }
3901
3902 return pipe_downscale;
3903}
3904
3905int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
3906 struct intel_crtc_state *cstate)
3907{
3908 struct drm_crtc_state *crtc_state = &cstate->base;
3909 struct drm_atomic_state *state = crtc_state->state;
3910 struct drm_plane *plane;
3911 const struct drm_plane_state *pstate;
3912 struct intel_plane_state *intel_pstate;
3913 int crtc_clock, dotclk;
3914 uint32_t pipe_max_pixel_rate;
3915 uint_fixed_16_16_t pipe_downscale;
3916 uint_fixed_16_16_t max_downscale = u32_to_fixed_16_16(1);
3917
3918 if (!cstate->base.enable)
3919 return 0;
3920
3921 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
3922 uint_fixed_16_16_t plane_downscale;
3923 uint_fixed_16_16_t fp_9_div_8 = fixed_16_16_div(9, 8);
3924 int bpp;
3925
3926 if (!intel_wm_plane_visible(cstate,
3927 to_intel_plane_state(pstate)))
3928 continue;
3929
3930 if (WARN_ON(!pstate->fb))
3931 return -EINVAL;
3932
3933 intel_pstate = to_intel_plane_state(pstate);
3934 plane_downscale = skl_plane_downscale_amount(cstate,
3935 intel_pstate);
3936 bpp = pstate->fb->format->cpp[0] * 8;
3937 if (bpp == 64)
3938 plane_downscale = mul_fixed16(plane_downscale,
3939 fp_9_div_8);
3940
3941 max_downscale = max_fixed_16_16(plane_downscale, max_downscale);
3942 }
3943 pipe_downscale = skl_pipe_downscale_amount(cstate);
3944
3945 pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
3946
3947 crtc_clock = crtc_state->adjusted_mode.crtc_clock;
3948 dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
3949
3950 if (IS_GEMINILAKE(to_i915(intel_crtc->base.dev)))
3951 dotclk *= 2;
3952
3953 pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);
3954
3955 if (pipe_max_pixel_rate < crtc_clock) {
3956 DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n");
3957 return -EINVAL;
3958 }
3959
3960 return 0;
3961}
3962
3872static unsigned int 3963static unsigned int
3873skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, 3964skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
3874 const struct drm_plane_state *pstate, 3965 const struct drm_plane_state *pstate,
@@ -4289,8 +4380,9 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4289 fb->modifier == I915_FORMAT_MOD_Yf_TILED; 4380 fb->modifier == I915_FORMAT_MOD_Yf_TILED;
4290 x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED; 4381 x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
4291 4382
4292 /* Display WA #1141: kbl. */ 4383 /* Display WA #1141: kbl,cfl */
4293 if (IS_KABYLAKE(dev_priv) && dev_priv->ipc_enabled) 4384 if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
4385 dev_priv->ipc_enabled)
4294 latency += 4; 4386 latency += 4;
4295 4387
4296 if (apply_memory_bw_wa && x_tiled) 4388 if (apply_memory_bw_wa && x_tiled)
@@ -8160,7 +8252,7 @@ static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
8160 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | 8252 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
8161 GEN6_GAMUNIT_CLOCK_GATE_DISABLE); 8253 GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
8162 8254
8163 /* WaFbcNukeOnHostModify:kbl */ 8255 /* WaFbcNukeOnHostModify:kbl,cfl */
8164 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 8256 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
8165 ILK_DPFC_NUKE_ON_ANY_MODIFICATION); 8257 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
8166} 8258}
@@ -8628,7 +8720,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
8628{ 8720{
8629 if (IS_SKYLAKE(dev_priv)) 8721 if (IS_SKYLAKE(dev_priv))
8630 dev_priv->display.init_clock_gating = skylake_init_clock_gating; 8722 dev_priv->display.init_clock_gating = skylake_init_clock_gating;
8631 else if (IS_KABYLAKE(dev_priv)) 8723 else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
8632 dev_priv->display.init_clock_gating = kabylake_init_clock_gating; 8724 dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
8633 else if (IS_BROXTON(dev_priv)) 8725 else if (IS_BROXTON(dev_priv))
8634 dev_priv->display.init_clock_gating = bxt_init_clock_gating; 8726 dev_priv->display.init_clock_gating = bxt_init_clock_gating;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index f8a375f8dde6..efe80ed5fd4d 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -494,6 +494,55 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
494 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 494 BIT_ULL(POWER_DOMAIN_AUX_A) | \
495 BIT_ULL(POWER_DOMAIN_INIT)) 495 BIT_ULL(POWER_DOMAIN_INIT))
496 496
497#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
498 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
499 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
500 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
501 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
502 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
503 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
504 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
505 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
506 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
507 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
508 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
509 BIT_ULL(POWER_DOMAIN_AUX_B) | \
510 BIT_ULL(POWER_DOMAIN_AUX_C) | \
511 BIT_ULL(POWER_DOMAIN_AUX_D) | \
512 BIT_ULL(POWER_DOMAIN_AUDIO) | \
513 BIT_ULL(POWER_DOMAIN_VGA) | \
514 BIT_ULL(POWER_DOMAIN_INIT))
515#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
516 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
517 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
518 BIT_ULL(POWER_DOMAIN_INIT))
519#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
520 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
521 BIT_ULL(POWER_DOMAIN_INIT))
522#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
523 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
524 BIT_ULL(POWER_DOMAIN_INIT))
525#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
526 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
527 BIT_ULL(POWER_DOMAIN_INIT))
528#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
529 BIT_ULL(POWER_DOMAIN_AUX_A) | \
530 BIT_ULL(POWER_DOMAIN_INIT))
531#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
532 BIT_ULL(POWER_DOMAIN_AUX_B) | \
533 BIT_ULL(POWER_DOMAIN_INIT))
534#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
535 BIT_ULL(POWER_DOMAIN_AUX_C) | \
536 BIT_ULL(POWER_DOMAIN_INIT))
537#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
538 BIT_ULL(POWER_DOMAIN_AUX_D) | \
539 BIT_ULL(POWER_DOMAIN_INIT))
540#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
541 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
542 BIT_ULL(POWER_DOMAIN_MODESET) | \
543 BIT_ULL(POWER_DOMAIN_AUX_A) | \
544 BIT_ULL(POWER_DOMAIN_INIT))
545
497static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) 546static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
498{ 547{
499 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9), 548 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
@@ -762,13 +811,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
762 } 811 }
763 break; 812 break;
764 case SKL_DISP_PW_MISC_IO: 813 case SKL_DISP_PW_MISC_IO:
765 case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A */ 814 case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A, CNL_DISP_PW_DDI_A */
766 case SKL_DISP_PW_DDI_B: 815 case SKL_DISP_PW_DDI_B:
767 case SKL_DISP_PW_DDI_C: 816 case SKL_DISP_PW_DDI_C:
768 case SKL_DISP_PW_DDI_D: 817 case SKL_DISP_PW_DDI_D:
769 case GLK_DISP_PW_AUX_A: 818 case GLK_DISP_PW_AUX_A: /* CNL_DISP_PW_AUX_A */
770 case GLK_DISP_PW_AUX_B: 819 case GLK_DISP_PW_AUX_B: /* CNL_DISP_PW_AUX_B */
771 case GLK_DISP_PW_AUX_C: 820 case GLK_DISP_PW_AUX_C: /* CNL_DISP_PW_AUX_C */
821 case CNL_DISP_PW_AUX_D:
772 break; 822 break;
773 default: 823 default:
774 WARN(1, "Unknown power well %lu\n", power_well->id); 824 WARN(1, "Unknown power well %lu\n", power_well->id);
@@ -803,8 +853,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
803 DRM_DEBUG_KMS("Disabling %s\n", power_well->name); 853 DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
804 } 854 }
805 855
806 if (IS_GEN9(dev_priv)) 856 gen9_sanitize_power_well_requests(dev_priv, power_well);
807 gen9_sanitize_power_well_requests(dev_priv, power_well);
808 } 857 }
809 858
810 if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable, 859 if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
@@ -992,6 +1041,38 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
992 return true; 1041 return true;
993} 1042}
994 1043
1044static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1045 struct i915_power_well *power_well)
1046{
1047 if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1048 i830_enable_pipe(dev_priv, PIPE_A);
1049 if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1050 i830_enable_pipe(dev_priv, PIPE_B);
1051}
1052
1053static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1054 struct i915_power_well *power_well)
1055{
1056 i830_disable_pipe(dev_priv, PIPE_B);
1057 i830_disable_pipe(dev_priv, PIPE_A);
1058}
1059
1060static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1061 struct i915_power_well *power_well)
1062{
1063 return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1064 I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1065}
1066
1067static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1068 struct i915_power_well *power_well)
1069{
1070 if (power_well->count > 0)
1071 i830_pipes_power_well_enable(dev_priv, power_well);
1072 else
1073 i830_pipes_power_well_disable(dev_priv, power_well);
1074}
1075
995static void vlv_set_power_well(struct drm_i915_private *dev_priv, 1076static void vlv_set_power_well(struct drm_i915_private *dev_priv,
996 struct i915_power_well *power_well, bool enable) 1077 struct i915_power_well *power_well, bool enable)
997{ 1078{
@@ -1880,6 +1961,15 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1880 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 1961 BIT_ULL(POWER_DOMAIN_AUX_D) | \
1881 BIT_ULL(POWER_DOMAIN_INIT)) 1962 BIT_ULL(POWER_DOMAIN_INIT))
1882 1963
1964#define I830_PIPES_POWER_DOMAINS ( \
1965 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1966 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1967 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1968 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1969 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1970 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1971 BIT_ULL(POWER_DOMAIN_INIT))
1972
1883static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 1973static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
1884 .sync_hw = i9xx_power_well_sync_hw_noop, 1974 .sync_hw = i9xx_power_well_sync_hw_noop,
1885 .enable = i9xx_always_on_power_well_noop, 1975 .enable = i9xx_always_on_power_well_noop,
@@ -1910,6 +2000,27 @@ static struct i915_power_well i9xx_always_on_power_well[] = {
1910 }, 2000 },
1911}; 2001};
1912 2002
2003static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2004 .sync_hw = i830_pipes_power_well_sync_hw,
2005 .enable = i830_pipes_power_well_enable,
2006 .disable = i830_pipes_power_well_disable,
2007 .is_enabled = i830_pipes_power_well_enabled,
2008};
2009
2010static struct i915_power_well i830_power_wells[] = {
2011 {
2012 .name = "always-on",
2013 .always_on = 1,
2014 .domains = POWER_DOMAIN_MASK,
2015 .ops = &i9xx_always_on_power_well_ops,
2016 },
2017 {
2018 .name = "pipes",
2019 .domains = I830_PIPES_POWER_DOMAINS,
2020 .ops = &i830_pipes_power_well_ops,
2021 },
2022};
2023
1913static const struct i915_power_well_ops hsw_power_well_ops = { 2024static const struct i915_power_well_ops hsw_power_well_ops = {
1914 .sync_hw = hsw_power_well_sync_hw, 2025 .sync_hw = hsw_power_well_sync_hw,
1915 .enable = hsw_power_well_enable, 2026 .enable = hsw_power_well_enable,
@@ -2275,6 +2386,82 @@ static struct i915_power_well glk_power_wells[] = {
2275 }, 2386 },
2276}; 2387};
2277 2388
2389static struct i915_power_well cnl_power_wells[] = {
2390 {
2391 .name = "always-on",
2392 .always_on = 1,
2393 .domains = POWER_DOMAIN_MASK,
2394 .ops = &i9xx_always_on_power_well_ops,
2395 },
2396 {
2397 .name = "power well 1",
2398 /* Handled by the DMC firmware */
2399 .domains = 0,
2400 .ops = &skl_power_well_ops,
2401 .id = SKL_DISP_PW_1,
2402 },
2403 {
2404 .name = "AUX A",
2405 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
2406 .ops = &skl_power_well_ops,
2407 .id = CNL_DISP_PW_AUX_A,
2408 },
2409 {
2410 .name = "AUX B",
2411 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
2412 .ops = &skl_power_well_ops,
2413 .id = CNL_DISP_PW_AUX_B,
2414 },
2415 {
2416 .name = "AUX C",
2417 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
2418 .ops = &skl_power_well_ops,
2419 .id = CNL_DISP_PW_AUX_C,
2420 },
2421 {
2422 .name = "AUX D",
2423 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
2424 .ops = &skl_power_well_ops,
2425 .id = CNL_DISP_PW_AUX_D,
2426 },
2427 {
2428 .name = "DC off",
2429 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
2430 .ops = &gen9_dc_off_power_well_ops,
2431 .id = SKL_DISP_PW_DC_OFF,
2432 },
2433 {
2434 .name = "power well 2",
2435 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2436 .ops = &skl_power_well_ops,
2437 .id = SKL_DISP_PW_2,
2438 },
2439 {
2440 .name = "DDI A IO power well",
2441 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
2442 .ops = &skl_power_well_ops,
2443 .id = CNL_DISP_PW_DDI_A,
2444 },
2445 {
2446 .name = "DDI B IO power well",
2447 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
2448 .ops = &skl_power_well_ops,
2449 .id = SKL_DISP_PW_DDI_B,
2450 },
2451 {
2452 .name = "DDI C IO power well",
2453 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
2454 .ops = &skl_power_well_ops,
2455 .id = SKL_DISP_PW_DDI_C,
2456 },
2457 {
2458 .name = "DDI D IO power well",
2459 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
2460 .ops = &skl_power_well_ops,
2461 .id = SKL_DISP_PW_DDI_D,
2462 },
2463};
2464
2278static int 2465static int
2279sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, 2466sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
2280 int disable_power_well) 2467 int disable_power_well)
@@ -2369,6 +2556,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
2369 set_power_wells(power_domains, bdw_power_wells); 2556 set_power_wells(power_domains, bdw_power_wells);
2370 } else if (IS_GEN9_BC(dev_priv)) { 2557 } else if (IS_GEN9_BC(dev_priv)) {
2371 set_power_wells(power_domains, skl_power_wells); 2558 set_power_wells(power_domains, skl_power_wells);
2559 } else if (IS_CANNONLAKE(dev_priv)) {
2560 set_power_wells(power_domains, cnl_power_wells);
2372 } else if (IS_BROXTON(dev_priv)) { 2561 } else if (IS_BROXTON(dev_priv)) {
2373 set_power_wells(power_domains, bxt_power_wells); 2562 set_power_wells(power_domains, bxt_power_wells);
2374 } else if (IS_GEMINILAKE(dev_priv)) { 2563 } else if (IS_GEMINILAKE(dev_priv)) {
@@ -2377,6 +2566,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
2377 set_power_wells(power_domains, chv_power_wells); 2566 set_power_wells(power_domains, chv_power_wells);
2378 } else if (IS_VALLEYVIEW(dev_priv)) { 2567 } else if (IS_VALLEYVIEW(dev_priv)) {
2379 set_power_wells(power_domains, vlv_power_wells); 2568 set_power_wells(power_domains, vlv_power_wells);
2569 } else if (IS_I830(dev_priv)) {
2570 set_power_wells(power_domains, i830_power_wells);
2380 } else { 2571 } else {
2381 set_power_wells(power_domains, i9xx_always_on_power_well); 2572 set_power_wells(power_domains, i9xx_always_on_power_well);
2382 } 2573 }
@@ -2569,6 +2760,111 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
2569 mutex_unlock(&power_domains->lock); 2760 mutex_unlock(&power_domains->lock);
2570} 2761}
2571 2762
2763#define CNL_PROCMON_IDX(val) \
2764 (((val) & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) >> VOLTAGE_INFO_SHIFT)
2765#define NUM_CNL_PROCMON \
2766 (CNL_PROCMON_IDX(VOLTAGE_INFO_MASK | PROCESS_INFO_MASK) + 1)
2767
2768static const struct cnl_procmon {
2769 u32 dw1, dw9, dw10;
2770} cnl_procmon_values[NUM_CNL_PROCMON] = {
2771 [CNL_PROCMON_IDX(VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0)] =
2772 { .dw1 = 0x00 << 16, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
2773 [CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0)] =
2774 { .dw1 = 0x00 << 16, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
2775 [CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1)] =
2776 { .dw1 = 0x00 << 16, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
2777 [CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0)] =
2778 { .dw1 = 0x00 << 16, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
2779 [CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1)] =
2780 { .dw1 = 0x44 << 16, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
2781};
2782
2783static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
2784{
2785 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2786 const struct cnl_procmon *procmon;
2787 struct i915_power_well *well;
2788 u32 val;
2789
2790 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2791
2792 /* 1. Enable PCH Reset Handshake */
2793 val = I915_READ(HSW_NDE_RSTWRN_OPT);
2794 val |= RESET_PCH_HANDSHAKE_ENABLE;
2795 I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2796
2797 /* 2. Enable Comp */
2798 val = I915_READ(CHICKEN_MISC_2);
2799 val &= ~COMP_PWR_DOWN;
2800 I915_WRITE(CHICKEN_MISC_2, val);
2801
2802 val = I915_READ(CNL_PORT_COMP_DW3);
2803 procmon = &cnl_procmon_values[CNL_PROCMON_IDX(val)];
2804
2805 WARN_ON(procmon->dw10 == 0);
2806
2807 val = I915_READ(CNL_PORT_COMP_DW1);
2808 val &= ~((0xff << 16) | 0xff);
2809 val |= procmon->dw1;
2810 I915_WRITE(CNL_PORT_COMP_DW1, val);
2811
2812 I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9);
2813 I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10);
2814
2815 val = I915_READ(CNL_PORT_COMP_DW0);
2816 val |= COMP_INIT;
2817 I915_WRITE(CNL_PORT_COMP_DW0, val);
2818
2819 /* 3. */
2820 val = I915_READ(CNL_PORT_CL1CM_DW5);
2821 val |= CL_POWER_DOWN_ENABLE;
2822 I915_WRITE(CNL_PORT_CL1CM_DW5, val);
2823
2824 /* 4. Enable Power Well 1 (PG1) and Aux IO Power */
2825 mutex_lock(&power_domains->lock);
2826 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2827 intel_power_well_enable(dev_priv, well);
2828 mutex_unlock(&power_domains->lock);
2829
2830 /* 5. Enable CD clock */
2831 cnl_init_cdclk(dev_priv);
2832
2833 /* 6. Enable DBUF */
2834 gen9_dbuf_enable(dev_priv);
2835}
2836
2837#undef CNL_PROCMON_IDX
2838#undef NUM_CNL_PROCMON
2839
2840static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
2841{
2842 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2843 struct i915_power_well *well;
2844 u32 val;
2845
2846 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2847
2848 /* 1. Disable all display engine functions -> aready done */
2849
2850 /* 2. Disable DBUF */
2851 gen9_dbuf_disable(dev_priv);
2852
2853 /* 3. Disable CD clock */
2854 cnl_uninit_cdclk(dev_priv);
2855
2856 /* 4. Disable Power Well 1 (PG1) and Aux IO Power */
2857 mutex_lock(&power_domains->lock);
2858 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2859 intel_power_well_disable(dev_priv, well);
2860 mutex_unlock(&power_domains->lock);
2861
2862 /* 5. Disable Comp */
2863 val = I915_READ(CHICKEN_MISC_2);
2864 val |= COMP_PWR_DOWN;
2865 I915_WRITE(CHICKEN_MISC_2, val);
2866}
2867
2572static void chv_phy_control_init(struct drm_i915_private *dev_priv) 2868static void chv_phy_control_init(struct drm_i915_private *dev_priv)
2573{ 2869{
2574 struct i915_power_well *cmn_bc = 2870 struct i915_power_well *cmn_bc =
@@ -2701,7 +2997,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2701 2997
2702 power_domains->initializing = true; 2998 power_domains->initializing = true;
2703 2999
2704 if (IS_GEN9_BC(dev_priv)) { 3000 if (IS_CANNONLAKE(dev_priv)) {
3001 cnl_display_core_init(dev_priv, resume);
3002 } else if (IS_GEN9_BC(dev_priv)) {
2705 skl_display_core_init(dev_priv, resume); 3003 skl_display_core_init(dev_priv, resume);
2706 } else if (IS_GEN9_LP(dev_priv)) { 3004 } else if (IS_GEN9_LP(dev_priv)) {
2707 bxt_display_core_init(dev_priv, resume); 3005 bxt_display_core_init(dev_priv, resume);
@@ -2740,7 +3038,9 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
2740 if (!i915.disable_power_well) 3038 if (!i915.disable_power_well)
2741 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 3039 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2742 3040
2743 if (IS_GEN9_BC(dev_priv)) 3041 if (IS_CANNONLAKE(dev_priv))
3042 cnl_display_core_uninit(dev_priv);
3043 else if (IS_GEN9_BC(dev_priv))
2744 skl_display_core_uninit(dev_priv); 3044 skl_display_core_uninit(dev_priv);
2745 else if (IS_GEN9_LP(dev_priv)) 3045 else if (IS_GEN9_LP(dev_priv))
2746 bxt_display_core_uninit(dev_priv); 3046 bxt_display_core_uninit(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6cc181203135..3f8f30b412cd 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -100,13 +100,6 @@ struct intel_sdvo {
100 uint16_t hotplug_active; 100 uint16_t hotplug_active;
101 101
102 /** 102 /**
103 * This is used to select the color range of RBG outputs in HDMI mode.
104 * It is only valid when using TMDS encoding and 8 bit per color mode.
105 */
106 uint32_t color_range;
107 bool color_range_auto;
108
109 /**
110 * This is set if we're going to treat the device as TV-out. 103 * This is set if we're going to treat the device as TV-out.
111 * 104 *
112 * While we have these nice friendly flags for output types that ought 105 * While we have these nice friendly flags for output types that ought
@@ -117,9 +110,6 @@ struct intel_sdvo {
117 110
118 enum port port; 111 enum port port;
119 112
120 /* This is for current tv format name */
121 int tv_format_index;
122
123 /** 113 /**
124 * This is set if we treat the device as HDMI, instead of DVI. 114 * This is set if we treat the device as HDMI, instead of DVI.
125 */ 115 */
@@ -154,8 +144,6 @@ struct intel_sdvo_connector {
154 /* Mark the type of connector */ 144 /* Mark the type of connector */
155 uint16_t output_flag; 145 uint16_t output_flag;
156 146
157 enum hdmi_force_audio force_audio;
158
159 /* This contains all current supported TV format */ 147 /* This contains all current supported TV format */
160 u8 tv_format_supported[TV_FORMAT_NUM]; 148 u8 tv_format_supported[TV_FORMAT_NUM];
161 int format_supported_num; 149 int format_supported_num;
@@ -182,24 +170,19 @@ struct intel_sdvo_connector {
182 /* add the property for the SDVO-TV/LVDS */ 170 /* add the property for the SDVO-TV/LVDS */
183 struct drm_property *brightness; 171 struct drm_property *brightness;
184 172
185 /* Add variable to record current setting for the above property */
186 u32 left_margin, right_margin, top_margin, bottom_margin;
187
188 /* this is to get the range of margin.*/ 173 /* this is to get the range of margin.*/
189 u32 max_hscan, max_vscan; 174 u32 max_hscan, max_vscan;
190 u32 max_hpos, cur_hpos; 175};
191 u32 max_vpos, cur_vpos; 176
192 u32 cur_brightness, max_brightness; 177struct intel_sdvo_connector_state {
193 u32 cur_contrast, max_contrast; 178 /* base.base: tv.saturation/contrast/hue/brightness */
194 u32 cur_saturation, max_saturation; 179 struct intel_digital_connector_state base;
195 u32 cur_hue, max_hue; 180
196 u32 cur_sharpness, max_sharpness; 181 struct {
197 u32 cur_flicker_filter, max_flicker_filter; 182 unsigned overscan_h, overscan_v, hpos, vpos, sharpness;
198 u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive; 183 unsigned flicker_filter, flicker_filter_2d, flicker_filter_adaptive;
199 u32 cur_flicker_filter_2d, max_flicker_filter_2d; 184 unsigned chroma_filter, luma_filter, dot_crawl;
200 u32 cur_tv_chroma_filter, max_tv_chroma_filter; 185 } tv;
201 u32 cur_tv_luma_filter, max_tv_luma_filter;
202 u32 cur_dot_crawl, max_dot_crawl;
203}; 186};
204 187
205static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder) 188static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
@@ -212,9 +195,16 @@ static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
212 return to_sdvo(intel_attached_encoder(connector)); 195 return to_sdvo(intel_attached_encoder(connector));
213} 196}
214 197
215static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) 198static struct intel_sdvo_connector *
199to_intel_sdvo_connector(struct drm_connector *connector)
216{ 200{
217 return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base); 201 return container_of(connector, struct intel_sdvo_connector, base.base);
202}
203
204static struct intel_sdvo_connector_state *
205to_intel_sdvo_connector_state(struct drm_connector_state *conn_state)
206{
207 return container_of(conn_state, struct intel_sdvo_connector_state, base.base);
218} 208}
219 209
220static bool 210static bool
@@ -1030,12 +1020,13 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
1030 sdvo_data, sizeof(sdvo_data)); 1020 sdvo_data, sizeof(sdvo_data));
1031} 1021}
1032 1022
1033static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) 1023static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
1024 struct drm_connector_state *conn_state)
1034{ 1025{
1035 struct intel_sdvo_tv_format format; 1026 struct intel_sdvo_tv_format format;
1036 uint32_t format_map; 1027 uint32_t format_map;
1037 1028
1038 format_map = 1 << intel_sdvo->tv_format_index; 1029 format_map = 1 << conn_state->tv.mode;
1039 memset(&format, 0, sizeof(format)); 1030 memset(&format, 0, sizeof(format));
1040 memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map))); 1031 memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
1041 1032
@@ -1122,6 +1113,8 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1122 struct drm_connector_state *conn_state) 1113 struct drm_connector_state *conn_state)
1123{ 1114{
1124 struct intel_sdvo *intel_sdvo = to_sdvo(encoder); 1115 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1116 struct intel_sdvo_connector_state *intel_sdvo_state =
1117 to_intel_sdvo_connector_state(conn_state);
1125 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 1118 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1126 struct drm_display_mode *mode = &pipe_config->base.mode; 1119 struct drm_display_mode *mode = &pipe_config->base.mode;
1127 1120
@@ -1160,9 +1153,14 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1160 pipe_config->pixel_multiplier = 1153 pipe_config->pixel_multiplier =
1161 intel_sdvo_get_pixel_multiplier(adjusted_mode); 1154 intel_sdvo_get_pixel_multiplier(adjusted_mode);
1162 1155
1163 pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor; 1156 if (intel_sdvo_state->base.force_audio != HDMI_AUDIO_OFF_DVI)
1157 pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor;
1158
1159 if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON ||
1160 (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO && intel_sdvo->has_hdmi_audio))
1161 pipe_config->has_audio = true;
1164 1162
1165 if (intel_sdvo->color_range_auto) { 1163 if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
1166 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 1164 /* See CEA-861-E - 5.1 Default Encoding Parameters */
1167 /* FIXME: This bit is only valid when using TMDS encoding and 8 1165 /* FIXME: This bit is only valid when using TMDS encoding and 8
1168 * bit per color mode. */ 1166 * bit per color mode. */
@@ -1171,7 +1169,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1171 pipe_config->limited_color_range = true; 1169 pipe_config->limited_color_range = true;
1172 } else { 1170 } else {
1173 if (pipe_config->has_hdmi_sink && 1171 if (pipe_config->has_hdmi_sink &&
1174 intel_sdvo->color_range == HDMI_COLOR_RANGE_16_235) 1172 intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED)
1175 pipe_config->limited_color_range = true; 1173 pipe_config->limited_color_range = true;
1176 } 1174 }
1177 1175
@@ -1186,6 +1184,68 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1186 return true; 1184 return true;
1187} 1185}
1188 1186
1187#define UPDATE_PROPERTY(input, NAME) \
1188 do { \
1189 val = input; \
1190 intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_##NAME, &val, sizeof(val)); \
1191 } while (0)
1192
1193static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
1194 struct intel_sdvo_connector_state *sdvo_state)
1195{
1196 struct drm_connector_state *conn_state = &sdvo_state->base.base;
1197 struct intel_sdvo_connector *intel_sdvo_conn =
1198 to_intel_sdvo_connector(conn_state->connector);
1199 uint16_t val;
1200
1201 if (intel_sdvo_conn->left)
1202 UPDATE_PROPERTY(sdvo_state->tv.overscan_h, OVERSCAN_H);
1203
1204 if (intel_sdvo_conn->top)
1205 UPDATE_PROPERTY(sdvo_state->tv.overscan_v, OVERSCAN_V);
1206
1207 if (intel_sdvo_conn->hpos)
1208 UPDATE_PROPERTY(sdvo_state->tv.hpos, HPOS);
1209
1210 if (intel_sdvo_conn->vpos)
1211 UPDATE_PROPERTY(sdvo_state->tv.vpos, VPOS);
1212
1213 if (intel_sdvo_conn->saturation)
1214 UPDATE_PROPERTY(conn_state->tv.saturation, SATURATION);
1215
1216 if (intel_sdvo_conn->contrast)
1217 UPDATE_PROPERTY(conn_state->tv.contrast, CONTRAST);
1218
1219 if (intel_sdvo_conn->hue)
1220 UPDATE_PROPERTY(conn_state->tv.hue, HUE);
1221
1222 if (intel_sdvo_conn->brightness)
1223 UPDATE_PROPERTY(conn_state->tv.brightness, BRIGHTNESS);
1224
1225 if (intel_sdvo_conn->sharpness)
1226 UPDATE_PROPERTY(sdvo_state->tv.sharpness, SHARPNESS);
1227
1228 if (intel_sdvo_conn->flicker_filter)
1229 UPDATE_PROPERTY(sdvo_state->tv.flicker_filter, FLICKER_FILTER);
1230
1231 if (intel_sdvo_conn->flicker_filter_2d)
1232 UPDATE_PROPERTY(sdvo_state->tv.flicker_filter_2d, FLICKER_FILTER_2D);
1233
1234 if (intel_sdvo_conn->flicker_filter_adaptive)
1235 UPDATE_PROPERTY(sdvo_state->tv.flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
1236
1237 if (intel_sdvo_conn->tv_chroma_filter)
1238 UPDATE_PROPERTY(sdvo_state->tv.chroma_filter, TV_CHROMA_FILTER);
1239
1240 if (intel_sdvo_conn->tv_luma_filter)
1241 UPDATE_PROPERTY(sdvo_state->tv.luma_filter, TV_LUMA_FILTER);
1242
1243 if (intel_sdvo_conn->dot_crawl)
1244 UPDATE_PROPERTY(sdvo_state->tv.dot_crawl, DOT_CRAWL);
1245
1246#undef UPDATE_PROPERTY
1247}
1248
1189static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, 1249static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
1190 struct intel_crtc_state *crtc_state, 1250 struct intel_crtc_state *crtc_state,
1191 struct drm_connector_state *conn_state) 1251 struct drm_connector_state *conn_state)
@@ -1193,6 +1253,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
1193 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 1253 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
1194 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 1254 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1195 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; 1255 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
1256 struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(conn_state);
1196 struct drm_display_mode *mode = &crtc_state->base.mode; 1257 struct drm_display_mode *mode = &crtc_state->base.mode;
1197 struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder); 1258 struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
1198 u32 sdvox; 1259 u32 sdvox;
@@ -1200,6 +1261,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
1200 struct intel_sdvo_dtd input_dtd, output_dtd; 1261 struct intel_sdvo_dtd input_dtd, output_dtd;
1201 int rate; 1262 int rate;
1202 1263
1264 intel_sdvo_update_props(intel_sdvo, sdvo_state);
1265
1203 /* First, set the input mapping for the first input to our controlled 1266 /* First, set the input mapping for the first input to our controlled
1204 * output. This is only correct if we're a single-input device, in 1267 * output. This is only correct if we're a single-input device, in
1205 * which case the first input is the output from the appropriate SDVO 1268 * which case the first input is the output from the appropriate SDVO
@@ -1241,7 +1304,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
1241 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); 1304 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
1242 1305
1243 if (intel_sdvo->is_tv && 1306 if (intel_sdvo->is_tv &&
1244 !intel_sdvo_set_tv_format(intel_sdvo)) 1307 !intel_sdvo_set_tv_format(intel_sdvo, conn_state))
1245 return; 1308 return;
1246 1309
1247 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1310 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
@@ -1285,7 +1348,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
1285 else 1348 else
1286 sdvox |= SDVO_PIPE_SEL(crtc->pipe); 1349 sdvox |= SDVO_PIPE_SEL(crtc->pipe);
1287 1350
1288 if (intel_sdvo->has_hdmi_audio) 1351 if (crtc_state->has_audio)
1289 sdvox |= SDVO_AUDIO_ENABLE; 1352 sdvox |= SDVO_AUDIO_ENABLE;
1290 1353
1291 if (INTEL_GEN(dev_priv) >= 4) { 1354 if (INTEL_GEN(dev_priv) >= 4) {
@@ -1694,12 +1757,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
1694 kfree(edid); 1757 kfree(edid);
1695 } 1758 }
1696 1759
1697 if (status == connector_status_connected) {
1698 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1699 if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO)
1700 intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON);
1701 }
1702
1703 return status; 1760 return status;
1704} 1761}
1705 1762
@@ -1879,6 +1936,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
1879static void intel_sdvo_get_tv_modes(struct drm_connector *connector) 1936static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1880{ 1937{
1881 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); 1938 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1939 const struct drm_connector_state *conn_state = connector->state;
1882 struct intel_sdvo_sdtv_resolution_request tv_res; 1940 struct intel_sdvo_sdtv_resolution_request tv_res;
1883 uint32_t reply = 0, format_map = 0; 1941 uint32_t reply = 0, format_map = 0;
1884 int i; 1942 int i;
@@ -1889,7 +1947,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1889 /* Read the list of supported input resolutions for the selected TV 1947 /* Read the list of supported input resolutions for the selected TV
1890 * format. 1948 * format.
1891 */ 1949 */
1892 format_map = 1 << intel_sdvo->tv_format_index; 1950 format_map = 1 << conn_state->tv.mode;
1893 memcpy(&tv_res, &format_map, 1951 memcpy(&tv_res, &format_map,
1894 min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request))); 1952 min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
1895 1953
@@ -1978,192 +2036,121 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1978 kfree(intel_sdvo_connector); 2036 kfree(intel_sdvo_connector);
1979} 2037}
1980 2038
1981static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
1982{
1983 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1984 struct edid *edid;
1985 bool has_audio = false;
1986
1987 if (!intel_sdvo->is_hdmi)
1988 return false;
1989
1990 edid = intel_sdvo_get_edid(connector);
1991 if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
1992 has_audio = drm_detect_monitor_audio(edid);
1993 kfree(edid);
1994
1995 return has_audio;
1996}
1997
1998static int 2039static int
1999intel_sdvo_set_property(struct drm_connector *connector, 2040intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
2000 struct drm_property *property, 2041 const struct drm_connector_state *state,
2001 uint64_t val) 2042 struct drm_property *property,
2043 uint64_t *val)
2002{ 2044{
2003 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
2004 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 2045 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
2005 struct drm_i915_private *dev_priv = to_i915(connector->dev); 2046 const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state((void *)state);
2006 uint16_t temp_value;
2007 uint8_t cmd;
2008 int ret;
2009
2010 ret = drm_object_property_set_value(&connector->base, property, val);
2011 if (ret)
2012 return ret;
2013
2014 if (property == dev_priv->force_audio_property) {
2015 int i = val;
2016 bool has_audio;
2017
2018 if (i == intel_sdvo_connector->force_audio)
2019 return 0;
2020
2021 intel_sdvo_connector->force_audio = i;
2022
2023 if (i == HDMI_AUDIO_AUTO)
2024 has_audio = intel_sdvo_detect_hdmi_audio(connector);
2025 else
2026 has_audio = (i == HDMI_AUDIO_ON);
2027
2028 if (has_audio == intel_sdvo->has_hdmi_audio)
2029 return 0;
2030
2031 intel_sdvo->has_hdmi_audio = has_audio;
2032 goto done;
2033 }
2034
2035 if (property == dev_priv->broadcast_rgb_property) {
2036 bool old_auto = intel_sdvo->color_range_auto;
2037 uint32_t old_range = intel_sdvo->color_range;
2038
2039 switch (val) {
2040 case INTEL_BROADCAST_RGB_AUTO:
2041 intel_sdvo->color_range_auto = true;
2042 break;
2043 case INTEL_BROADCAST_RGB_FULL:
2044 intel_sdvo->color_range_auto = false;
2045 intel_sdvo->color_range = 0;
2046 break;
2047 case INTEL_BROADCAST_RGB_LIMITED:
2048 intel_sdvo->color_range_auto = false;
2049 /* FIXME: this bit is only valid when using TMDS
2050 * encoding and 8 bit per color mode. */
2051 intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
2052 break;
2053 default:
2054 return -EINVAL;
2055 }
2056
2057 if (old_auto == intel_sdvo->color_range_auto &&
2058 old_range == intel_sdvo->color_range)
2059 return 0;
2060
2061 goto done;
2062 }
2063
2064 if (property == connector->dev->mode_config.aspect_ratio_property) {
2065 connector->state->picture_aspect_ratio = val;
2066 goto done;
2067 }
2068
2069#define CHECK_PROPERTY(name, NAME) \
2070 if (intel_sdvo_connector->name == property) { \
2071 if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
2072 if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
2073 cmd = SDVO_CMD_SET_##NAME; \
2074 intel_sdvo_connector->cur_##name = temp_value; \
2075 goto set_value; \
2076 }
2077 2047
2078 if (property == intel_sdvo_connector->tv_format) { 2048 if (property == intel_sdvo_connector->tv_format) {
2079 if (val >= TV_FORMAT_NUM) 2049 int i;
2080 return -EINVAL;
2081
2082 if (intel_sdvo->tv_format_index ==
2083 intel_sdvo_connector->tv_format_supported[val])
2084 return 0;
2085
2086 intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val];
2087 goto done;
2088 } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
2089 temp_value = val;
2090 if (intel_sdvo_connector->left == property) {
2091 drm_object_property_set_value(&connector->base,
2092 intel_sdvo_connector->right, val);
2093 if (intel_sdvo_connector->left_margin == temp_value)
2094 return 0;
2095 2050
2096 intel_sdvo_connector->left_margin = temp_value; 2051 for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
2097 intel_sdvo_connector->right_margin = temp_value; 2052 if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) {
2098 temp_value = intel_sdvo_connector->max_hscan - 2053 *val = i;
2099 intel_sdvo_connector->left_margin;
2100 cmd = SDVO_CMD_SET_OVERSCAN_H;
2101 goto set_value;
2102 } else if (intel_sdvo_connector->right == property) {
2103 drm_object_property_set_value(&connector->base,
2104 intel_sdvo_connector->left, val);
2105 if (intel_sdvo_connector->right_margin == temp_value)
2106 return 0;
2107 2054
2108 intel_sdvo_connector->left_margin = temp_value;
2109 intel_sdvo_connector->right_margin = temp_value;
2110 temp_value = intel_sdvo_connector->max_hscan -
2111 intel_sdvo_connector->left_margin;
2112 cmd = SDVO_CMD_SET_OVERSCAN_H;
2113 goto set_value;
2114 } else if (intel_sdvo_connector->top == property) {
2115 drm_object_property_set_value(&connector->base,
2116 intel_sdvo_connector->bottom, val);
2117 if (intel_sdvo_connector->top_margin == temp_value)
2118 return 0; 2055 return 0;
2056 }
2119 2057
2120 intel_sdvo_connector->top_margin = temp_value; 2058 WARN_ON(1);
2121 intel_sdvo_connector->bottom_margin = temp_value; 2059 *val = 0;
2122 temp_value = intel_sdvo_connector->max_vscan - 2060 } else if (property == intel_sdvo_connector->top ||
2123 intel_sdvo_connector->top_margin; 2061 property == intel_sdvo_connector->bottom)
2124 cmd = SDVO_CMD_SET_OVERSCAN_V; 2062 *val = intel_sdvo_connector->max_vscan - sdvo_state->tv.overscan_v;
2125 goto set_value; 2063 else if (property == intel_sdvo_connector->left ||
2126 } else if (intel_sdvo_connector->bottom == property) { 2064 property == intel_sdvo_connector->right)
2127 drm_object_property_set_value(&connector->base, 2065 *val = intel_sdvo_connector->max_hscan - sdvo_state->tv.overscan_h;
2128 intel_sdvo_connector->top, val); 2066 else if (property == intel_sdvo_connector->hpos)
2129 if (intel_sdvo_connector->bottom_margin == temp_value) 2067 *val = sdvo_state->tv.hpos;
2130 return 0; 2068 else if (property == intel_sdvo_connector->vpos)
2069 *val = sdvo_state->tv.vpos;
2070 else if (property == intel_sdvo_connector->saturation)
2071 *val = state->tv.saturation;
2072 else if (property == intel_sdvo_connector->contrast)
2073 *val = state->tv.contrast;
2074 else if (property == intel_sdvo_connector->hue)
2075 *val = state->tv.hue;
2076 else if (property == intel_sdvo_connector->brightness)
2077 *val = state->tv.brightness;
2078 else if (property == intel_sdvo_connector->sharpness)
2079 *val = sdvo_state->tv.sharpness;
2080 else if (property == intel_sdvo_connector->flicker_filter)
2081 *val = sdvo_state->tv.flicker_filter;
2082 else if (property == intel_sdvo_connector->flicker_filter_2d)
2083 *val = sdvo_state->tv.flicker_filter_2d;
2084 else if (property == intel_sdvo_connector->flicker_filter_adaptive)
2085 *val = sdvo_state->tv.flicker_filter_adaptive;
2086 else if (property == intel_sdvo_connector->tv_chroma_filter)
2087 *val = sdvo_state->tv.chroma_filter;
2088 else if (property == intel_sdvo_connector->tv_luma_filter)
2089 *val = sdvo_state->tv.luma_filter;
2090 else if (property == intel_sdvo_connector->dot_crawl)
2091 *val = sdvo_state->tv.dot_crawl;
2092 else
2093 return intel_digital_connector_atomic_get_property(connector, state, property, val);
2131 2094
2132 intel_sdvo_connector->top_margin = temp_value; 2095 return 0;
2133 intel_sdvo_connector->bottom_margin = temp_value; 2096}
2134 temp_value = intel_sdvo_connector->max_vscan -
2135 intel_sdvo_connector->top_margin;
2136 cmd = SDVO_CMD_SET_OVERSCAN_V;
2137 goto set_value;
2138 }
2139 CHECK_PROPERTY(hpos, HPOS)
2140 CHECK_PROPERTY(vpos, VPOS)
2141 CHECK_PROPERTY(saturation, SATURATION)
2142 CHECK_PROPERTY(contrast, CONTRAST)
2143 CHECK_PROPERTY(hue, HUE)
2144 CHECK_PROPERTY(brightness, BRIGHTNESS)
2145 CHECK_PROPERTY(sharpness, SHARPNESS)
2146 CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
2147 CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
2148 CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
2149 CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
2150 CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
2151 CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
2152 }
2153 2097
2154 return -EINVAL; /* unknown property */ 2098static int
2099intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
2100 struct drm_connector_state *state,
2101 struct drm_property *property,
2102 uint64_t val)
2103{
2104 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
2105 struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
2155 2106
2156set_value: 2107 if (property == intel_sdvo_connector->tv_format) {
2157 if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2)) 2108 state->tv.mode = intel_sdvo_connector->tv_format_supported[val];
2158 return -EIO;
2159 2109
2110 if (state->crtc) {
2111 struct drm_crtc_state *crtc_state =
2112 drm_atomic_get_new_crtc_state(state->state, state->crtc);
2160 2113
2161done: 2114 crtc_state->connectors_changed = true;
2162 if (intel_sdvo->base.base.crtc) 2115 }
2163 intel_crtc_restore_mode(intel_sdvo->base.base.crtc); 2116 } else if (property == intel_sdvo_connector->top ||
2117 property == intel_sdvo_connector->bottom)
2118 /* Cannot set these independent from each other */
2119 sdvo_state->tv.overscan_v = intel_sdvo_connector->max_vscan - val;
2120 else if (property == intel_sdvo_connector->left ||
2121 property == intel_sdvo_connector->right)
2122 /* Cannot set these independent from each other */
2123 sdvo_state->tv.overscan_h = intel_sdvo_connector->max_hscan - val;
2124 else if (property == intel_sdvo_connector->hpos)
2125 sdvo_state->tv.hpos = val;
2126 else if (property == intel_sdvo_connector->vpos)
2127 sdvo_state->tv.vpos = val;
2128 else if (property == intel_sdvo_connector->saturation)
2129 state->tv.saturation = val;
2130 else if (property == intel_sdvo_connector->contrast)
2131 state->tv.contrast = val;
2132 else if (property == intel_sdvo_connector->hue)
2133 state->tv.hue = val;
2134 else if (property == intel_sdvo_connector->brightness)
2135 state->tv.brightness = val;
2136 else if (property == intel_sdvo_connector->sharpness)
2137 sdvo_state->tv.sharpness = val;
2138 else if (property == intel_sdvo_connector->flicker_filter)
2139 sdvo_state->tv.flicker_filter = val;
2140 else if (property == intel_sdvo_connector->flicker_filter_2d)
2141 sdvo_state->tv.flicker_filter_2d = val;
2142 else if (property == intel_sdvo_connector->flicker_filter_adaptive)
2143 sdvo_state->tv.flicker_filter_adaptive = val;
2144 else if (property == intel_sdvo_connector->tv_chroma_filter)
2145 sdvo_state->tv.chroma_filter = val;
2146 else if (property == intel_sdvo_connector->tv_luma_filter)
2147 sdvo_state->tv.luma_filter = val;
2148 else if (property == intel_sdvo_connector->dot_crawl)
2149 sdvo_state->tv.dot_crawl = val;
2150 else
2151 return intel_digital_connector_atomic_set_property(connector, state, property, val);
2164 2152
2165 return 0; 2153 return 0;
2166#undef CHECK_PROPERTY
2167} 2154}
2168 2155
2169static int 2156static int
@@ -2191,22 +2178,61 @@ intel_sdvo_connector_unregister(struct drm_connector *connector)
2191 intel_connector_unregister(connector); 2178 intel_connector_unregister(connector);
2192} 2179}
2193 2180
2181static struct drm_connector_state *
2182intel_sdvo_connector_duplicate_state(struct drm_connector *connector)
2183{
2184 struct intel_sdvo_connector_state *state;
2185
2186 state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
2187 if (!state)
2188 return NULL;
2189
2190 __drm_atomic_helper_connector_duplicate_state(connector, &state->base.base);
2191 return &state->base.base;
2192}
2193
2194static const struct drm_connector_funcs intel_sdvo_connector_funcs = { 2194static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
2195 .dpms = drm_atomic_helper_connector_dpms, 2195 .dpms = drm_atomic_helper_connector_dpms,
2196 .detect = intel_sdvo_detect, 2196 .detect = intel_sdvo_detect,
2197 .fill_modes = drm_helper_probe_single_connector_modes, 2197 .fill_modes = drm_helper_probe_single_connector_modes,
2198 .set_property = intel_sdvo_set_property, 2198 .set_property = drm_atomic_helper_connector_set_property,
2199 .atomic_get_property = intel_connector_atomic_get_property, 2199 .atomic_get_property = intel_sdvo_connector_atomic_get_property,
2200 .atomic_set_property = intel_sdvo_connector_atomic_set_property,
2200 .late_register = intel_sdvo_connector_register, 2201 .late_register = intel_sdvo_connector_register,
2201 .early_unregister = intel_sdvo_connector_unregister, 2202 .early_unregister = intel_sdvo_connector_unregister,
2202 .destroy = intel_sdvo_destroy, 2203 .destroy = intel_sdvo_destroy,
2203 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 2204 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
2204 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 2205 .atomic_duplicate_state = intel_sdvo_connector_duplicate_state,
2205}; 2206};
2206 2207
2208static int intel_sdvo_atomic_check(struct drm_connector *conn,
2209 struct drm_connector_state *new_conn_state)
2210{
2211 struct drm_atomic_state *state = new_conn_state->state;
2212 struct drm_connector_state *old_conn_state =
2213 drm_atomic_get_old_connector_state(state, conn);
2214 struct intel_sdvo_connector_state *old_state =
2215 to_intel_sdvo_connector_state(old_conn_state);
2216 struct intel_sdvo_connector_state *new_state =
2217 to_intel_sdvo_connector_state(new_conn_state);
2218
2219 if (new_conn_state->crtc &&
2220 (memcmp(&old_state->tv, &new_state->tv, sizeof(old_state->tv)) ||
2221 memcmp(&old_conn_state->tv, &new_conn_state->tv, sizeof(old_conn_state->tv)))) {
2222 struct drm_crtc_state *crtc_state =
2223 drm_atomic_get_new_crtc_state(new_conn_state->state,
2224 new_conn_state->crtc);
2225
2226 crtc_state->connectors_changed = true;
2227 }
2228
2229 return intel_digital_connector_atomic_check(conn, new_conn_state);
2230}
2231
2207static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { 2232static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
2208 .get_modes = intel_sdvo_get_modes, 2233 .get_modes = intel_sdvo_get_modes,
2209 .mode_valid = intel_sdvo_mode_valid, 2234 .mode_valid = intel_sdvo_mode_valid,
2235 .atomic_check = intel_sdvo_atomic_check,
2210}; 2236};
2211 2237
2212static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) 2238static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
@@ -2398,7 +2424,6 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
2398 intel_attach_force_audio_property(&connector->base.base); 2424 intel_attach_force_audio_property(&connector->base.base);
2399 if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) { 2425 if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) {
2400 intel_attach_broadcast_rgb_property(&connector->base.base); 2426 intel_attach_broadcast_rgb_property(&connector->base.base);
2401 intel_sdvo->color_range_auto = true;
2402 } 2427 }
2403 intel_attach_aspect_ratio_property(&connector->base.base); 2428 intel_attach_aspect_ratio_property(&connector->base.base);
2404 connector->base.base.state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; 2429 connector->base.base.state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
@@ -2407,16 +2432,21 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
2407static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void) 2432static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
2408{ 2433{
2409 struct intel_sdvo_connector *sdvo_connector; 2434 struct intel_sdvo_connector *sdvo_connector;
2435 struct intel_sdvo_connector_state *conn_state;
2410 2436
2411 sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL); 2437 sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
2412 if (!sdvo_connector) 2438 if (!sdvo_connector)
2413 return NULL; 2439 return NULL;
2414 2440
2415 if (intel_connector_init(&sdvo_connector->base) < 0) { 2441 conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
2442 if (!conn_state) {
2416 kfree(sdvo_connector); 2443 kfree(sdvo_connector);
2417 return NULL; 2444 return NULL;
2418 } 2445 }
2419 2446
2447 __drm_atomic_helper_connector_reset(&sdvo_connector->base.base,
2448 &conn_state->base.base);
2449
2420 return sdvo_connector; 2450 return sdvo_connector;
2421} 2451}
2422 2452
@@ -2708,31 +2738,31 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
2708 intel_sdvo_connector->tv_format, i, 2738 intel_sdvo_connector->tv_format, i,
2709 i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); 2739 i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
2710 2740
2711 intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0]; 2741 intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
2712 drm_object_attach_property(&intel_sdvo_connector->base.base.base, 2742 drm_object_attach_property(&intel_sdvo_connector->base.base.base,
2713 intel_sdvo_connector->tv_format, 0); 2743 intel_sdvo_connector->tv_format, 0);
2714 return true; 2744 return true;
2715 2745
2716} 2746}
2717 2747
2718#define ENHANCEMENT(name, NAME) do { \ 2748#define _ENHANCEMENT(state_assignment, name, NAME) do { \
2719 if (enhancements.name) { \ 2749 if (enhancements.name) { \
2720 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \ 2750 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
2721 !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \ 2751 !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
2722 return false; \ 2752 return false; \
2723 intel_sdvo_connector->max_##name = data_value[0]; \
2724 intel_sdvo_connector->cur_##name = response; \
2725 intel_sdvo_connector->name = \ 2753 intel_sdvo_connector->name = \
2726 drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ 2754 drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
2727 if (!intel_sdvo_connector->name) return false; \ 2755 if (!intel_sdvo_connector->name) return false; \
2756 state_assignment = response; \
2728 drm_object_attach_property(&connector->base, \ 2757 drm_object_attach_property(&connector->base, \
2729 intel_sdvo_connector->name, \ 2758 intel_sdvo_connector->name, 0); \
2730 intel_sdvo_connector->cur_##name); \
2731 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ 2759 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
2732 data_value[0], data_value[1], response); \ 2760 data_value[0], data_value[1], response); \
2733 } \ 2761 } \
2734} while (0) 2762} while (0)
2735 2763
2764#define ENHANCEMENT(state, name, NAME) _ENHANCEMENT((state)->name, name, NAME)
2765
2736static bool 2766static bool
2737intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, 2767intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2738 struct intel_sdvo_connector *intel_sdvo_connector, 2768 struct intel_sdvo_connector *intel_sdvo_connector,
@@ -2740,6 +2770,9 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2740{ 2770{
2741 struct drm_device *dev = intel_sdvo->base.base.dev; 2771 struct drm_device *dev = intel_sdvo->base.base.dev;
2742 struct drm_connector *connector = &intel_sdvo_connector->base.base; 2772 struct drm_connector *connector = &intel_sdvo_connector->base.base;
2773 struct drm_connector_state *conn_state = connector->state;
2774 struct intel_sdvo_connector_state *sdvo_state =
2775 to_intel_sdvo_connector_state(conn_state);
2743 uint16_t response, data_value[2]; 2776 uint16_t response, data_value[2];
2744 2777
2745 /* when horizontal overscan is supported, Add the left/right property */ 2778 /* when horizontal overscan is supported, Add the left/right property */
@@ -2754,17 +2787,16 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2754 &response, 2)) 2787 &response, 2))
2755 return false; 2788 return false;
2756 2789
2790 sdvo_state->tv.overscan_h = response;
2791
2757 intel_sdvo_connector->max_hscan = data_value[0]; 2792 intel_sdvo_connector->max_hscan = data_value[0];
2758 intel_sdvo_connector->left_margin = data_value[0] - response;
2759 intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
2760 intel_sdvo_connector->left = 2793 intel_sdvo_connector->left =
2761 drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]); 2794 drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
2762 if (!intel_sdvo_connector->left) 2795 if (!intel_sdvo_connector->left)
2763 return false; 2796 return false;
2764 2797
2765 drm_object_attach_property(&connector->base, 2798 drm_object_attach_property(&connector->base,
2766 intel_sdvo_connector->left, 2799 intel_sdvo_connector->left, 0);
2767 intel_sdvo_connector->left_margin);
2768 2800
2769 intel_sdvo_connector->right = 2801 intel_sdvo_connector->right =
2770 drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]); 2802 drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
@@ -2772,8 +2804,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2772 return false; 2804 return false;
2773 2805
2774 drm_object_attach_property(&connector->base, 2806 drm_object_attach_property(&connector->base,
2775 intel_sdvo_connector->right, 2807 intel_sdvo_connector->right, 0);
2776 intel_sdvo_connector->right_margin);
2777 DRM_DEBUG_KMS("h_overscan: max %d, " 2808 DRM_DEBUG_KMS("h_overscan: max %d, "
2778 "default %d, current %d\n", 2809 "default %d, current %d\n",
2779 data_value[0], data_value[1], response); 2810 data_value[0], data_value[1], response);
@@ -2790,9 +2821,9 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2790 &response, 2)) 2821 &response, 2))
2791 return false; 2822 return false;
2792 2823
2824 sdvo_state->tv.overscan_v = response;
2825
2793 intel_sdvo_connector->max_vscan = data_value[0]; 2826 intel_sdvo_connector->max_vscan = data_value[0];
2794 intel_sdvo_connector->top_margin = data_value[0] - response;
2795 intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
2796 intel_sdvo_connector->top = 2827 intel_sdvo_connector->top =
2797 drm_property_create_range(dev, 0, 2828 drm_property_create_range(dev, 0,
2798 "top_margin", 0, data_value[0]); 2829 "top_margin", 0, data_value[0]);
@@ -2800,8 +2831,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2800 return false; 2831 return false;
2801 2832
2802 drm_object_attach_property(&connector->base, 2833 drm_object_attach_property(&connector->base,
2803 intel_sdvo_connector->top, 2834 intel_sdvo_connector->top, 0);
2804 intel_sdvo_connector->top_margin);
2805 2835
2806 intel_sdvo_connector->bottom = 2836 intel_sdvo_connector->bottom =
2807 drm_property_create_range(dev, 0, 2837 drm_property_create_range(dev, 0,
@@ -2810,40 +2840,37 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2810 return false; 2840 return false;
2811 2841
2812 drm_object_attach_property(&connector->base, 2842 drm_object_attach_property(&connector->base,
2813 intel_sdvo_connector->bottom, 2843 intel_sdvo_connector->bottom, 0);
2814 intel_sdvo_connector->bottom_margin);
2815 DRM_DEBUG_KMS("v_overscan: max %d, " 2844 DRM_DEBUG_KMS("v_overscan: max %d, "
2816 "default %d, current %d\n", 2845 "default %d, current %d\n",
2817 data_value[0], data_value[1], response); 2846 data_value[0], data_value[1], response);
2818 } 2847 }
2819 2848
2820 ENHANCEMENT(hpos, HPOS); 2849 ENHANCEMENT(&sdvo_state->tv, hpos, HPOS);
2821 ENHANCEMENT(vpos, VPOS); 2850 ENHANCEMENT(&sdvo_state->tv, vpos, VPOS);
2822 ENHANCEMENT(saturation, SATURATION); 2851 ENHANCEMENT(&conn_state->tv, saturation, SATURATION);
2823 ENHANCEMENT(contrast, CONTRAST); 2852 ENHANCEMENT(&conn_state->tv, contrast, CONTRAST);
2824 ENHANCEMENT(hue, HUE); 2853 ENHANCEMENT(&conn_state->tv, hue, HUE);
2825 ENHANCEMENT(sharpness, SHARPNESS); 2854 ENHANCEMENT(&conn_state->tv, brightness, BRIGHTNESS);
2826 ENHANCEMENT(brightness, BRIGHTNESS); 2855 ENHANCEMENT(&sdvo_state->tv, sharpness, SHARPNESS);
2827 ENHANCEMENT(flicker_filter, FLICKER_FILTER); 2856 ENHANCEMENT(&sdvo_state->tv, flicker_filter, FLICKER_FILTER);
2828 ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE); 2857 ENHANCEMENT(&sdvo_state->tv, flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
2829 ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D); 2858 ENHANCEMENT(&sdvo_state->tv, flicker_filter_2d, FLICKER_FILTER_2D);
2830 ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER); 2859 _ENHANCEMENT(sdvo_state->tv.chroma_filter, tv_chroma_filter, TV_CHROMA_FILTER);
2831 ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER); 2860 _ENHANCEMENT(sdvo_state->tv.luma_filter, tv_luma_filter, TV_LUMA_FILTER);
2832 2861
2833 if (enhancements.dot_crawl) { 2862 if (enhancements.dot_crawl) {
2834 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2)) 2863 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
2835 return false; 2864 return false;
2836 2865
2837 intel_sdvo_connector->max_dot_crawl = 1; 2866 sdvo_state->tv.dot_crawl = response & 0x1;
2838 intel_sdvo_connector->cur_dot_crawl = response & 0x1;
2839 intel_sdvo_connector->dot_crawl = 2867 intel_sdvo_connector->dot_crawl =
2840 drm_property_create_range(dev, 0, "dot_crawl", 0, 1); 2868 drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
2841 if (!intel_sdvo_connector->dot_crawl) 2869 if (!intel_sdvo_connector->dot_crawl)
2842 return false; 2870 return false;
2843 2871
2844 drm_object_attach_property(&connector->base, 2872 drm_object_attach_property(&connector->base,
2845 intel_sdvo_connector->dot_crawl, 2873 intel_sdvo_connector->dot_crawl, 0);
2846 intel_sdvo_connector->cur_dot_crawl);
2847 DRM_DEBUG_KMS("dot crawl: current %d\n", response); 2874 DRM_DEBUG_KMS("dot crawl: current %d\n", response);
2848 } 2875 }
2849 2876
@@ -2859,11 +2886,12 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
2859 struct drm_connector *connector = &intel_sdvo_connector->base.base; 2886 struct drm_connector *connector = &intel_sdvo_connector->base.base;
2860 uint16_t response, data_value[2]; 2887 uint16_t response, data_value[2];
2861 2888
2862 ENHANCEMENT(brightness, BRIGHTNESS); 2889 ENHANCEMENT(&connector->state->tv, brightness, BRIGHTNESS);
2863 2890
2864 return true; 2891 return true;
2865} 2892}
2866#undef ENHANCEMENT 2893#undef ENHANCEMENT
2894#undef _ENHANCEMENT
2867 2895
2868static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, 2896static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
2869 struct intel_sdvo_connector *intel_sdvo_connector) 2897 struct intel_sdvo_connector *intel_sdvo_connector)
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 7a7b07de28a3..27e072cc96eb 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -433,16 +433,19 @@ err_guc:
433 DRM_NOTE("Falling back from GuC submission to execlist mode\n"); 433 DRM_NOTE("Falling back from GuC submission to execlist mode\n");
434 } 434 }
435 435
436 i915.enable_guc_loading = 0;
437 DRM_NOTE("GuC firmware loading disabled\n");
438
436 return ret; 439 return ret;
437} 440}
438 441
439void intel_uc_fini_hw(struct drm_i915_private *dev_priv) 442void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
440{ 443{
444 guc_free_load_err_log(&dev_priv->guc);
445
441 if (!i915.enable_guc_loading) 446 if (!i915.enable_guc_loading)
442 return; 447 return;
443 448
444 guc_free_load_err_log(&dev_priv->guc);
445
446 if (i915.enable_guc_submission) 449 if (i915.enable_guc_submission)
447 i915_guc_submission_disable(dev_priv); 450 i915_guc_submission_disable(dev_priv);
448 451
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 47d7ee1b5d86..9882724bc2b6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -404,8 +404,6 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
404static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, 404static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
405 bool restore_forcewake) 405 bool restore_forcewake)
406{ 406{
407 struct intel_device_info *info = mkwrite_device_info(dev_priv);
408
409 /* clear out unclaimed reg detection bit */ 407 /* clear out unclaimed reg detection bit */
410 if (check_for_unclaimed_mmio(dev_priv)) 408 if (check_for_unclaimed_mmio(dev_priv))
411 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); 409 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
@@ -418,9 +416,6 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
418 GT_FIFO_CTL_RC6_POLICY_STALL); 416 GT_FIFO_CTL_RC6_POLICY_STALL);
419 } 417 }
420 418
421 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
422 info->has_decoupled_mmio = false;
423
424 intel_uncore_forcewake_reset(dev_priv, restore_forcewake); 419 intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
425} 420}
426 421
@@ -810,78 +805,6 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
810 __unclaimed_reg_debug(dev_priv, reg, read, before); 805 __unclaimed_reg_debug(dev_priv, reg, read, before);
811} 806}
812 807
813enum decoupled_power_domain {
814 GEN9_DECOUPLED_PD_BLITTER = 0,
815 GEN9_DECOUPLED_PD_RENDER,
816 GEN9_DECOUPLED_PD_MEDIA,
817 GEN9_DECOUPLED_PD_ALL
818};
819
820enum decoupled_ops {
821 GEN9_DECOUPLED_OP_WRITE = 0,
822 GEN9_DECOUPLED_OP_READ
823};
824
825static const enum decoupled_power_domain fw2dpd_domain[] = {
826 GEN9_DECOUPLED_PD_RENDER,
827 GEN9_DECOUPLED_PD_BLITTER,
828 GEN9_DECOUPLED_PD_ALL,
829 GEN9_DECOUPLED_PD_MEDIA,
830 GEN9_DECOUPLED_PD_ALL,
831 GEN9_DECOUPLED_PD_ALL,
832 GEN9_DECOUPLED_PD_ALL
833};
834
835/*
836 * Decoupled MMIO access for only 1 DWORD
837 */
838static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
839 u32 reg,
840 enum forcewake_domains fw_domain,
841 enum decoupled_ops operation)
842{
843 enum decoupled_power_domain dp_domain;
844 u32 ctrl_reg_data = 0;
845
846 dp_domain = fw2dpd_domain[fw_domain - 1];
847
848 ctrl_reg_data |= reg;
849 ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
850 ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
851 ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
852 __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);
853
854 if (wait_for_atomic((__raw_i915_read32(dev_priv,
855 GEN9_DECOUPLED_REG0_DW1) &
856 GEN9_DECOUPLED_DW1_GO) == 0,
857 FORCEWAKE_ACK_TIMEOUT_MS))
858 DRM_ERROR("Decoupled MMIO wait timed out\n");
859}
860
861static inline u32
862__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
863 u32 reg,
864 enum forcewake_domains fw_domain)
865{
866 __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
867 GEN9_DECOUPLED_OP_READ);
868
869 return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
870}
871
872static inline void
873__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
874 u32 reg, u32 data,
875 enum forcewake_domains fw_domain)
876{
877
878 __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);
879
880 __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
881 GEN9_DECOUPLED_OP_WRITE);
882}
883
884
885#define GEN2_READ_HEADER(x) \ 808#define GEN2_READ_HEADER(x) \
886 u##x val = 0; \ 809 u##x val = 0; \
887 assert_rpm_wakelock_held(dev_priv); 810 assert_rpm_wakelock_held(dev_priv);
@@ -978,28 +901,6 @@ func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) {
978#define __gen6_read(x) __gen_read(gen6, x) 901#define __gen6_read(x) __gen_read(gen6, x)
979#define __fwtable_read(x) __gen_read(fwtable, x) 902#define __fwtable_read(x) __gen_read(fwtable, x)
980 903
981#define __gen9_decoupled_read(x) \
982static u##x \
983gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
984 i915_reg_t reg, bool trace) { \
985 enum forcewake_domains fw_engine; \
986 GEN6_READ_HEADER(x); \
987 fw_engine = __fwtable_reg_read_fw_domains(offset); \
988 if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
989 unsigned i; \
990 u32 *ptr_data = (u32 *) &val; \
991 for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
992 *ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
993 offset, \
994 fw_engine); \
995 } else { \
996 val = __raw_i915_read##x(dev_priv, reg); \
997 } \
998 GEN6_READ_FOOTER; \
999}
1000
1001__gen9_decoupled_read(32)
1002__gen9_decoupled_read(64)
1003__fwtable_read(8) 904__fwtable_read(8)
1004__fwtable_read(16) 905__fwtable_read(16)
1005__fwtable_read(32) 906__fwtable_read(32)
@@ -1086,25 +987,6 @@ func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, boo
1086#define __gen8_write(x) __gen_write(gen8, x) 987#define __gen8_write(x) __gen_write(gen8, x)
1087#define __fwtable_write(x) __gen_write(fwtable, x) 988#define __fwtable_write(x) __gen_write(fwtable, x)
1088 989
1089#define __gen9_decoupled_write(x) \
1090static void \
1091gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
1092 i915_reg_t reg, u##x val, \
1093 bool trace) { \
1094 enum forcewake_domains fw_engine; \
1095 GEN6_WRITE_HEADER; \
1096 fw_engine = __fwtable_reg_write_fw_domains(offset); \
1097 if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
1098 __gen9_decoupled_mmio_write(dev_priv, \
1099 offset, \
1100 val, \
1101 fw_engine); \
1102 else \
1103 __raw_i915_write##x(dev_priv, reg, val); \
1104 GEN6_WRITE_FOOTER; \
1105}
1106
1107__gen9_decoupled_write(32)
1108__fwtable_write(8) 990__fwtable_write(8)
1109__fwtable_write(16) 991__fwtable_write(16)
1110__fwtable_write(32) 992__fwtable_write(32)
@@ -1341,14 +1223,6 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
1341 ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges); 1223 ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
1342 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable); 1224 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
1343 ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); 1225 ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
1344 if (HAS_DECOUPLED_MMIO(dev_priv)) {
1345 dev_priv->uncore.funcs.mmio_readl =
1346 gen9_decoupled_read32;
1347 dev_priv->uncore.funcs.mmio_readq =
1348 gen9_decoupled_read64;
1349 dev_priv->uncore.funcs.mmio_writel =
1350 gen9_decoupled_write32;
1351 }
1352 } 1226 }
1353 1227
1354 iosf_mbi_register_pmic_bus_access_notifier( 1228 iosf_mbi_register_pmic_bus_access_notifier(
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index 4e681fc13be4..caf76af36aba 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -126,9 +126,11 @@ huge_gem_object(struct drm_i915_private *i915,
126 drm_gem_private_object_init(&i915->drm, &obj->base, dma_size); 126 drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
127 i915_gem_object_init(obj, &huge_ops); 127 i915_gem_object_init(obj, &huge_ops);
128 128
129 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
130 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 129 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
130 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
131 obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE; 131 obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
132 obj->cache_coherent = i915_gem_object_is_coherent(obj);
133 obj->cache_dirty = !obj->cache_coherent;
132 obj->scratch = phys_size; 134 obj->scratch = phys_size;
133 135
134 return obj; 136 return obj;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 14e9c2fbc4e6..5ea373221f49 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -304,7 +304,7 @@ static int igt_evict_vm(void *arg)
304 goto cleanup; 304 goto cleanup;
305 305
306 /* Everything is pinned, nothing should happen */ 306 /* Everything is pinned, nothing should happen */
307 err = i915_gem_evict_vm(&ggtt->base, false); 307 err = i915_gem_evict_vm(&ggtt->base);
308 if (err) { 308 if (err) {
309 pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n", 309 pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
310 err); 310 err);
@@ -313,7 +313,7 @@ static int igt_evict_vm(void *arg)
313 313
314 unpin_ggtt(i915); 314 unpin_ggtt(i915);
315 315
316 err = i915_gem_evict_vm(&ggtt->base, false); 316 err = i915_gem_evict_vm(&ggtt->base);
317 if (err) { 317 if (err) {
318 pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n", 318 pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
319 err); 319 err);
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index ad56566e24db..fb9072d5877f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -225,14 +225,6 @@ static bool assert_pin_valid(const struct i915_vma *vma,
225} 225}
226 226
227__maybe_unused 227__maybe_unused
228static bool assert_pin_e2big(const struct i915_vma *vma,
229 const struct pin_mode *mode,
230 int result)
231{
232 return result == -E2BIG;
233}
234
235__maybe_unused
236static bool assert_pin_enospc(const struct i915_vma *vma, 228static bool assert_pin_enospc(const struct i915_vma *vma,
237 const struct pin_mode *mode, 229 const struct pin_mode *mode,
238 int result) 230 int result)
@@ -255,7 +247,6 @@ static int igt_vma_pin1(void *arg)
255#define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " } 247#define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " }
256#define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" } 248#define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" }
257#define INVALID(sz, fl) __INVALID(sz, fl, assert_pin_einval, EINVAL) 249#define INVALID(sz, fl) __INVALID(sz, fl, assert_pin_einval, EINVAL)
258#define TOOBIG(sz, fl) __INVALID(sz, fl, assert_pin_e2big, E2BIG)
259#define NOSPACE(sz, fl) __INVALID(sz, fl, assert_pin_enospc, ENOSPC) 250#define NOSPACE(sz, fl) __INVALID(sz, fl, assert_pin_enospc, ENOSPC)
260 VALID(0, PIN_GLOBAL), 251 VALID(0, PIN_GLOBAL),
261 VALID(0, PIN_GLOBAL | PIN_MAPPABLE), 252 VALID(0, PIN_GLOBAL | PIN_MAPPABLE),
@@ -276,11 +267,11 @@ static int igt_vma_pin1(void *arg)
276 VALID(8192, PIN_GLOBAL), 267 VALID(8192, PIN_GLOBAL),
277 VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE), 268 VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
278 VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE), 269 VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
279 TOOBIG(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE), 270 NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
280 VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL), 271 VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
281 VALID(i915->ggtt.base.total, PIN_GLOBAL), 272 VALID(i915->ggtt.base.total, PIN_GLOBAL),
282 TOOBIG(i915->ggtt.base.total + 4096, PIN_GLOBAL), 273 NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL),
283 TOOBIG(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL), 274 NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
284 INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)), 275 INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
285 INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)), 276 INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
286 INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)), 277 INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
@@ -300,7 +291,6 @@ static int igt_vma_pin1(void *arg)
300#endif 291#endif
301 { }, 292 { },
302#undef NOSPACE 293#undef NOSPACE
303#undef TOOBIG
304#undef INVALID 294#undef INVALID
305#undef __INVALID 295#undef __INVALID
306#undef VALID 296#undef VALID
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index 8d3a90c3f8ac..f8b9cc212b02 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -40,10 +40,18 @@ mock_context(struct drm_i915_private *i915,
40 INIT_LIST_HEAD(&ctx->link); 40 INIT_LIST_HEAD(&ctx->link);
41 ctx->i915 = i915; 41 ctx->i915 = i915;
42 42
43 ctx->vma_lut.ht_bits = VMA_HT_BITS;
44 ctx->vma_lut.ht_size = BIT(VMA_HT_BITS);
45 ctx->vma_lut.ht = kcalloc(ctx->vma_lut.ht_size,
46 sizeof(*ctx->vma_lut.ht),
47 GFP_KERNEL);
48 if (!ctx->vma_lut.ht)
49 goto err_free;
50
43 ret = ida_simple_get(&i915->context_hw_ida, 51 ret = ida_simple_get(&i915->context_hw_ida,
44 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); 52 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
45 if (ret < 0) 53 if (ret < 0)
46 goto err_free; 54 goto err_vma_ht;
47 ctx->hw_id = ret; 55 ctx->hw_id = ret;
48 56
49 if (name) { 57 if (name) {
@@ -58,6 +66,8 @@ mock_context(struct drm_i915_private *i915,
58 66
59 return ctx; 67 return ctx;
60 68
69err_vma_ht:
70 kvfree(ctx->vma_lut.ht);
61err_free: 71err_free:
62 kfree(ctx); 72 kfree(ctx);
63 return NULL; 73 return NULL;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 0a31cd6d01ce..b638d192ce5e 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -5,6 +5,7 @@ config DRM_MSM
5 depends on ARCH_QCOM || (ARM && COMPILE_TEST) 5 depends on ARCH_QCOM || (ARM && COMPILE_TEST)
6 depends on OF && COMMON_CLK 6 depends on OF && COMMON_CLK
7 depends on MMU 7 depends on MMU
8 select QCOM_MDT_LOADER
8 select REGULATOR 9 select REGULATOR
9 select DRM_KMS_HELPER 10 select DRM_KMS_HELPER
10 select DRM_PANEL 11 select DRM_PANEL
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 4be092f911f9..644374c7b3e0 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
18- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05) 18- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
19- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) 19- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
20 20
21Copyright (C) 2013-2016 by the following authors: 21Copyright (C) 2013-2017 by the following authors:
22- Rob Clark <robdclark@gmail.com> (robclark) 22- Rob Clark <robdclark@gmail.com> (robclark)
23- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 23- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
24 24
@@ -352,6 +352,38 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
352#define REG_A2XX_RBBM_DEBUG 0x0000039b 352#define REG_A2XX_RBBM_DEBUG 0x0000039b
353 353
354#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c 354#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c
355#define A2XX_RBBM_PM_OVERRIDE1_RBBM_AHBCLK_PM_OVERRIDE 0x00000001
356#define A2XX_RBBM_PM_OVERRIDE1_SC_REG_SCLK_PM_OVERRIDE 0x00000002
357#define A2XX_RBBM_PM_OVERRIDE1_SC_SCLK_PM_OVERRIDE 0x00000004
358#define A2XX_RBBM_PM_OVERRIDE1_SP_TOP_SCLK_PM_OVERRIDE 0x00000008
359#define A2XX_RBBM_PM_OVERRIDE1_SP_V0_SCLK_PM_OVERRIDE 0x00000010
360#define A2XX_RBBM_PM_OVERRIDE1_SQ_REG_SCLK_PM_OVERRIDE 0x00000020
361#define A2XX_RBBM_PM_OVERRIDE1_SQ_REG_FIFOS_SCLK_PM_OVERRIDE 0x00000040
362#define A2XX_RBBM_PM_OVERRIDE1_SQ_CONST_MEM_SCLK_PM_OVERRIDE 0x00000080
363#define A2XX_RBBM_PM_OVERRIDE1_SQ_SQ_SCLK_PM_OVERRIDE 0x00000100
364#define A2XX_RBBM_PM_OVERRIDE1_SX_SCLK_PM_OVERRIDE 0x00000200
365#define A2XX_RBBM_PM_OVERRIDE1_SX_REG_SCLK_PM_OVERRIDE 0x00000400
366#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCO_SCLK_PM_OVERRIDE 0x00000800
367#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCM_SCLK_PM_OVERRIDE 0x00001000
368#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCD_SCLK_PM_OVERRIDE 0x00002000
369#define A2XX_RBBM_PM_OVERRIDE1_TCM_REG_SCLK_PM_OVERRIDE 0x00004000
370#define A2XX_RBBM_PM_OVERRIDE1_TPC_TPC_SCLK_PM_OVERRIDE 0x00008000
371#define A2XX_RBBM_PM_OVERRIDE1_TPC_REG_SCLK_PM_OVERRIDE 0x00010000
372#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCA_SCLK_PM_OVERRIDE 0x00020000
373#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCB_SCLK_PM_OVERRIDE 0x00040000
374#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCB_READ_SCLK_PM_OVERRIDE 0x00080000
375#define A2XX_RBBM_PM_OVERRIDE1_TP_TP_SCLK_PM_OVERRIDE 0x00100000
376#define A2XX_RBBM_PM_OVERRIDE1_TP_REG_SCLK_PM_OVERRIDE 0x00200000
377#define A2XX_RBBM_PM_OVERRIDE1_CP_G_SCLK_PM_OVERRIDE 0x00400000
378#define A2XX_RBBM_PM_OVERRIDE1_CP_REG_SCLK_PM_OVERRIDE 0x00800000
379#define A2XX_RBBM_PM_OVERRIDE1_CP_G_REG_SCLK_PM_OVERRIDE 0x01000000
380#define A2XX_RBBM_PM_OVERRIDE1_SPI_SCLK_PM_OVERRIDE 0x02000000
381#define A2XX_RBBM_PM_OVERRIDE1_RB_REG_SCLK_PM_OVERRIDE 0x04000000
382#define A2XX_RBBM_PM_OVERRIDE1_RB_SCLK_PM_OVERRIDE 0x08000000
383#define A2XX_RBBM_PM_OVERRIDE1_MH_MH_SCLK_PM_OVERRIDE 0x10000000
384#define A2XX_RBBM_PM_OVERRIDE1_MH_REG_SCLK_PM_OVERRIDE 0x20000000
385#define A2XX_RBBM_PM_OVERRIDE1_MH_MMU_SCLK_PM_OVERRIDE 0x40000000
386#define A2XX_RBBM_PM_OVERRIDE1_MH_TCROQ_SCLK_PM_OVERRIDE 0x80000000
355 387
356#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d 388#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d
357 389
@@ -477,12 +509,43 @@ static inline uint32_t REG_A2XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x000
477#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81 509#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81
478 510
479#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86 511#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86
512#define A2XX_PA_SU_FACE_DATA_BASE_ADDR__MASK 0xffffffe0
513#define A2XX_PA_SU_FACE_DATA_BASE_ADDR__SHIFT 5
514static inline uint32_t A2XX_PA_SU_FACE_DATA_BASE_ADDR(uint32_t val)
515{
516 return ((val) << A2XX_PA_SU_FACE_DATA_BASE_ADDR__SHIFT) & A2XX_PA_SU_FACE_DATA_BASE_ADDR__MASK;
517}
480 518
481#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00 519#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00
520#define A2XX_SQ_GPR_MANAGEMENT_REG_DYNAMIC 0x00000001
521#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__MASK 0x00000ff0
522#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__SHIFT 4
523static inline uint32_t A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX(uint32_t val)
524{
525 return ((val) << A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__SHIFT) & A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__MASK;
526}
527#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__MASK 0x000ff000
528#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__SHIFT 12
529static inline uint32_t A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX(uint32_t val)
530{
531 return ((val) << A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__SHIFT) & A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__MASK;
532}
482 533
483#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01 534#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01
484 535
485#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02 536#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02
537#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__MASK 0x00000fff
538#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__SHIFT 0
539static inline uint32_t A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX(uint32_t val)
540{
541 return ((val) << A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__SHIFT) & A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__MASK;
542}
543#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__MASK 0x0fff0000
544#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__SHIFT 16
545static inline uint32_t A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX(uint32_t val)
546{
547 return ((val) << A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__SHIFT) & A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__MASK;
548}
486 549
487#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05 550#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05
488 551
@@ -742,6 +805,24 @@ static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
742#define REG_A2XX_RB_BLEND_ALPHA 0x00002108 805#define REG_A2XX_RB_BLEND_ALPHA 0x00002108
743 806
744#define REG_A2XX_RB_FOG_COLOR 0x00002109 807#define REG_A2XX_RB_FOG_COLOR 0x00002109
808#define A2XX_RB_FOG_COLOR_FOG_RED__MASK 0x000000ff
809#define A2XX_RB_FOG_COLOR_FOG_RED__SHIFT 0
810static inline uint32_t A2XX_RB_FOG_COLOR_FOG_RED(uint32_t val)
811{
812 return ((val) << A2XX_RB_FOG_COLOR_FOG_RED__SHIFT) & A2XX_RB_FOG_COLOR_FOG_RED__MASK;
813}
814#define A2XX_RB_FOG_COLOR_FOG_GREEN__MASK 0x0000ff00
815#define A2XX_RB_FOG_COLOR_FOG_GREEN__SHIFT 8
816static inline uint32_t A2XX_RB_FOG_COLOR_FOG_GREEN(uint32_t val)
817{
818 return ((val) << A2XX_RB_FOG_COLOR_FOG_GREEN__SHIFT) & A2XX_RB_FOG_COLOR_FOG_GREEN__MASK;
819}
820#define A2XX_RB_FOG_COLOR_FOG_BLUE__MASK 0x00ff0000
821#define A2XX_RB_FOG_COLOR_FOG_BLUE__SHIFT 16
822static inline uint32_t A2XX_RB_FOG_COLOR_FOG_BLUE(uint32_t val)
823{
824 return ((val) << A2XX_RB_FOG_COLOR_FOG_BLUE__SHIFT) & A2XX_RB_FOG_COLOR_FOG_BLUE__MASK;
825}
745 826
746#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c 827#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c
747#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff 828#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
@@ -890,14 +971,146 @@ static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
890#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000 971#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000
891 972
892#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182 973#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182
974#define A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__MASK 0x0000ffff
975#define A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__SHIFT 0
976static inline uint32_t A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE(uint32_t val)
977{
978 return ((val) << A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__SHIFT) & A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__MASK;
979}
980#define A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__MASK 0xffff0000
981#define A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__SHIFT 16
982static inline uint32_t A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN(uint32_t val)
983{
984 return ((val) << A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__SHIFT) & A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__MASK;
985}
893 986
894#define REG_A2XX_SQ_WRAPPING_0 0x00002183 987#define REG_A2XX_SQ_WRAPPING_0 0x00002183
988#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__MASK 0x0000000f
989#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__SHIFT 0
990static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_0(uint32_t val)
991{
992 return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__MASK;
993}
994#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__MASK 0x000000f0
995#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__SHIFT 4
996static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_1(uint32_t val)
997{
998 return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__MASK;
999}
1000#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__MASK 0x00000f00
1001#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__SHIFT 8
1002static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_2(uint32_t val)
1003{
1004 return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__MASK;
1005}
1006#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__MASK 0x0000f000
1007#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__SHIFT 12
1008static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_3(uint32_t val)
1009{
1010 return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__MASK;
1011}
1012#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__MASK 0x000f0000
1013#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__SHIFT 16
1014static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_4(uint32_t val)
1015{
1016 return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__MASK;
1017}
1018#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__MASK 0x00f00000
1019#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__SHIFT 20
1020static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_5(uint32_t val)
1021{
1022 return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__MASK;
1023}
1024#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__MASK 0x0f000000
1025#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__SHIFT 24
1026static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_6(uint32_t val)
1027{
1028 return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__MASK;
1029}
1030#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__MASK 0xf0000000
1031#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__SHIFT 28
1032static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_7(uint32_t val)
1033{
1034 return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__MASK;
1035}
895 1036
896#define REG_A2XX_SQ_WRAPPING_1 0x00002184 1037#define REG_A2XX_SQ_WRAPPING_1 0x00002184
1038#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__MASK 0x0000000f
1039#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__SHIFT 0
1040static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_8(uint32_t val)
1041{
1042 return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__MASK;
1043}
1044#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__MASK 0x000000f0
1045#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__SHIFT 4
1046static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_9(uint32_t val)
1047{
1048 return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__MASK;
1049}
1050#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__MASK 0x00000f00
1051#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__SHIFT 8
1052static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_10(uint32_t val)
1053{
1054 return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__MASK;
1055}
1056#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__MASK 0x0000f000
1057#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__SHIFT 12
1058static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_11(uint32_t val)
1059{
1060 return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__MASK;
1061}
1062#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__MASK 0x000f0000
1063#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__SHIFT 16
1064static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_12(uint32_t val)
1065{
1066 return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__MASK;
1067}
1068#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__MASK 0x00f00000
1069#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__SHIFT 20
1070static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_13(uint32_t val)
1071{
1072 return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__MASK;
1073}
1074#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__MASK 0x0f000000
1075#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__SHIFT 24
1076static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_14(uint32_t val)
1077{
1078 return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__MASK;
1079}
1080#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__MASK 0xf0000000
1081#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__SHIFT 28
1082static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_15(uint32_t val)
1083{
1084 return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__MASK;
1085}
897 1086
898#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6 1087#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6
1088#define A2XX_SQ_PS_PROGRAM_BASE__MASK 0x00000fff
1089#define A2XX_SQ_PS_PROGRAM_BASE__SHIFT 0
1090static inline uint32_t A2XX_SQ_PS_PROGRAM_BASE(uint32_t val)
1091{
1092 return ((val) << A2XX_SQ_PS_PROGRAM_BASE__SHIFT) & A2XX_SQ_PS_PROGRAM_BASE__MASK;
1093}
1094#define A2XX_SQ_PS_PROGRAM_SIZE__MASK 0x00fff000
1095#define A2XX_SQ_PS_PROGRAM_SIZE__SHIFT 12
1096static inline uint32_t A2XX_SQ_PS_PROGRAM_SIZE(uint32_t val)
1097{
1098 return ((val) << A2XX_SQ_PS_PROGRAM_SIZE__SHIFT) & A2XX_SQ_PS_PROGRAM_SIZE__MASK;
1099}
899 1100
900#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7 1101#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7
1102#define A2XX_SQ_VS_PROGRAM_BASE__MASK 0x00000fff
1103#define A2XX_SQ_VS_PROGRAM_BASE__SHIFT 0
1104static inline uint32_t A2XX_SQ_VS_PROGRAM_BASE(uint32_t val)
1105{
1106 return ((val) << A2XX_SQ_VS_PROGRAM_BASE__SHIFT) & A2XX_SQ_VS_PROGRAM_BASE__MASK;
1107}
1108#define A2XX_SQ_VS_PROGRAM_SIZE__MASK 0x00fff000
1109#define A2XX_SQ_VS_PROGRAM_SIZE__SHIFT 12
1110static inline uint32_t A2XX_SQ_VS_PROGRAM_SIZE(uint32_t val)
1111{
1112 return ((val) << A2XX_SQ_VS_PROGRAM_SIZE__SHIFT) & A2XX_SQ_VS_PROGRAM_SIZE__MASK;
1113}
901 1114
902#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9 1115#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9
903 1116
@@ -1304,6 +1517,14 @@ static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL(enum a2xx_pa_sc_a
1304} 1517}
1305 1518
1306#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293 1519#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293
1520#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ENA 0x00000001
1521#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__MASK 0x0000007e
1522#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__SHIFT 1
1523static inline uint32_t A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID(uint32_t val)
1524{
1525 return ((val) << A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__SHIFT) & A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__MASK;
1526}
1527#define A2XX_PA_SC_VIZ_QUERY_KILL_PIX_POST_EARLY_Z 0x00000100
1307 1528
1308#define REG_A2XX_VGT_ENHANCE 0x00002294 1529#define REG_A2XX_VGT_ENHANCE 0x00002294
1309 1530
@@ -1319,6 +1540,18 @@ static inline uint32_t A2XX_PA_SC_LINE_CNTL_BRES_CNTL(uint32_t val)
1319#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400 1540#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400
1320 1541
1321#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301 1542#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301
1543#define A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__MASK 0x00000007
1544#define A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__SHIFT 0
1545static inline uint32_t A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES(uint32_t val)
1546{
1547 return ((val) << A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__SHIFT) & A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__MASK;
1548}
1549#define A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__MASK 0x0001e000
1550#define A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__SHIFT 13
1551static inline uint32_t A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST(uint32_t val)
1552{
1553 return ((val) << A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__SHIFT) & A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__MASK;
1554}
1322 1555
1323#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302 1556#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302
1324#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001 1557#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001
@@ -1407,8 +1640,20 @@ static inline uint32_t A2XX_SQ_PS_CONST_SIZE(uint32_t val)
1407#define REG_A2XX_PA_SC_AA_MASK 0x00002312 1640#define REG_A2XX_PA_SC_AA_MASK 0x00002312
1408 1641
1409#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316 1642#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316
1643#define A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__MASK 0x00000007
1644#define A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__SHIFT 0
1645static inline uint32_t A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH(uint32_t val)
1646{
1647 return ((val) << A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__SHIFT) & A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__MASK;
1648}
1410 1649
1411#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317 1650#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317
1651#define A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__MASK 0x00000003
1652#define A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__SHIFT 0
1653static inline uint32_t A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST(uint32_t val)
1654{
1655 return ((val) << A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__SHIFT) & A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__MASK;
1656}
1412 1657
1413#define REG_A2XX_RB_COPY_CONTROL 0x00002318 1658#define REG_A2XX_RB_COPY_CONTROL 0x00002318
1414#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007 1659#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index a066c8b9eccd..663a73216926 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
18- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05) 18- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
19- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) 19- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
20 20
21Copyright (C) 2013-2016 by the following authors: 21Copyright (C) 2013-2017 by the following authors:
22- Rob Clark <robdclark@gmail.com> (robclark) 22- Rob Clark <robdclark@gmail.com> (robclark)
23- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 23- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
24 24
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 7fd77958a436..0e3828ed1e46 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -40,6 +40,7 @@
40extern bool hang_debug; 40extern bool hang_debug;
41 41
42static void a3xx_dump(struct msm_gpu *gpu); 42static void a3xx_dump(struct msm_gpu *gpu);
43static bool a3xx_idle(struct msm_gpu *gpu);
43 44
44static bool a3xx_me_init(struct msm_gpu *gpu) 45static bool a3xx_me_init(struct msm_gpu *gpu)
45{ 46{
@@ -65,7 +66,7 @@ static bool a3xx_me_init(struct msm_gpu *gpu)
65 OUT_RING(ring, 0x00000000); 66 OUT_RING(ring, 0x00000000);
66 67
67 gpu->funcs->flush(gpu); 68 gpu->funcs->flush(gpu);
68 return gpu->funcs->idle(gpu); 69 return a3xx_idle(gpu);
69} 70}
70 71
71static int a3xx_hw_init(struct msm_gpu *gpu) 72static int a3xx_hw_init(struct msm_gpu *gpu)
@@ -446,7 +447,6 @@ static const struct adreno_gpu_funcs funcs = {
446 .last_fence = adreno_last_fence, 447 .last_fence = adreno_last_fence,
447 .submit = adreno_submit, 448 .submit = adreno_submit,
448 .flush = adreno_flush, 449 .flush = adreno_flush,
449 .idle = a3xx_idle,
450 .irq = a3xx_irq, 450 .irq = a3xx_irq,
451 .destroy = a3xx_destroy, 451 .destroy = a3xx_destroy,
452#ifdef CONFIG_DEBUG_FS 452#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 4ce21b902779..1a14f4a40b9c 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
18- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05) 18- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
19- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) 19- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
20 20
21Copyright (C) 2013-2016 by the following authors: 21Copyright (C) 2013-2017 by the following authors:
22- Rob Clark <robdclark@gmail.com> (robclark) 22- Rob Clark <robdclark@gmail.com> (robclark)
23- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 23- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
24 24
@@ -3010,11 +3010,11 @@ static inline uint32_t A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
3010static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; } 3010static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; }
3011 3011
3012static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_2(uint32_t i0) { return 0x0000220c + 0x4*i0; } 3012static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_2(uint32_t i0) { return 0x0000220c + 0x4*i0; }
3013#define A4XX_VFD_FETCH_INSTR_2_SIZE__MASK 0xfffffff0 3013#define A4XX_VFD_FETCH_INSTR_2_SIZE__MASK 0xffffffff
3014#define A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT 4 3014#define A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT 0
3015static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val) 3015static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val)
3016{ 3016{
3017 return ((val >> 4) << A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_2_SIZE__MASK; 3017 return ((val) << A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_2_SIZE__MASK;
3018} 3018}
3019 3019
3020static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; } 3020static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; }
@@ -3829,6 +3829,44 @@ static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
3829 3829
3830#define REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049 3830#define REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
3831 3831
3832#define REG_A4XX_VBIF_PERF_CNT_EN0 0x000030c0
3833
3834#define REG_A4XX_VBIF_PERF_CNT_EN1 0x000030c1
3835
3836#define REG_A4XX_VBIF_PERF_CNT_EN2 0x000030c2
3837
3838#define REG_A4XX_VBIF_PERF_CNT_EN3 0x000030c3
3839
3840#define REG_A4XX_VBIF_PERF_CNT_SEL0 0x000030d0
3841
3842#define REG_A4XX_VBIF_PERF_CNT_SEL1 0x000030d1
3843
3844#define REG_A4XX_VBIF_PERF_CNT_SEL2 0x000030d2
3845
3846#define REG_A4XX_VBIF_PERF_CNT_SEL3 0x000030d3
3847
3848#define REG_A4XX_VBIF_PERF_CNT_LOW0 0x000030d8
3849
3850#define REG_A4XX_VBIF_PERF_CNT_LOW1 0x000030d9
3851
3852#define REG_A4XX_VBIF_PERF_CNT_LOW2 0x000030da
3853
3854#define REG_A4XX_VBIF_PERF_CNT_LOW3 0x000030db
3855
3856#define REG_A4XX_VBIF_PERF_CNT_HIGH0 0x000030e0
3857
3858#define REG_A4XX_VBIF_PERF_CNT_HIGH1 0x000030e1
3859
3860#define REG_A4XX_VBIF_PERF_CNT_HIGH2 0x000030e2
3861
3862#define REG_A4XX_VBIF_PERF_CNT_HIGH3 0x000030e3
3863
3864#define REG_A4XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
3865
3866#define REG_A4XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
3867
3868#define REG_A4XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
3869
3832#define REG_A4XX_UNKNOWN_0CC5 0x00000cc5 3870#define REG_A4XX_UNKNOWN_0CC5 0x00000cc5
3833 3871
3834#define REG_A4XX_UNKNOWN_0CC6 0x00000cc6 3872#define REG_A4XX_UNKNOWN_0CC6 0x00000cc6
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index dfe0eceaae3b..19abf229b08d 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -31,6 +31,7 @@
31 31
32extern bool hang_debug; 32extern bool hang_debug;
33static void a4xx_dump(struct msm_gpu *gpu); 33static void a4xx_dump(struct msm_gpu *gpu);
34static bool a4xx_idle(struct msm_gpu *gpu);
34 35
35/* 36/*
36 * a4xx_enable_hwcg() - Program the clock control registers 37 * a4xx_enable_hwcg() - Program the clock control registers
@@ -137,7 +138,7 @@ static bool a4xx_me_init(struct msm_gpu *gpu)
137 OUT_RING(ring, 0x00000000); 138 OUT_RING(ring, 0x00000000);
138 139
139 gpu->funcs->flush(gpu); 140 gpu->funcs->flush(gpu);
140 return gpu->funcs->idle(gpu); 141 return a4xx_idle(gpu);
141} 142}
142 143
143static int a4xx_hw_init(struct msm_gpu *gpu) 144static int a4xx_hw_init(struct msm_gpu *gpu)
@@ -534,7 +535,6 @@ static const struct adreno_gpu_funcs funcs = {
534 .last_fence = adreno_last_fence, 535 .last_fence = adreno_last_fence,
535 .submit = adreno_submit, 536 .submit = adreno_submit,
536 .flush = adreno_flush, 537 .flush = adreno_flush,
537 .idle = a4xx_idle,
538 .irq = a4xx_irq, 538 .irq = a4xx_irq,
539 .destroy = a4xx_destroy, 539 .destroy = a4xx_destroy,
540#ifdef CONFIG_DEBUG_FS 540#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index b6fe763ddf34..e0e6711f4f78 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
18- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05) 18- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
19- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) 19- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
20 20
21Copyright (C) 2013-2016 by the following authors: 21Copyright (C) 2013-2017 by the following authors:
22- Rob Clark <robdclark@gmail.com> (robclark) 22- Rob Clark <robdclark@gmail.com> (robclark)
23- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 23- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
24 24
@@ -45,20 +45,50 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
45 45
46 46
47enum a5xx_color_fmt { 47enum a5xx_color_fmt {
48 RB5_A8_UNORM = 2,
48 RB5_R8_UNORM = 3, 49 RB5_R8_UNORM = 3,
50 RB5_R8_SNORM = 4,
51 RB5_R8_UINT = 5,
52 RB5_R8_SINT = 6,
49 RB5_R4G4B4A4_UNORM = 8, 53 RB5_R4G4B4A4_UNORM = 8,
50 RB5_R5G5B5A1_UNORM = 10, 54 RB5_R5G5B5A1_UNORM = 10,
51 RB5_R5G6B5_UNORM = 14, 55 RB5_R5G6B5_UNORM = 14,
56 RB5_R8G8_UNORM = 15,
57 RB5_R8G8_SNORM = 16,
58 RB5_R8G8_UINT = 17,
59 RB5_R8G8_SINT = 18,
60 RB5_R16_UNORM = 21,
61 RB5_R16_SNORM = 22,
52 RB5_R16_FLOAT = 23, 62 RB5_R16_FLOAT = 23,
63 RB5_R16_UINT = 24,
64 RB5_R16_SINT = 25,
53 RB5_R8G8B8A8_UNORM = 48, 65 RB5_R8G8B8A8_UNORM = 48,
54 RB5_R8G8B8_UNORM = 49, 66 RB5_R8G8B8_UNORM = 49,
67 RB5_R8G8B8A8_SNORM = 50,
55 RB5_R8G8B8A8_UINT = 51, 68 RB5_R8G8B8A8_UINT = 51,
69 RB5_R8G8B8A8_SINT = 52,
70 RB5_R10G10B10A2_UNORM = 55,
56 RB5_R10G10B10A2_UINT = 58, 71 RB5_R10G10B10A2_UINT = 58,
72 RB5_R11G11B10_FLOAT = 66,
73 RB5_R16G16_UNORM = 67,
74 RB5_R16G16_SNORM = 68,
57 RB5_R16G16_FLOAT = 69, 75 RB5_R16G16_FLOAT = 69,
76 RB5_R16G16_UINT = 70,
77 RB5_R16G16_SINT = 71,
58 RB5_R32_FLOAT = 74, 78 RB5_R32_FLOAT = 74,
79 RB5_R32_UINT = 75,
80 RB5_R32_SINT = 76,
81 RB5_R16G16B16A16_UNORM = 96,
82 RB5_R16G16B16A16_SNORM = 97,
59 RB5_R16G16B16A16_FLOAT = 98, 83 RB5_R16G16B16A16_FLOAT = 98,
84 RB5_R16G16B16A16_UINT = 99,
85 RB5_R16G16B16A16_SINT = 100,
60 RB5_R32G32_FLOAT = 103, 86 RB5_R32G32_FLOAT = 103,
87 RB5_R32G32_UINT = 104,
88 RB5_R32G32_SINT = 105,
61 RB5_R32G32B32A32_FLOAT = 130, 89 RB5_R32G32B32A32_FLOAT = 130,
90 RB5_R32G32B32A32_UINT = 131,
91 RB5_R32G32B32A32_SINT = 132,
62}; 92};
63 93
64enum a5xx_tile_mode { 94enum a5xx_tile_mode {
@@ -133,25 +163,55 @@ enum a5xx_vtx_fmt {
133enum a5xx_tex_fmt { 163enum a5xx_tex_fmt {
134 TFMT5_A8_UNORM = 2, 164 TFMT5_A8_UNORM = 2,
135 TFMT5_8_UNORM = 3, 165 TFMT5_8_UNORM = 3,
166 TFMT5_8_SNORM = 4,
167 TFMT5_8_UINT = 5,
168 TFMT5_8_SINT = 6,
136 TFMT5_4_4_4_4_UNORM = 8, 169 TFMT5_4_4_4_4_UNORM = 8,
137 TFMT5_5_5_5_1_UNORM = 10, 170 TFMT5_5_5_5_1_UNORM = 10,
138 TFMT5_5_6_5_UNORM = 14, 171 TFMT5_5_6_5_UNORM = 14,
139 TFMT5_8_8_UNORM = 15, 172 TFMT5_8_8_UNORM = 15,
140 TFMT5_8_8_SNORM = 16, 173 TFMT5_8_8_SNORM = 16,
174 TFMT5_8_8_UINT = 17,
175 TFMT5_8_8_SINT = 18,
141 TFMT5_L8_A8_UNORM = 19, 176 TFMT5_L8_A8_UNORM = 19,
177 TFMT5_16_UNORM = 21,
178 TFMT5_16_SNORM = 22,
142 TFMT5_16_FLOAT = 23, 179 TFMT5_16_FLOAT = 23,
180 TFMT5_16_UINT = 24,
181 TFMT5_16_SINT = 25,
143 TFMT5_8_8_8_8_UNORM = 48, 182 TFMT5_8_8_8_8_UNORM = 48,
144 TFMT5_8_8_8_UNORM = 49, 183 TFMT5_8_8_8_UNORM = 49,
145 TFMT5_8_8_8_SNORM = 50, 184 TFMT5_8_8_8_8_SNORM = 50,
185 TFMT5_8_8_8_8_UINT = 51,
186 TFMT5_8_8_8_8_SINT = 52,
146 TFMT5_9_9_9_E5_FLOAT = 53, 187 TFMT5_9_9_9_E5_FLOAT = 53,
147 TFMT5_10_10_10_2_UNORM = 54, 188 TFMT5_10_10_10_2_UNORM = 54,
189 TFMT5_10_10_10_2_UINT = 58,
148 TFMT5_11_11_10_FLOAT = 66, 190 TFMT5_11_11_10_FLOAT = 66,
191 TFMT5_16_16_UNORM = 67,
192 TFMT5_16_16_SNORM = 68,
149 TFMT5_16_16_FLOAT = 69, 193 TFMT5_16_16_FLOAT = 69,
194 TFMT5_16_16_UINT = 70,
195 TFMT5_16_16_SINT = 71,
150 TFMT5_32_FLOAT = 74, 196 TFMT5_32_FLOAT = 74,
197 TFMT5_32_UINT = 75,
198 TFMT5_32_SINT = 76,
199 TFMT5_16_16_16_16_UNORM = 96,
200 TFMT5_16_16_16_16_SNORM = 97,
151 TFMT5_16_16_16_16_FLOAT = 98, 201 TFMT5_16_16_16_16_FLOAT = 98,
202 TFMT5_16_16_16_16_UINT = 99,
203 TFMT5_16_16_16_16_SINT = 100,
152 TFMT5_32_32_FLOAT = 103, 204 TFMT5_32_32_FLOAT = 103,
205 TFMT5_32_32_UINT = 104,
206 TFMT5_32_32_SINT = 105,
153 TFMT5_32_32_32_32_FLOAT = 130, 207 TFMT5_32_32_32_32_FLOAT = 130,
208 TFMT5_32_32_32_32_UINT = 131,
209 TFMT5_32_32_32_32_SINT = 132,
154 TFMT5_X8Z24_UNORM = 160, 210 TFMT5_X8Z24_UNORM = 160,
211 TFMT5_RGTC1_UNORM = 183,
212 TFMT5_RGTC1_SNORM = 184,
213 TFMT5_RGTC2_UNORM = 187,
214 TFMT5_RGTC2_SNORM = 188,
155}; 215};
156 216
157enum a5xx_tex_fetchsize { 217enum a5xx_tex_fetchsize {
@@ -182,6 +242,565 @@ enum a5xx_blit_buf {
182 BLIT_Z32 = 9, 242 BLIT_Z32 = 9,
183}; 243};
184 244
245enum a5xx_cp_perfcounter_select {
246 PERF_CP_ALWAYS_COUNT = 0,
247 PERF_CP_BUSY_GFX_CORE_IDLE = 1,
248 PERF_CP_BUSY_CYCLES = 2,
249 PERF_CP_PFP_IDLE = 3,
250 PERF_CP_PFP_BUSY_WORKING = 4,
251 PERF_CP_PFP_STALL_CYCLES_ANY = 5,
252 PERF_CP_PFP_STARVE_CYCLES_ANY = 6,
253 PERF_CP_PFP_ICACHE_MISS = 7,
254 PERF_CP_PFP_ICACHE_HIT = 8,
255 PERF_CP_PFP_MATCH_PM4_PKT_PROFILE = 9,
256 PERF_CP_ME_BUSY_WORKING = 10,
257 PERF_CP_ME_IDLE = 11,
258 PERF_CP_ME_STARVE_CYCLES_ANY = 12,
259 PERF_CP_ME_FIFO_EMPTY_PFP_IDLE = 13,
260 PERF_CP_ME_FIFO_EMPTY_PFP_BUSY = 14,
261 PERF_CP_ME_FIFO_FULL_ME_BUSY = 15,
262 PERF_CP_ME_FIFO_FULL_ME_NON_WORKING = 16,
263 PERF_CP_ME_STALL_CYCLES_ANY = 17,
264 PERF_CP_ME_ICACHE_MISS = 18,
265 PERF_CP_ME_ICACHE_HIT = 19,
266 PERF_CP_NUM_PREEMPTIONS = 20,
267 PERF_CP_PREEMPTION_REACTION_DELAY = 21,
268 PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 22,
269 PERF_CP_PREEMPTION_SWITCH_IN_TIME = 23,
270 PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 24,
271 PERF_CP_PREDICATED_DRAWS_KILLED = 25,
272 PERF_CP_MODE_SWITCH = 26,
273 PERF_CP_ZPASS_DONE = 27,
274 PERF_CP_CONTEXT_DONE = 28,
275 PERF_CP_CACHE_FLUSH = 29,
276 PERF_CP_LONG_PREEMPTIONS = 30,
277};
278
279enum a5xx_rbbm_perfcounter_select {
280 PERF_RBBM_ALWAYS_COUNT = 0,
281 PERF_RBBM_ALWAYS_ON = 1,
282 PERF_RBBM_TSE_BUSY = 2,
283 PERF_RBBM_RAS_BUSY = 3,
284 PERF_RBBM_PC_DCALL_BUSY = 4,
285 PERF_RBBM_PC_VSD_BUSY = 5,
286 PERF_RBBM_STATUS_MASKED = 6,
287 PERF_RBBM_COM_BUSY = 7,
288 PERF_RBBM_DCOM_BUSY = 8,
289 PERF_RBBM_VBIF_BUSY = 9,
290 PERF_RBBM_VSC_BUSY = 10,
291 PERF_RBBM_TESS_BUSY = 11,
292 PERF_RBBM_UCHE_BUSY = 12,
293 PERF_RBBM_HLSQ_BUSY = 13,
294};
295
296enum a5xx_pc_perfcounter_select {
297 PERF_PC_BUSY_CYCLES = 0,
298 PERF_PC_WORKING_CYCLES = 1,
299 PERF_PC_STALL_CYCLES_VFD = 2,
300 PERF_PC_STALL_CYCLES_TSE = 3,
301 PERF_PC_STALL_CYCLES_VPC = 4,
302 PERF_PC_STALL_CYCLES_UCHE = 5,
303 PERF_PC_STALL_CYCLES_TESS = 6,
304 PERF_PC_STALL_CYCLES_TSE_ONLY = 7,
305 PERF_PC_STALL_CYCLES_VPC_ONLY = 8,
306 PERF_PC_PASS1_TF_STALL_CYCLES = 9,
307 PERF_PC_STARVE_CYCLES_FOR_INDEX = 10,
308 PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11,
309 PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12,
310 PERF_PC_STARVE_CYCLES_FOR_POSITION = 13,
311 PERF_PC_STARVE_CYCLES_DI = 14,
312 PERF_PC_VIS_STREAMS_LOADED = 15,
313 PERF_PC_INSTANCES = 16,
314 PERF_PC_VPC_PRIMITIVES = 17,
315 PERF_PC_DEAD_PRIM = 18,
316 PERF_PC_LIVE_PRIM = 19,
317 PERF_PC_VERTEX_HITS = 20,
318 PERF_PC_IA_VERTICES = 21,
319 PERF_PC_IA_PRIMITIVES = 22,
320 PERF_PC_GS_PRIMITIVES = 23,
321 PERF_PC_HS_INVOCATIONS = 24,
322 PERF_PC_DS_INVOCATIONS = 25,
323 PERF_PC_VS_INVOCATIONS = 26,
324 PERF_PC_GS_INVOCATIONS = 27,
325 PERF_PC_DS_PRIMITIVES = 28,
326 PERF_PC_VPC_POS_DATA_TRANSACTION = 29,
327 PERF_PC_3D_DRAWCALLS = 30,
328 PERF_PC_2D_DRAWCALLS = 31,
329 PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32,
330 PERF_TESS_BUSY_CYCLES = 33,
331 PERF_TESS_WORKING_CYCLES = 34,
332 PERF_TESS_STALL_CYCLES_PC = 35,
333 PERF_TESS_STARVE_CYCLES_PC = 36,
334};
335
336enum a5xx_vfd_perfcounter_select {
337 PERF_VFD_BUSY_CYCLES = 0,
338 PERF_VFD_STALL_CYCLES_UCHE = 1,
339 PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2,
340 PERF_VFD_STALL_CYCLES_MISS_VB = 3,
341 PERF_VFD_STALL_CYCLES_MISS_Q = 4,
342 PERF_VFD_STALL_CYCLES_SP_INFO = 5,
343 PERF_VFD_STALL_CYCLES_SP_ATTR = 6,
344 PERF_VFD_STALL_CYCLES_VFDP_VB = 7,
345 PERF_VFD_STALL_CYCLES_VFDP_Q = 8,
346 PERF_VFD_DECODER_PACKER_STALL = 9,
347 PERF_VFD_STARVE_CYCLES_UCHE = 10,
348 PERF_VFD_RBUFFER_FULL = 11,
349 PERF_VFD_ATTR_INFO_FIFO_FULL = 12,
350 PERF_VFD_DECODED_ATTRIBUTE_BYTES = 13,
351 PERF_VFD_NUM_ATTRIBUTES = 14,
352 PERF_VFD_INSTRUCTIONS = 15,
353 PERF_VFD_UPPER_SHADER_FIBERS = 16,
354 PERF_VFD_LOWER_SHADER_FIBERS = 17,
355 PERF_VFD_MODE_0_FIBERS = 18,
356 PERF_VFD_MODE_1_FIBERS = 19,
357 PERF_VFD_MODE_2_FIBERS = 20,
358 PERF_VFD_MODE_3_FIBERS = 21,
359 PERF_VFD_MODE_4_FIBERS = 22,
360 PERF_VFD_TOTAL_VERTICES = 23,
361 PERF_VFD_NUM_ATTR_MISS = 24,
362 PERF_VFD_1_BURST_REQ = 25,
363 PERF_VFDP_STALL_CYCLES_VFD = 26,
364 PERF_VFDP_STALL_CYCLES_VFD_INDEX = 27,
365 PERF_VFDP_STALL_CYCLES_VFD_PROG = 28,
366 PERF_VFDP_STARVE_CYCLES_PC = 29,
367 PERF_VFDP_VS_STAGE_32_WAVES = 30,
368};
369
370enum a5xx_hlsq_perfcounter_select {
371 PERF_HLSQ_BUSY_CYCLES = 0,
372 PERF_HLSQ_STALL_CYCLES_UCHE = 1,
373 PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
374 PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3,
375 PERF_HLSQ_UCHE_LATENCY_CYCLES = 4,
376 PERF_HLSQ_UCHE_LATENCY_COUNT = 5,
377 PERF_HLSQ_FS_STAGE_32_WAVES = 6,
378 PERF_HLSQ_FS_STAGE_64_WAVES = 7,
379 PERF_HLSQ_QUADS = 8,
380 PERF_HLSQ_SP_STATE_COPY_TRANS_FS_STAGE = 9,
381 PERF_HLSQ_SP_STATE_COPY_TRANS_VS_STAGE = 10,
382 PERF_HLSQ_TP_STATE_COPY_TRANS_FS_STAGE = 11,
383 PERF_HLSQ_TP_STATE_COPY_TRANS_VS_STAGE = 12,
384 PERF_HLSQ_CS_INVOCATIONS = 13,
385 PERF_HLSQ_COMPUTE_DRAWCALLS = 14,
386};
387
388enum a5xx_vpc_perfcounter_select {
389 PERF_VPC_BUSY_CYCLES = 0,
390 PERF_VPC_WORKING_CYCLES = 1,
391 PERF_VPC_STALL_CYCLES_UCHE = 2,
392 PERF_VPC_STALL_CYCLES_VFD_WACK = 3,
393 PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4,
394 PERF_VPC_STALL_CYCLES_PC = 5,
395 PERF_VPC_STALL_CYCLES_SP_LM = 6,
396 PERF_VPC_POS_EXPORT_STALL_CYCLES = 7,
397 PERF_VPC_STARVE_CYCLES_SP = 8,
398 PERF_VPC_STARVE_CYCLES_LRZ = 9,
399 PERF_VPC_PC_PRIMITIVES = 10,
400 PERF_VPC_SP_COMPONENTS = 11,
401 PERF_VPC_SP_LM_PRIMITIVES = 12,
402 PERF_VPC_SP_LM_COMPONENTS = 13,
403 PERF_VPC_SP_LM_DWORDS = 14,
404 PERF_VPC_STREAMOUT_COMPONENTS = 15,
405 PERF_VPC_GRANT_PHASES = 16,
406};
407
408enum a5xx_tse_perfcounter_select {
409 PERF_TSE_BUSY_CYCLES = 0,
410 PERF_TSE_CLIPPING_CYCLES = 1,
411 PERF_TSE_STALL_CYCLES_RAS = 2,
412 PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3,
413 PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4,
414 PERF_TSE_STARVE_CYCLES_PC = 5,
415 PERF_TSE_INPUT_PRIM = 6,
416 PERF_TSE_INPUT_NULL_PRIM = 7,
417 PERF_TSE_TRIVAL_REJ_PRIM = 8,
418 PERF_TSE_CLIPPED_PRIM = 9,
419 PERF_TSE_ZERO_AREA_PRIM = 10,
420 PERF_TSE_FACENESS_CULLED_PRIM = 11,
421 PERF_TSE_ZERO_PIXEL_PRIM = 12,
422 PERF_TSE_OUTPUT_NULL_PRIM = 13,
423 PERF_TSE_OUTPUT_VISIBLE_PRIM = 14,
424 PERF_TSE_CINVOCATION = 15,
425 PERF_TSE_CPRIMITIVES = 16,
426 PERF_TSE_2D_INPUT_PRIM = 17,
427 PERF_TSE_2D_ALIVE_CLCLES = 18,
428};
429
430enum a5xx_ras_perfcounter_select {
431 PERF_RAS_BUSY_CYCLES = 0,
432 PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1,
433 PERF_RAS_STALL_CYCLES_LRZ = 2,
434 PERF_RAS_STARVE_CYCLES_TSE = 3,
435 PERF_RAS_SUPER_TILES = 4,
436 PERF_RAS_8X4_TILES = 5,
437 PERF_RAS_MASKGEN_ACTIVE = 6,
438 PERF_RAS_FULLY_COVERED_SUPER_TILES = 7,
439 PERF_RAS_FULLY_COVERED_8X4_TILES = 8,
440 PERF_RAS_PRIM_KILLED_INVISILBE = 9,
441};
442
443enum a5xx_lrz_perfcounter_select {
444 PERF_LRZ_BUSY_CYCLES = 0,
445 PERF_LRZ_STARVE_CYCLES_RAS = 1,
446 PERF_LRZ_STALL_CYCLES_RB = 2,
447 PERF_LRZ_STALL_CYCLES_VSC = 3,
448 PERF_LRZ_STALL_CYCLES_VPC = 4,
449 PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5,
450 PERF_LRZ_STALL_CYCLES_UCHE = 6,
451 PERF_LRZ_LRZ_READ = 7,
452 PERF_LRZ_LRZ_WRITE = 8,
453 PERF_LRZ_READ_LATENCY = 9,
454 PERF_LRZ_MERGE_CACHE_UPDATING = 10,
455 PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11,
456 PERF_LRZ_PRIM_KILLED_BY_LRZ = 12,
457 PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13,
458 PERF_LRZ_FULL_8X8_TILES = 14,
459 PERF_LRZ_PARTIAL_8X8_TILES = 15,
460 PERF_LRZ_TILE_KILLED = 16,
461 PERF_LRZ_TOTAL_PIXEL = 17,
462 PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18,
463};
464
465enum a5xx_uche_perfcounter_select {
466 PERF_UCHE_BUSY_CYCLES = 0,
467 PERF_UCHE_STALL_CYCLES_VBIF = 1,
468 PERF_UCHE_VBIF_LATENCY_CYCLES = 2,
469 PERF_UCHE_VBIF_LATENCY_SAMPLES = 3,
470 PERF_UCHE_VBIF_READ_BEATS_TP = 4,
471 PERF_UCHE_VBIF_READ_BEATS_VFD = 5,
472 PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6,
473 PERF_UCHE_VBIF_READ_BEATS_LRZ = 7,
474 PERF_UCHE_VBIF_READ_BEATS_SP = 8,
475 PERF_UCHE_READ_REQUESTS_TP = 9,
476 PERF_UCHE_READ_REQUESTS_VFD = 10,
477 PERF_UCHE_READ_REQUESTS_HLSQ = 11,
478 PERF_UCHE_READ_REQUESTS_LRZ = 12,
479 PERF_UCHE_READ_REQUESTS_SP = 13,
480 PERF_UCHE_WRITE_REQUESTS_LRZ = 14,
481 PERF_UCHE_WRITE_REQUESTS_SP = 15,
482 PERF_UCHE_WRITE_REQUESTS_VPC = 16,
483 PERF_UCHE_WRITE_REQUESTS_VSC = 17,
484 PERF_UCHE_EVICTS = 18,
485 PERF_UCHE_BANK_REQ0 = 19,
486 PERF_UCHE_BANK_REQ1 = 20,
487 PERF_UCHE_BANK_REQ2 = 21,
488 PERF_UCHE_BANK_REQ3 = 22,
489 PERF_UCHE_BANK_REQ4 = 23,
490 PERF_UCHE_BANK_REQ5 = 24,
491 PERF_UCHE_BANK_REQ6 = 25,
492 PERF_UCHE_BANK_REQ7 = 26,
493 PERF_UCHE_VBIF_READ_BEATS_CH0 = 27,
494 PERF_UCHE_VBIF_READ_BEATS_CH1 = 28,
495 PERF_UCHE_GMEM_READ_BEATS = 29,
496 PERF_UCHE_FLAG_COUNT = 30,
497};
498
499enum a5xx_tp_perfcounter_select {
500 PERF_TP_BUSY_CYCLES = 0,
501 PERF_TP_STALL_CYCLES_UCHE = 1,
502 PERF_TP_LATENCY_CYCLES = 2,
503 PERF_TP_LATENCY_TRANS = 3,
504 PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4,
505 PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5,
506 PERF_TP_L1_CACHELINE_REQUESTS = 6,
507 PERF_TP_L1_CACHELINE_MISSES = 7,
508 PERF_TP_SP_TP_TRANS = 8,
509 PERF_TP_TP_SP_TRANS = 9,
510 PERF_TP_OUTPUT_PIXELS = 10,
511 PERF_TP_FILTER_WORKLOAD_16BIT = 11,
512 PERF_TP_FILTER_WORKLOAD_32BIT = 12,
513 PERF_TP_QUADS_RECEIVED = 13,
514 PERF_TP_QUADS_OFFSET = 14,
515 PERF_TP_QUADS_SHADOW = 15,
516 PERF_TP_QUADS_ARRAY = 16,
517 PERF_TP_QUADS_GRADIENT = 17,
518 PERF_TP_QUADS_1D = 18,
519 PERF_TP_QUADS_2D = 19,
520 PERF_TP_QUADS_BUFFER = 20,
521 PERF_TP_QUADS_3D = 21,
522 PERF_TP_QUADS_CUBE = 22,
523 PERF_TP_STATE_CACHE_REQUESTS = 23,
524 PERF_TP_STATE_CACHE_MISSES = 24,
525 PERF_TP_DIVERGENT_QUADS_RECEIVED = 25,
526 PERF_TP_BINDLESS_STATE_CACHE_REQUESTS = 26,
527 PERF_TP_BINDLESS_STATE_CACHE_MISSES = 27,
528 PERF_TP_PRT_NON_RESIDENT_EVENTS = 28,
529 PERF_TP_OUTPUT_PIXELS_POINT = 29,
530 PERF_TP_OUTPUT_PIXELS_BILINEAR = 30,
531 PERF_TP_OUTPUT_PIXELS_MIP = 31,
532 PERF_TP_OUTPUT_PIXELS_ANISO = 32,
533 PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 33,
534 PERF_TP_FLAG_CACHE_REQUESTS = 34,
535 PERF_TP_FLAG_CACHE_MISSES = 35,
536 PERF_TP_L1_5_L2_REQUESTS = 36,
537 PERF_TP_2D_OUTPUT_PIXELS = 37,
538 PERF_TP_2D_OUTPUT_PIXELS_POINT = 38,
539 PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 39,
540 PERF_TP_2D_FILTER_WORKLOAD_16BIT = 40,
541 PERF_TP_2D_FILTER_WORKLOAD_32BIT = 41,
542};
543
544enum a5xx_sp_perfcounter_select {
545 PERF_SP_BUSY_CYCLES = 0,
546 PERF_SP_ALU_WORKING_CYCLES = 1,
547 PERF_SP_EFU_WORKING_CYCLES = 2,
548 PERF_SP_STALL_CYCLES_VPC = 3,
549 PERF_SP_STALL_CYCLES_TP = 4,
550 PERF_SP_STALL_CYCLES_UCHE = 5,
551 PERF_SP_STALL_CYCLES_RB = 6,
552 PERF_SP_SCHEDULER_NON_WORKING = 7,
553 PERF_SP_WAVE_CONTEXTS = 8,
554 PERF_SP_WAVE_CONTEXT_CYCLES = 9,
555 PERF_SP_FS_STAGE_WAVE_CYCLES = 10,
556 PERF_SP_FS_STAGE_WAVE_SAMPLES = 11,
557 PERF_SP_VS_STAGE_WAVE_CYCLES = 12,
558 PERF_SP_VS_STAGE_WAVE_SAMPLES = 13,
559 PERF_SP_FS_STAGE_DURATION_CYCLES = 14,
560 PERF_SP_VS_STAGE_DURATION_CYCLES = 15,
561 PERF_SP_WAVE_CTRL_CYCLES = 16,
562 PERF_SP_WAVE_LOAD_CYCLES = 17,
563 PERF_SP_WAVE_EMIT_CYCLES = 18,
564 PERF_SP_WAVE_NOP_CYCLES = 19,
565 PERF_SP_WAVE_WAIT_CYCLES = 20,
566 PERF_SP_WAVE_FETCH_CYCLES = 21,
567 PERF_SP_WAVE_IDLE_CYCLES = 22,
568 PERF_SP_WAVE_END_CYCLES = 23,
569 PERF_SP_WAVE_LONG_SYNC_CYCLES = 24,
570 PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25,
571 PERF_SP_WAVE_JOIN_CYCLES = 26,
572 PERF_SP_LM_LOAD_INSTRUCTIONS = 27,
573 PERF_SP_LM_STORE_INSTRUCTIONS = 28,
574 PERF_SP_LM_ATOMICS = 29,
575 PERF_SP_GM_LOAD_INSTRUCTIONS = 30,
576 PERF_SP_GM_STORE_INSTRUCTIONS = 31,
577 PERF_SP_GM_ATOMICS = 32,
578 PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33,
579 PERF_SP_VS_STAGE_CFLOW_INSTRUCTIONS = 34,
580 PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 35,
581 PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 36,
582 PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 37,
583 PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 38,
584 PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 39,
585 PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 40,
586 PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 41,
587 PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 42,
588 PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 43,
589 PERF_SP_VS_INSTRUCTIONS = 44,
590 PERF_SP_FS_INSTRUCTIONS = 45,
591 PERF_SP_ADDR_LOCK_COUNT = 46,
592 PERF_SP_UCHE_READ_TRANS = 47,
593 PERF_SP_UCHE_WRITE_TRANS = 48,
594 PERF_SP_EXPORT_VPC_TRANS = 49,
595 PERF_SP_EXPORT_RB_TRANS = 50,
596 PERF_SP_PIXELS_KILLED = 51,
597 PERF_SP_ICL1_REQUESTS = 52,
598 PERF_SP_ICL1_MISSES = 53,
599 PERF_SP_ICL0_REQUESTS = 54,
600 PERF_SP_ICL0_MISSES = 55,
601 PERF_SP_HS_INSTRUCTIONS = 56,
602 PERF_SP_DS_INSTRUCTIONS = 57,
603 PERF_SP_GS_INSTRUCTIONS = 58,
604 PERF_SP_CS_INSTRUCTIONS = 59,
605 PERF_SP_GPR_READ = 60,
606 PERF_SP_GPR_WRITE = 61,
607 PERF_SP_LM_CH0_REQUESTS = 62,
608 PERF_SP_LM_CH1_REQUESTS = 63,
609 PERF_SP_LM_BANK_CONFLICTS = 64,
610};
611
612enum a5xx_rb_perfcounter_select {
613 PERF_RB_BUSY_CYCLES = 0,
614 PERF_RB_STALL_CYCLES_CCU = 1,
615 PERF_RB_STALL_CYCLES_HLSQ = 2,
616 PERF_RB_STALL_CYCLES_FIFO0_FULL = 3,
617 PERF_RB_STALL_CYCLES_FIFO1_FULL = 4,
618 PERF_RB_STALL_CYCLES_FIFO2_FULL = 5,
619 PERF_RB_STARVE_CYCLES_SP = 6,
620 PERF_RB_STARVE_CYCLES_LRZ_TILE = 7,
621 PERF_RB_STARVE_CYCLES_CCU = 8,
622 PERF_RB_STARVE_CYCLES_Z_PLANE = 9,
623 PERF_RB_STARVE_CYCLES_BARY_PLANE = 10,
624 PERF_RB_Z_WORKLOAD = 11,
625 PERF_RB_HLSQ_ACTIVE = 12,
626 PERF_RB_Z_READ = 13,
627 PERF_RB_Z_WRITE = 14,
628 PERF_RB_C_READ = 15,
629 PERF_RB_C_WRITE = 16,
630 PERF_RB_TOTAL_PASS = 17,
631 PERF_RB_Z_PASS = 18,
632 PERF_RB_Z_FAIL = 19,
633 PERF_RB_S_FAIL = 20,
634 PERF_RB_BLENDED_FXP_COMPONENTS = 21,
635 PERF_RB_BLENDED_FP16_COMPONENTS = 22,
636 RB_RESERVED = 23,
637 PERF_RB_2D_ALIVE_CYCLES = 24,
638 PERF_RB_2D_STALL_CYCLES_A2D = 25,
639 PERF_RB_2D_STARVE_CYCLES_SRC = 26,
640 PERF_RB_2D_STARVE_CYCLES_SP = 27,
641 PERF_RB_2D_STARVE_CYCLES_DST = 28,
642 PERF_RB_2D_VALID_PIXELS = 29,
643};
644
645enum a5xx_rb_samples_perfcounter_select {
646 TOTAL_SAMPLES = 0,
647 ZPASS_SAMPLES = 1,
648 ZFAIL_SAMPLES = 2,
649 SFAIL_SAMPLES = 3,
650};
651
652enum a5xx_vsc_perfcounter_select {
653 PERF_VSC_BUSY_CYCLES = 0,
654 PERF_VSC_WORKING_CYCLES = 1,
655 PERF_VSC_STALL_CYCLES_UCHE = 2,
656 PERF_VSC_EOT_NUM = 3,
657};
658
659enum a5xx_ccu_perfcounter_select {
660 PERF_CCU_BUSY_CYCLES = 0,
661 PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1,
662 PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2,
663 PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3,
664 PERF_CCU_DEPTH_BLOCKS = 4,
665 PERF_CCU_COLOR_BLOCKS = 5,
666 PERF_CCU_DEPTH_BLOCK_HIT = 6,
667 PERF_CCU_COLOR_BLOCK_HIT = 7,
668 PERF_CCU_PARTIAL_BLOCK_READ = 8,
669 PERF_CCU_GMEM_READ = 9,
670 PERF_CCU_GMEM_WRITE = 10,
671 PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11,
672 PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12,
673 PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13,
674 PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14,
675 PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15,
676 PERF_CCU_COLOR_READ_FLAG0_COUNT = 16,
677 PERF_CCU_COLOR_READ_FLAG1_COUNT = 17,
678 PERF_CCU_COLOR_READ_FLAG2_COUNT = 18,
679 PERF_CCU_COLOR_READ_FLAG3_COUNT = 19,
680 PERF_CCU_COLOR_READ_FLAG4_COUNT = 20,
681 PERF_CCU_2D_BUSY_CYCLES = 21,
682 PERF_CCU_2D_RD_REQ = 22,
683 PERF_CCU_2D_WR_REQ = 23,
684 PERF_CCU_2D_REORDER_STARVE_CYCLES = 24,
685 PERF_CCU_2D_PIXELS = 25,
686};
687
688enum a5xx_cmp_perfcounter_select {
689 PERF_CMPDECMP_STALL_CYCLES_VBIF = 0,
690 PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1,
691 PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2,
692 PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3,
693 PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4,
694 PERF_CMPDECMP_VBIF_READ_REQUEST = 5,
695 PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6,
696 PERF_CMPDECMP_VBIF_READ_DATA = 7,
697 PERF_CMPDECMP_VBIF_WRITE_DATA = 8,
698 PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9,
699 PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10,
700 PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11,
701 PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12,
702 PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13,
703 PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14,
704 PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 15,
705 PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 16,
706 PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 17,
707 PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 18,
708 PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 19,
709 PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 20,
710 PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 21,
711 PERF_CMPDECMP_2D_RD_DATA = 22,
712 PERF_CMPDECMP_2D_WR_DATA = 23,
713};
714
715enum a5xx_vbif_perfcounter_select {
716 AXI_READ_REQUESTS_ID_0 = 0,
717 AXI_READ_REQUESTS_ID_1 = 1,
718 AXI_READ_REQUESTS_ID_2 = 2,
719 AXI_READ_REQUESTS_ID_3 = 3,
720 AXI_READ_REQUESTS_ID_4 = 4,
721 AXI_READ_REQUESTS_ID_5 = 5,
722 AXI_READ_REQUESTS_ID_6 = 6,
723 AXI_READ_REQUESTS_ID_7 = 7,
724 AXI_READ_REQUESTS_ID_8 = 8,
725 AXI_READ_REQUESTS_ID_9 = 9,
726 AXI_READ_REQUESTS_ID_10 = 10,
727 AXI_READ_REQUESTS_ID_11 = 11,
728 AXI_READ_REQUESTS_ID_12 = 12,
729 AXI_READ_REQUESTS_ID_13 = 13,
730 AXI_READ_REQUESTS_ID_14 = 14,
731 AXI_READ_REQUESTS_ID_15 = 15,
732 AXI0_READ_REQUESTS_TOTAL = 16,
733 AXI1_READ_REQUESTS_TOTAL = 17,
734 AXI2_READ_REQUESTS_TOTAL = 18,
735 AXI3_READ_REQUESTS_TOTAL = 19,
736 AXI_READ_REQUESTS_TOTAL = 20,
737 AXI_WRITE_REQUESTS_ID_0 = 21,
738 AXI_WRITE_REQUESTS_ID_1 = 22,
739 AXI_WRITE_REQUESTS_ID_2 = 23,
740 AXI_WRITE_REQUESTS_ID_3 = 24,
741 AXI_WRITE_REQUESTS_ID_4 = 25,
742 AXI_WRITE_REQUESTS_ID_5 = 26,
743 AXI_WRITE_REQUESTS_ID_6 = 27,
744 AXI_WRITE_REQUESTS_ID_7 = 28,
745 AXI_WRITE_REQUESTS_ID_8 = 29,
746 AXI_WRITE_REQUESTS_ID_9 = 30,
747 AXI_WRITE_REQUESTS_ID_10 = 31,
748 AXI_WRITE_REQUESTS_ID_11 = 32,
749 AXI_WRITE_REQUESTS_ID_12 = 33,
750 AXI_WRITE_REQUESTS_ID_13 = 34,
751 AXI_WRITE_REQUESTS_ID_14 = 35,
752 AXI_WRITE_REQUESTS_ID_15 = 36,
753 AXI0_WRITE_REQUESTS_TOTAL = 37,
754 AXI1_WRITE_REQUESTS_TOTAL = 38,
755 AXI2_WRITE_REQUESTS_TOTAL = 39,
756 AXI3_WRITE_REQUESTS_TOTAL = 40,
757 AXI_WRITE_REQUESTS_TOTAL = 41,
758 AXI_TOTAL_REQUESTS = 42,
759 AXI_READ_DATA_BEATS_ID_0 = 43,
760 AXI_READ_DATA_BEATS_ID_1 = 44,
761 AXI_READ_DATA_BEATS_ID_2 = 45,
762 AXI_READ_DATA_BEATS_ID_3 = 46,
763 AXI_READ_DATA_BEATS_ID_4 = 47,
764 AXI_READ_DATA_BEATS_ID_5 = 48,
765 AXI_READ_DATA_BEATS_ID_6 = 49,
766 AXI_READ_DATA_BEATS_ID_7 = 50,
767 AXI_READ_DATA_BEATS_ID_8 = 51,
768 AXI_READ_DATA_BEATS_ID_9 = 52,
769 AXI_READ_DATA_BEATS_ID_10 = 53,
770 AXI_READ_DATA_BEATS_ID_11 = 54,
771 AXI_READ_DATA_BEATS_ID_12 = 55,
772 AXI_READ_DATA_BEATS_ID_13 = 56,
773 AXI_READ_DATA_BEATS_ID_14 = 57,
774 AXI_READ_DATA_BEATS_ID_15 = 58,
775 AXI0_READ_DATA_BEATS_TOTAL = 59,
776 AXI1_READ_DATA_BEATS_TOTAL = 60,
777 AXI2_READ_DATA_BEATS_TOTAL = 61,
778 AXI3_READ_DATA_BEATS_TOTAL = 62,
779 AXI_READ_DATA_BEATS_TOTAL = 63,
780 AXI_WRITE_DATA_BEATS_ID_0 = 64,
781 AXI_WRITE_DATA_BEATS_ID_1 = 65,
782 AXI_WRITE_DATA_BEATS_ID_2 = 66,
783 AXI_WRITE_DATA_BEATS_ID_3 = 67,
784 AXI_WRITE_DATA_BEATS_ID_4 = 68,
785 AXI_WRITE_DATA_BEATS_ID_5 = 69,
786 AXI_WRITE_DATA_BEATS_ID_6 = 70,
787 AXI_WRITE_DATA_BEATS_ID_7 = 71,
788 AXI_WRITE_DATA_BEATS_ID_8 = 72,
789 AXI_WRITE_DATA_BEATS_ID_9 = 73,
790 AXI_WRITE_DATA_BEATS_ID_10 = 74,
791 AXI_WRITE_DATA_BEATS_ID_11 = 75,
792 AXI_WRITE_DATA_BEATS_ID_12 = 76,
793 AXI_WRITE_DATA_BEATS_ID_13 = 77,
794 AXI_WRITE_DATA_BEATS_ID_14 = 78,
795 AXI_WRITE_DATA_BEATS_ID_15 = 79,
796 AXI0_WRITE_DATA_BEATS_TOTAL = 80,
797 AXI1_WRITE_DATA_BEATS_TOTAL = 81,
798 AXI2_WRITE_DATA_BEATS_TOTAL = 82,
799 AXI3_WRITE_DATA_BEATS_TOTAL = 83,
800 AXI_WRITE_DATA_BEATS_TOTAL = 84,
801 AXI_DATA_BEATS_TOTAL = 85,
802};
803
185enum a5xx_tex_filter { 804enum a5xx_tex_filter {
186 A5XX_TEX_NEAREST = 0, 805 A5XX_TEX_NEAREST = 0,
187 A5XX_TEX_LINEAR = 1, 806 A5XX_TEX_LINEAR = 1,
@@ -1289,25 +1908,83 @@ static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
1289 1908
1290#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810 1909#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
1291 1910
1292#define REG_A5XX_VSC_PIPE_DATA_LENGTH_0 0x00000c00 1911#define REG_A5XX_VSC_BIN_SIZE 0x00000bc2
1912#define A5XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
1913#define A5XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
1914static inline uint32_t A5XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
1915{
1916 return ((val >> 5) << A5XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A5XX_VSC_BIN_SIZE_WIDTH__MASK;
1917}
1918#define A5XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001fe00
1919#define A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT 9
1920static inline uint32_t A5XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
1921{
1922 return ((val >> 5) << A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A5XX_VSC_BIN_SIZE_HEIGHT__MASK;
1923}
1924
1925#define REG_A5XX_VSC_SIZE_ADDRESS_LO 0x00000bc3
1926
1927#define REG_A5XX_VSC_SIZE_ADDRESS_HI 0x00000bc4
1928
1929#define REG_A5XX_UNKNOWN_0BC5 0x00000bc5
1930
1931#define REG_A5XX_UNKNOWN_0BC6 0x00000bc6
1932
1933static inline uint32_t REG_A5XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000bd0 + 0x1*i0; }
1934
1935static inline uint32_t REG_A5XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000bd0 + 0x1*i0; }
1936#define A5XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
1937#define A5XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
1938static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
1939{
1940 return ((val) << A5XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_X__MASK;
1941}
1942#define A5XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
1943#define A5XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
1944static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
1945{
1946 return ((val) << A5XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_Y__MASK;
1947}
1948#define A5XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000
1949#define A5XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
1950static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
1951{
1952 return ((val) << A5XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_W__MASK;
1953}
1954#define A5XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000
1955#define A5XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24
1956static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
1957{
1958 return ((val) << A5XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_H__MASK;
1959}
1960
1961static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000be0 + 0x2*i0; }
1962
1963static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS_LO(uint32_t i0) { return 0x00000be0 + 0x2*i0; }
1964
1965static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS_HI(uint32_t i0) { return 0x00000be1 + 0x2*i0; }
1966
1967static inline uint32_t REG_A5XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c00 + 0x1*i0; }
1968
1969static inline uint32_t REG_A5XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c00 + 0x1*i0; }
1293 1970
1294#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60 1971#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60
1295 1972
1296#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61 1973#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61
1297 1974
1298#define REG_A5XX_VSC_BIN_SIZE 0x00000cdd 1975#define REG_A5XX_VSC_RESOLVE_CNTL 0x00000cdd
1299#define A5XX_VSC_BIN_SIZE_WINDOW_OFFSET_DISABLE 0x80000000 1976#define A5XX_VSC_RESOLVE_CNTL_WINDOW_OFFSET_DISABLE 0x80000000
1300#define A5XX_VSC_BIN_SIZE_X__MASK 0x00007fff 1977#define A5XX_VSC_RESOLVE_CNTL_X__MASK 0x00007fff
1301#define A5XX_VSC_BIN_SIZE_X__SHIFT 0 1978#define A5XX_VSC_RESOLVE_CNTL_X__SHIFT 0
1302static inline uint32_t A5XX_VSC_BIN_SIZE_X(uint32_t val) 1979static inline uint32_t A5XX_VSC_RESOLVE_CNTL_X(uint32_t val)
1303{ 1980{
1304 return ((val) << A5XX_VSC_BIN_SIZE_X__SHIFT) & A5XX_VSC_BIN_SIZE_X__MASK; 1981 return ((val) << A5XX_VSC_RESOLVE_CNTL_X__SHIFT) & A5XX_VSC_RESOLVE_CNTL_X__MASK;
1305} 1982}
1306#define A5XX_VSC_BIN_SIZE_Y__MASK 0x7fff0000 1983#define A5XX_VSC_RESOLVE_CNTL_Y__MASK 0x7fff0000
1307#define A5XX_VSC_BIN_SIZE_Y__SHIFT 16 1984#define A5XX_VSC_RESOLVE_CNTL_Y__SHIFT 16
1308static inline uint32_t A5XX_VSC_BIN_SIZE_Y(uint32_t val) 1985static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
1309{ 1986{
1310 return ((val) << A5XX_VSC_BIN_SIZE_Y__SHIFT) & A5XX_VSC_BIN_SIZE_Y__MASK; 1987 return ((val) << A5XX_VSC_RESOLVE_CNTL_Y__SHIFT) & A5XX_VSC_RESOLVE_CNTL_Y__MASK;
1311} 1988}
1312 1989
1313#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81 1990#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81
@@ -1470,6 +2147,7 @@ static inline uint32_t A5XX_VSC_BIN_SIZE_Y(uint32_t val)
1470#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61 2147#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61
1471 2148
1472#define REG_A5XX_VPC_MODE_CNTL 0x00000e62 2149#define REG_A5XX_VPC_MODE_CNTL 0x00000e62
2150#define A5XX_VPC_MODE_CNTL_BINNING_PASS 0x00000001
1473 2151
1474#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64 2152#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64
1475 2153
@@ -1641,6 +2319,14 @@ static inline uint32_t A5XX_VSC_BIN_SIZE_Y(uint32_t val)
1641 2319
1642#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c 2320#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c
1643 2321
2322#define REG_A5XX_VBIF_PERF_CNT_EN0 0x000030c0
2323
2324#define REG_A5XX_VBIF_PERF_CNT_EN1 0x000030c1
2325
2326#define REG_A5XX_VBIF_PERF_CNT_EN2 0x000030c2
2327
2328#define REG_A5XX_VBIF_PERF_CNT_EN3 0x000030c3
2329
1644#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0 2330#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
1645 2331
1646#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1 2332#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
@@ -1911,6 +2597,11 @@ static inline uint32_t A5XX_VSC_BIN_SIZE_Y(uint32_t val)
1911 2597
1912#define REG_A5XX_GRAS_CNTL 0x0000e005 2598#define REG_A5XX_GRAS_CNTL 0x0000e005
1913#define A5XX_GRAS_CNTL_VARYING 0x00000001 2599#define A5XX_GRAS_CNTL_VARYING 0x00000001
2600#define A5XX_GRAS_CNTL_UNK3 0x00000008
2601#define A5XX_GRAS_CNTL_XCOORD 0x00000040
2602#define A5XX_GRAS_CNTL_YCOORD 0x00000080
2603#define A5XX_GRAS_CNTL_ZCOORD 0x00000100
2604#define A5XX_GRAS_CNTL_WCOORD 0x00000200
1914 2605
1915#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006 2606#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006
1916#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff 2607#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
@@ -1975,6 +2666,8 @@ static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
1975} 2666}
1976 2667
1977#define REG_A5XX_GRAS_SU_CNTL 0x0000e090 2668#define REG_A5XX_GRAS_SU_CNTL 0x0000e090
2669#define A5XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001
2670#define A5XX_GRAS_SU_CNTL_CULL_BACK 0x00000002
1978#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004 2671#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
1979#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8 2672#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
1980#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3 2673#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
@@ -2010,7 +2703,8 @@ static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
2010#define REG_A5XX_UNKNOWN_E093 0x0000e093 2703#define REG_A5XX_UNKNOWN_E093 0x0000e093
2011 2704
2012#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094 2705#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
2013#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_ALPHA_TEST_ENABLE 0x00000001 2706#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
2707#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_UNK1 0x00000002
2014 2708
2015#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095 2709#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095
2016#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff 2710#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
@@ -2047,6 +2741,7 @@ static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_dep
2047#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099 2741#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099
2048 2742
2049#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0 2743#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0
2744#define A5XX_GRAS_SC_CNTL_BINNING_PASS 0x00000001
2050#define A5XX_GRAS_SC_CNTL_SAMPLES_PASSED 0x00008000 2745#define A5XX_GRAS_SC_CNTL_SAMPLES_PASSED 0x00008000
2051 2746
2052#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1 2747#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1
@@ -2161,12 +2856,21 @@ static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
2161} 2856}
2162 2857
2163#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100 2858#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100
2859#define A5XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
2860#define A5XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
2861#define A5XX_GRAS_LRZ_CNTL_GREATER 0x00000004
2164 2862
2165#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101 2863#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101
2166 2864
2167#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102 2865#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102
2168 2866
2169#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103 2867#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103
2868#define A5XX_GRAS_LRZ_BUFFER_PITCH__MASK 0xffffffff
2869#define A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT 0
2870static inline uint32_t A5XX_GRAS_LRZ_BUFFER_PITCH(uint32_t val)
2871{
2872 return ((val >> 5) << A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT) & A5XX_GRAS_LRZ_BUFFER_PITCH__MASK;
2873}
2170 2874
2171#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104 2875#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104
2172 2876
@@ -2188,7 +2892,9 @@ static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
2188#define A5XX_RB_CNTL_BYPASS 0x00020000 2892#define A5XX_RB_CNTL_BYPASS 0x00020000
2189 2893
2190#define REG_A5XX_RB_RENDER_CNTL 0x0000e141 2894#define REG_A5XX_RB_RENDER_CNTL 0x0000e141
2895#define A5XX_RB_RENDER_CNTL_BINNING_PASS 0x00000001
2191#define A5XX_RB_RENDER_CNTL_SAMPLES_PASSED 0x00000040 2896#define A5XX_RB_RENDER_CNTL_SAMPLES_PASSED 0x00000040
2897#define A5XX_RB_RENDER_CNTL_DISABLE_COLOR_PIPE 0x00000080
2192#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000 2898#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
2193#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH2 0x00008000 2899#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH2 0x00008000
2194#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000 2900#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
@@ -2223,6 +2929,7 @@ static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val
2223 2929
2224#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144 2930#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144
2225#define A5XX_RB_RENDER_CONTROL0_VARYING 0x00000001 2931#define A5XX_RB_RENDER_CONTROL0_VARYING 0x00000001
2932#define A5XX_RB_RENDER_CONTROL0_UNK3 0x00000008
2226#define A5XX_RB_RENDER_CONTROL0_XCOORD 0x00000040 2933#define A5XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
2227#define A5XX_RB_RENDER_CONTROL0_YCOORD 0x00000080 2934#define A5XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
2228#define A5XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100 2935#define A5XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
@@ -2525,6 +3232,7 @@ static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
2525 3232
2526#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0 3233#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0
2527#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001 3234#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
3235#define A5XX_RB_DEPTH_PLANE_CNTL_UNK1 0x00000002
2528 3236
2529#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1 3237#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1
2530#define A5XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001 3238#define A5XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
@@ -2554,7 +3262,7 @@ static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_fo
2554#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0 3262#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
2555static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val) 3263static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
2556{ 3264{
2557 return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK; 3265 return ((val >> 6) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
2558} 3266}
2559 3267
2560#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6 3268#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6
@@ -2562,7 +3270,7 @@ static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
2562#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0 3270#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
2563static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val) 3271static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
2564{ 3272{
2565 return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK; 3273 return ((val >> 6) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
2566} 3274}
2567 3275
2568#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0 3276#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0
@@ -2678,8 +3386,11 @@ static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
2678 return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK; 3386 return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK;
2679} 3387}
2680 3388
3389#define REG_A5XX_RB_SAMPLE_COUNT_CONTROL 0x0000e1d1
3390#define A5XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
3391
2681#define REG_A5XX_RB_BLIT_CNTL 0x0000e210 3392#define REG_A5XX_RB_BLIT_CNTL 0x0000e210
2682#define A5XX_RB_BLIT_CNTL_BUF__MASK 0x0000003f 3393#define A5XX_RB_BLIT_CNTL_BUF__MASK 0x0000000f
2683#define A5XX_RB_BLIT_CNTL_BUF__SHIFT 0 3394#define A5XX_RB_BLIT_CNTL_BUF__SHIFT 0
2684static inline uint32_t A5XX_RB_BLIT_CNTL_BUF(enum a5xx_blit_buf val) 3395static inline uint32_t A5XX_RB_BLIT_CNTL_BUF(enum a5xx_blit_buf val)
2685{ 3396{
@@ -2803,6 +3514,10 @@ static inline uint32_t A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH(uint32_t val)
2803 return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK; 3514 return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK;
2804} 3515}
2805 3516
3517#define REG_A5XX_RB_SAMPLE_COUNT_ADDR_LO 0x0000e267
3518
3519#define REG_A5XX_RB_SAMPLE_COUNT_ADDR_HI 0x0000e268
3520
2806#define REG_A5XX_VPC_CNTL_0 0x0000e280 3521#define REG_A5XX_VPC_CNTL_0 0x0000e280
2807#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f 3522#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f
2808#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0 3523#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0
@@ -2839,32 +3554,71 @@ static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
2839{ 3554{
2840 return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK; 3555 return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
2841} 3556}
3557#define A5XX_VPC_PACK_PSIZELOC__MASK 0x0000ff00
3558#define A5XX_VPC_PACK_PSIZELOC__SHIFT 8
3559static inline uint32_t A5XX_VPC_PACK_PSIZELOC(uint32_t val)
3560{
3561 return ((val) << A5XX_VPC_PACK_PSIZELOC__SHIFT) & A5XX_VPC_PACK_PSIZELOC__MASK;
3562}
2842 3563
2843#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0 3564#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0
2844 3565
2845#define REG_A5XX_UNKNOWN_E2A1 0x0000e2a1 3566#define REG_A5XX_VPC_SO_BUF_CNTL 0x0000e2a1
3567#define A5XX_VPC_SO_BUF_CNTL_BUF0 0x00000001
3568#define A5XX_VPC_SO_BUF_CNTL_BUF1 0x00000008
3569#define A5XX_VPC_SO_BUF_CNTL_BUF2 0x00000040
3570#define A5XX_VPC_SO_BUF_CNTL_BUF3 0x00000200
3571#define A5XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000
2846 3572
2847#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2 3573#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2
3574#define A5XX_VPC_SO_OVERRIDE_SO_DISABLE 0x00000001
2848 3575
2849#define REG_A5XX_VPC_SO_BUFFER_BASE_LO_0 0x0000e2a7 3576#define REG_A5XX_VPC_SO_CNTL 0x0000e2a3
3577#define A5XX_VPC_SO_CNTL_ENABLE 0x00010000
2850 3578
2851#define REG_A5XX_VPC_SO_BUFFER_BASE_HI_0 0x0000e2a8 3579#define REG_A5XX_VPC_SO_PROG 0x0000e2a4
3580#define A5XX_VPC_SO_PROG_A_BUF__MASK 0x00000003
3581#define A5XX_VPC_SO_PROG_A_BUF__SHIFT 0
3582static inline uint32_t A5XX_VPC_SO_PROG_A_BUF(uint32_t val)
3583{
3584 return ((val) << A5XX_VPC_SO_PROG_A_BUF__SHIFT) & A5XX_VPC_SO_PROG_A_BUF__MASK;
3585}
3586#define A5XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc
3587#define A5XX_VPC_SO_PROG_A_OFF__SHIFT 2
3588static inline uint32_t A5XX_VPC_SO_PROG_A_OFF(uint32_t val)
3589{
3590 return ((val >> 2) << A5XX_VPC_SO_PROG_A_OFF__SHIFT) & A5XX_VPC_SO_PROG_A_OFF__MASK;
3591}
3592#define A5XX_VPC_SO_PROG_A_EN 0x00000800
3593#define A5XX_VPC_SO_PROG_B_BUF__MASK 0x00003000
3594#define A5XX_VPC_SO_PROG_B_BUF__SHIFT 12
3595static inline uint32_t A5XX_VPC_SO_PROG_B_BUF(uint32_t val)
3596{
3597 return ((val) << A5XX_VPC_SO_PROG_B_BUF__SHIFT) & A5XX_VPC_SO_PROG_B_BUF__MASK;
3598}
3599#define A5XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000
3600#define A5XX_VPC_SO_PROG_B_OFF__SHIFT 14
3601static inline uint32_t A5XX_VPC_SO_PROG_B_OFF(uint32_t val)
3602{
3603 return ((val >> 2) << A5XX_VPC_SO_PROG_B_OFF__SHIFT) & A5XX_VPC_SO_PROG_B_OFF__MASK;
3604}
3605#define A5XX_VPC_SO_PROG_B_EN 0x00800000
2852 3606
2853#define REG_A5XX_VPC_SO_BUFFER_SIZE_0 0x0000e2a9 3607static inline uint32_t REG_A5XX_VPC_SO(uint32_t i0) { return 0x0000e2a7 + 0x7*i0; }
2854 3608
2855#define REG_A5XX_UNKNOWN_E2AB 0x0000e2ab 3609static inline uint32_t REG_A5XX_VPC_SO_BUFFER_BASE_LO(uint32_t i0) { return 0x0000e2a7 + 0x7*i0; }
2856 3610
2857#define REG_A5XX_VPC_SO_FLUSH_BASE_LO_0 0x0000e2ac 3611static inline uint32_t REG_A5XX_VPC_SO_BUFFER_BASE_HI(uint32_t i0) { return 0x0000e2a8 + 0x7*i0; }
2858 3612
2859#define REG_A5XX_VPC_SO_FLUSH_BASE_HI_0 0x0000e2ad 3613static inline uint32_t REG_A5XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000e2a9 + 0x7*i0; }
2860 3614
2861#define REG_A5XX_UNKNOWN_E2AE 0x0000e2ae 3615static inline uint32_t REG_A5XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000e2aa + 0x7*i0; }
2862 3616
2863#define REG_A5XX_UNKNOWN_E2B2 0x0000e2b2 3617static inline uint32_t REG_A5XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000e2ab + 0x7*i0; }
2864 3618
2865#define REG_A5XX_UNKNOWN_E2B9 0x0000e2b9 3619static inline uint32_t REG_A5XX_VPC_SO_FLUSH_BASE_LO(uint32_t i0) { return 0x0000e2ac + 0x7*i0; }
2866 3620
2867#define REG_A5XX_UNKNOWN_E2C0 0x0000e2c0 3621static inline uint32_t REG_A5XX_VPC_SO_FLUSH_BASE_HI(uint32_t i0) { return 0x0000e2ad + 0x7*i0; }
2868 3622
2869#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384 3623#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384
2870#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000007f 3624#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000007f
@@ -2873,6 +3627,7 @@ static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
2873{ 3627{
2874 return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK; 3628 return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
2875} 3629}
3630#define A5XX_PC_PRIMITIVE_CNTL_PROVOKING_VTX_LAST 0x00000400
2876 3631
2877#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385 3632#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385
2878#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800 3633#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800
@@ -2900,18 +3655,18 @@ static inline uint32_t A5XX_VFD_CONTROL_0_VTXCNT(uint32_t val)
2900} 3655}
2901 3656
2902#define REG_A5XX_VFD_CONTROL_1 0x0000e401 3657#define REG_A5XX_VFD_CONTROL_1 0x0000e401
3658#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff
3659#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0
3660static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
3661{
3662 return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
3663}
2903#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00 3664#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
2904#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8 3665#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
2905static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val) 3666static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
2906{ 3667{
2907 return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK; 3668 return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
2908} 3669}
2909#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
2910#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
2911static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
2912{
2913 return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
2914}
2915 3670
2916#define REG_A5XX_VFD_CONTROL_2 0x0000e402 3671#define REG_A5XX_VFD_CONTROL_2 0x0000e402
2917 3672
@@ -2944,18 +3699,15 @@ static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
2944{ 3699{
2945 return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK; 3700 return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
2946} 3701}
3702#define A5XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
2947#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x3ff00000 3703#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x3ff00000
2948#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20 3704#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
2949static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val) 3705static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
2950{ 3706{
2951 return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK; 3707 return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
2952} 3708}
2953#define A5XX_VFD_DECODE_INSTR_SWAP__MASK 0xc0000000 3709#define A5XX_VFD_DECODE_INSTR_UNK30 0x40000000
2954#define A5XX_VFD_DECODE_INSTR_SWAP__SHIFT 30 3710#define A5XX_VFD_DECODE_INSTR_FLOAT 0x80000000
2955static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
2956{
2957 return ((val) << A5XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A5XX_VFD_DECODE_INSTR_SWAP__MASK;
2958}
2959 3711
2960static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; } 3712static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
2961 3713
@@ -2979,88 +3731,107 @@ static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
2979 3731
2980#define REG_A5XX_SP_SP_CNTL 0x0000e580 3732#define REG_A5XX_SP_SP_CNTL 0x0000e580
2981 3733
2982#define REG_A5XX_SP_VS_CONTROL_REG 0x0000e584 3734#define REG_A5XX_SP_VS_CONFIG 0x0000e584
2983#define A5XX_SP_VS_CONTROL_REG_ENABLED 0x00000001 3735#define A5XX_SP_VS_CONFIG_ENABLED 0x00000001
2984#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe 3736#define A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
2985#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 3737#define A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
2986static inline uint32_t A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) 3738static inline uint32_t A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
2987{ 3739{
2988 return ((val) << A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; 3740 return ((val) << A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__MASK;
2989} 3741}
2990#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 3742#define A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
2991#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 3743#define A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__SHIFT 8
2992static inline uint32_t A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) 3744static inline uint32_t A5XX_SP_VS_CONFIG_SHADEROBJOFFSET(uint32_t val)
2993{ 3745{
2994 return ((val) << A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK; 3746 return ((val) << A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__MASK;
2995} 3747}
2996 3748
2997#define REG_A5XX_SP_FS_CONTROL_REG 0x0000e585 3749#define REG_A5XX_SP_FS_CONFIG 0x0000e585
2998#define A5XX_SP_FS_CONTROL_REG_ENABLED 0x00000001 3750#define A5XX_SP_FS_CONFIG_ENABLED 0x00000001
2999#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe 3751#define A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
3000#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 3752#define A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
3001static inline uint32_t A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) 3753static inline uint32_t A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
3002{ 3754{
3003 return ((val) << A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; 3755 return ((val) << A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__MASK;
3004} 3756}
3005#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 3757#define A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
3006#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 3758#define A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__SHIFT 8
3007static inline uint32_t A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) 3759static inline uint32_t A5XX_SP_FS_CONFIG_SHADEROBJOFFSET(uint32_t val)
3008{ 3760{
3009 return ((val) << A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK; 3761 return ((val) << A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__MASK;
3010} 3762}
3011 3763
3012#define REG_A5XX_SP_HS_CONTROL_REG 0x0000e586 3764#define REG_A5XX_SP_HS_CONFIG 0x0000e586
3013#define A5XX_SP_HS_CONTROL_REG_ENABLED 0x00000001 3765#define A5XX_SP_HS_CONFIG_ENABLED 0x00000001
3014#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe 3766#define A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
3015#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 3767#define A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
3016static inline uint32_t A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) 3768static inline uint32_t A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
3017{ 3769{
3018 return ((val) << A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; 3770 return ((val) << A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__MASK;
3019} 3771}
3020#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 3772#define A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
3021#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 3773#define A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__SHIFT 8
3022static inline uint32_t A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) 3774static inline uint32_t A5XX_SP_HS_CONFIG_SHADEROBJOFFSET(uint32_t val)
3023{ 3775{
3024 return ((val) << A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK; 3776 return ((val) << A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__MASK;
3025} 3777}
3026 3778
3027#define REG_A5XX_SP_DS_CONTROL_REG 0x0000e587 3779#define REG_A5XX_SP_DS_CONFIG 0x0000e587
3028#define A5XX_SP_DS_CONTROL_REG_ENABLED 0x00000001 3780#define A5XX_SP_DS_CONFIG_ENABLED 0x00000001
3029#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe 3781#define A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
3030#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 3782#define A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
3031static inline uint32_t A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) 3783static inline uint32_t A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
3032{ 3784{
3033 return ((val) << A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; 3785 return ((val) << A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__MASK;
3034} 3786}
3035#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 3787#define A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
3036#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 3788#define A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__SHIFT 8
3037static inline uint32_t A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) 3789static inline uint32_t A5XX_SP_DS_CONFIG_SHADEROBJOFFSET(uint32_t val)
3038{ 3790{
3039 return ((val) << A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK; 3791 return ((val) << A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__MASK;
3040} 3792}
3041 3793
3042#define REG_A5XX_SP_GS_CONTROL_REG 0x0000e588 3794#define REG_A5XX_SP_GS_CONFIG 0x0000e588
3043#define A5XX_SP_GS_CONTROL_REG_ENABLED 0x00000001 3795#define A5XX_SP_GS_CONFIG_ENABLED 0x00000001
3044#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe 3796#define A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
3045#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 3797#define A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
3046static inline uint32_t A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) 3798static inline uint32_t A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
3047{ 3799{
3048 return ((val) << A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; 3800 return ((val) << A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__MASK;
3049} 3801}
3050#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 3802#define A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
3051#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 3803#define A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__SHIFT 8
3052static inline uint32_t A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) 3804static inline uint32_t A5XX_SP_GS_CONFIG_SHADEROBJOFFSET(uint32_t val)
3053{ 3805{
3054 return ((val) << A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK; 3806 return ((val) << A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__MASK;
3055} 3807}
3056 3808
3057#define REG_A5XX_SP_CS_CONFIG 0x0000e589 3809#define REG_A5XX_SP_CS_CONFIG 0x0000e589
3810#define A5XX_SP_CS_CONFIG_ENABLED 0x00000001
3811#define A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
3812#define A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
3813static inline uint32_t A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
3814{
3815 return ((val) << A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__MASK;
3816}
3817#define A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
3818#define A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__SHIFT 8
3819static inline uint32_t A5XX_SP_CS_CONFIG_SHADEROBJOFFSET(uint32_t val)
3820{
3821 return ((val) << A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__MASK;
3822}
3058 3823
3059#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a 3824#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a
3060 3825
3061#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b 3826#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b
3062 3827
3063#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590 3828#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590
3829#define A5XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00000008
3830#define A5XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 3
3831static inline uint32_t A5XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
3832{
3833 return ((val) << A5XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
3834}
3064#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 3835#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
3065#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 3836#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
3066static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) 3837static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
@@ -3075,13 +3846,19 @@ static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
3075} 3846}
3076#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000 3847#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000
3077#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000 3848#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000
3849#define A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
3850#define A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 25
3851static inline uint32_t A5XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
3852{
3853 return ((val) << A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
3854}
3078 3855
3079#define REG_A5XX_SP_PRIMITIVE_CNTL 0x0000e592 3856#define REG_A5XX_SP_PRIMITIVE_CNTL 0x0000e592
3080#define A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000001f 3857#define A5XX_SP_PRIMITIVE_CNTL_VSOUT__MASK 0x0000001f
3081#define A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0 3858#define A5XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT 0
3082static inline uint32_t A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val) 3859static inline uint32_t A5XX_SP_PRIMITIVE_CNTL_VSOUT(uint32_t val)
3083{ 3860{
3084 return ((val >> 2) << A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK; 3861 return ((val) << A5XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT) & A5XX_SP_PRIMITIVE_CNTL_VSOUT__MASK;
3085} 3862}
3086 3863
3087static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; } 3864static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
@@ -3147,6 +3924,12 @@ static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
3147#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad 3924#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad
3148 3925
3149#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0 3926#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0
3927#define A5XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00000008
3928#define A5XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 3
3929static inline uint32_t A5XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
3930{
3931 return ((val) << A5XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
3932}
3150#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 3933#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
3151#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 3934#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
3152static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) 3935static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
@@ -3161,6 +3944,12 @@ static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
3161} 3944}
3162#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000 3945#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000
3163#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000 3946#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000
3947#define A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
3948#define A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 25
3949static inline uint32_t A5XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
3950{
3951 return ((val) << A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK;
3952}
3164 3953
3165#define REG_A5XX_UNKNOWN_E5C2 0x0000e5c2 3954#define REG_A5XX_UNKNOWN_E5C2 0x0000e5c2
3166 3955
@@ -3169,6 +3958,8 @@ static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
3169#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4 3958#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4
3170 3959
3171#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9 3960#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
3961#define A5XX_SP_BLEND_CNTL_ENABLED 0x00000001
3962#define A5XX_SP_BLEND_CNTL_UNK8 0x00000100
3172 3963
3173#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca 3964#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
3174#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f 3965#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
@@ -3210,15 +4001,66 @@ static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
3210{ 4001{
3211 return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK; 4002 return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
3212} 4003}
4004#define A5XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
3213 4005
3214#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db 4006#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db
3215 4007
3216#define REG_A5XX_SP_CS_CNTL_0 0x0000e5f0 4008#define REG_A5XX_UNKNOWN_E5F2 0x0000e5f2
4009
4010#define REG_A5XX_SP_CS_OBJ_START_LO 0x0000e5f3
4011
4012#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4
4013
4014#define REG_A5XX_SP_CS_CTRL_REG0 0x0000e5f0
4015#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00000008
4016#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 3
4017static inline uint32_t A5XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
4018{
4019 return ((val) << A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
4020}
4021#define A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
4022#define A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
4023static inline uint32_t A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
4024{
4025 return ((val) << A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
4026}
4027#define A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
4028#define A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
4029static inline uint32_t A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
4030{
4031 return ((val) << A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
4032}
4033#define A5XX_SP_CS_CTRL_REG0_VARYING 0x00010000
4034#define A5XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x00100000
4035#define A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
4036#define A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 25
4037static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
4038{
4039 return ((val) << A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
4040}
3217 4041
3218#define REG_A5XX_UNKNOWN_E600 0x0000e600 4042#define REG_A5XX_UNKNOWN_E600 0x0000e600
3219 4043
4044#define REG_A5XX_UNKNOWN_E602 0x0000e602
4045
4046#define REG_A5XX_SP_HS_OBJ_START_LO 0x0000e603
4047
4048#define REG_A5XX_SP_HS_OBJ_START_HI 0x0000e604
4049
4050#define REG_A5XX_UNKNOWN_E62B 0x0000e62b
4051
4052#define REG_A5XX_SP_DS_OBJ_START_LO 0x0000e62c
4053
4054#define REG_A5XX_SP_DS_OBJ_START_HI 0x0000e62d
4055
3220#define REG_A5XX_UNKNOWN_E640 0x0000e640 4056#define REG_A5XX_UNKNOWN_E640 0x0000e640
3221 4057
4058#define REG_A5XX_UNKNOWN_E65B 0x0000e65b
4059
4060#define REG_A5XX_SP_GS_OBJ_START_LO 0x0000e65c
4061
4062#define REG_A5XX_SP_GS_OBJ_START_HI 0x0000e65d
4063
3222#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704 4064#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704
3223#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 4065#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
3224#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 4066#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
@@ -3236,29 +4078,85 @@ static inline uint32_t A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_sample
3236} 4078}
3237#define A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004 4079#define A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
3238 4080
4081#define REG_A5XX_TPL1_TP_BORDER_COLOR_BASE_ADDR_LO 0x0000e706
4082
4083#define REG_A5XX_TPL1_TP_BORDER_COLOR_BASE_ADDR_HI 0x0000e707
4084
3239#define REG_A5XX_TPL1_VS_TEX_COUNT 0x0000e700 4085#define REG_A5XX_TPL1_VS_TEX_COUNT 0x0000e700
3240 4086
4087#define REG_A5XX_TPL1_HS_TEX_COUNT 0x0000e701
4088
4089#define REG_A5XX_TPL1_DS_TEX_COUNT 0x0000e702
4090
4091#define REG_A5XX_TPL1_GS_TEX_COUNT 0x0000e703
4092
3241#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722 4093#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722
3242 4094
3243#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723 4095#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723
3244 4096
4097#define REG_A5XX_TPL1_HS_TEX_SAMP_LO 0x0000e724
4098
4099#define REG_A5XX_TPL1_HS_TEX_SAMP_HI 0x0000e725
4100
4101#define REG_A5XX_TPL1_DS_TEX_SAMP_LO 0x0000e726
4102
4103#define REG_A5XX_TPL1_DS_TEX_SAMP_HI 0x0000e727
4104
4105#define REG_A5XX_TPL1_GS_TEX_SAMP_LO 0x0000e728
4106
4107#define REG_A5XX_TPL1_GS_TEX_SAMP_HI 0x0000e729
4108
3245#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a 4109#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a
3246 4110
3247#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b 4111#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b
3248 4112
4113#define REG_A5XX_TPL1_HS_TEX_CONST_LO 0x0000e72c
4114
4115#define REG_A5XX_TPL1_HS_TEX_CONST_HI 0x0000e72d
4116
4117#define REG_A5XX_TPL1_DS_TEX_CONST_LO 0x0000e72e
4118
4119#define REG_A5XX_TPL1_DS_TEX_CONST_HI 0x0000e72f
4120
4121#define REG_A5XX_TPL1_GS_TEX_CONST_LO 0x0000e730
4122
4123#define REG_A5XX_TPL1_GS_TEX_CONST_HI 0x0000e731
4124
3249#define REG_A5XX_TPL1_FS_TEX_COUNT 0x0000e750 4125#define REG_A5XX_TPL1_FS_TEX_COUNT 0x0000e750
3250 4126
4127#define REG_A5XX_TPL1_CS_TEX_COUNT 0x0000e751
4128
3251#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75a 4129#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75a
3252 4130
3253#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75b 4131#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75b
3254 4132
4133#define REG_A5XX_TPL1_CS_TEX_SAMP_LO 0x0000e75c
4134
4135#define REG_A5XX_TPL1_CS_TEX_SAMP_HI 0x0000e75d
4136
3255#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75e 4137#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75e
3256 4138
3257#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75f 4139#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75f
3258 4140
4141#define REG_A5XX_TPL1_CS_TEX_CONST_LO 0x0000e760
4142
4143#define REG_A5XX_TPL1_CS_TEX_CONST_HI 0x0000e761
4144
3259#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764 4145#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764
3260 4146
3261#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784 4147#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784
4148#define A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000001
4149#define A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 0
4150static inline uint32_t A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
4151{
4152 return ((val) << A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
4153}
4154#define A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__MASK 0x00000004
4155#define A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__SHIFT 2
4156static inline uint32_t A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE(enum a3xx_threadsize val)
4157{
4158 return ((val) << A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__SHIFT) & A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__MASK;
4159}
3262 4160
3263#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785 4161#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785
3264#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x0000003f 4162#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x0000003f
@@ -3300,84 +4198,98 @@ static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
3300 4198
3301#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a 4199#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a
3302 4200
3303#define REG_A5XX_HLSQ_VS_CONTROL_REG 0x0000e78b 4201#define REG_A5XX_HLSQ_VS_CONFIG 0x0000e78b
3304#define A5XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00000001 4202#define A5XX_HLSQ_VS_CONFIG_ENABLED 0x00000001
3305#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe 4203#define A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
3306#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 4204#define A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
3307static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) 4205static inline uint32_t A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
3308{ 4206{
3309 return ((val) << A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; 4207 return ((val) << A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__MASK;
3310} 4208}
3311#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 4209#define A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
3312#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 4210#define A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__SHIFT 8
3313static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) 4211static inline uint32_t A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET(uint32_t val)
3314{ 4212{
3315 return ((val) << A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK; 4213 return ((val) << A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__MASK;
3316} 4214}
3317 4215
3318#define REG_A5XX_HLSQ_FS_CONTROL_REG 0x0000e78c 4216#define REG_A5XX_HLSQ_FS_CONFIG 0x0000e78c
3319#define A5XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00000001 4217#define A5XX_HLSQ_FS_CONFIG_ENABLED 0x00000001
3320#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe 4218#define A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
3321#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 4219#define A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
3322static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) 4220static inline uint32_t A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
3323{ 4221{
3324 return ((val) << A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; 4222 return ((val) << A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__MASK;
3325} 4223}
3326#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 4224#define A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
3327#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 4225#define A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__SHIFT 8
3328static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) 4226static inline uint32_t A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET(uint32_t val)
3329{ 4227{
3330 return ((val) << A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK; 4228 return ((val) << A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__MASK;
3331} 4229}
3332 4230
3333#define REG_A5XX_HLSQ_HS_CONTROL_REG 0x0000e78d 4231#define REG_A5XX_HLSQ_HS_CONFIG 0x0000e78d
3334#define A5XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00000001 4232#define A5XX_HLSQ_HS_CONFIG_ENABLED 0x00000001
3335#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe 4233#define A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
3336#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 4234#define A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
3337static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) 4235static inline uint32_t A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
3338{ 4236{
3339 return ((val) << A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; 4237 return ((val) << A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__MASK;
3340} 4238}
3341#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 4239#define A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
3342#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 4240#define A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__SHIFT 8
3343static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) 4241static inline uint32_t A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET(uint32_t val)
3344{ 4242{
3345 return ((val) << A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK; 4243 return ((val) << A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__MASK;
3346} 4244}
3347 4245
3348#define REG_A5XX_HLSQ_DS_CONTROL_REG 0x0000e78e 4246#define REG_A5XX_HLSQ_DS_CONFIG 0x0000e78e
3349#define A5XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00000001 4247#define A5XX_HLSQ_DS_CONFIG_ENABLED 0x00000001
3350#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe 4248#define A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
3351#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 4249#define A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
3352static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) 4250static inline uint32_t A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
3353{ 4251{
3354 return ((val) << A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; 4252 return ((val) << A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__MASK;
3355} 4253}
3356#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 4254#define A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
3357#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 4255#define A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__SHIFT 8
3358static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) 4256static inline uint32_t A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET(uint32_t val)
3359{ 4257{
3360 return ((val) << A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK; 4258 return ((val) << A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__MASK;
3361} 4259}
3362 4260
3363#define REG_A5XX_HLSQ_GS_CONTROL_REG 0x0000e78f 4261#define REG_A5XX_HLSQ_GS_CONFIG 0x0000e78f
3364#define A5XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00000001 4262#define A5XX_HLSQ_GS_CONFIG_ENABLED 0x00000001
3365#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe 4263#define A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
3366#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 4264#define A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
3367static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) 4265static inline uint32_t A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
3368{ 4266{
3369 return ((val) << A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; 4267 return ((val) << A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__MASK;
3370} 4268}
3371#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 4269#define A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
3372#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 4270#define A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__SHIFT 8
3373static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) 4271static inline uint32_t A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET(uint32_t val)
3374{ 4272{
3375 return ((val) << A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK; 4273 return ((val) << A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__MASK;
3376} 4274}
3377 4275
3378#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790 4276#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790
4277#define A5XX_HLSQ_CS_CONFIG_ENABLED 0x00000001
4278#define A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
4279#define A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
4280static inline uint32_t A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
4281{
4282 return ((val) << A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__MASK;
4283}
4284#define A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
4285#define A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__SHIFT 8
4286static inline uint32_t A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET(uint32_t val)
4287{
4288 return ((val) << A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__MASK;
4289}
3379 4290
3380#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791 4291#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791
4292#define A5XX_HLSQ_VS_CNTL_SSBO_ENABLE 0x00000001
3381#define A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK 0xfffffffe 4293#define A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK 0xfffffffe
3382#define A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT 1 4294#define A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT 1
3383static inline uint32_t A5XX_HLSQ_VS_CNTL_INSTRLEN(uint32_t val) 4295static inline uint32_t A5XX_HLSQ_VS_CNTL_INSTRLEN(uint32_t val)
@@ -3386,6 +4298,7 @@ static inline uint32_t A5XX_HLSQ_VS_CNTL_INSTRLEN(uint32_t val)
3386} 4298}
3387 4299
3388#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792 4300#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792
4301#define A5XX_HLSQ_FS_CNTL_SSBO_ENABLE 0x00000001
3389#define A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK 0xfffffffe 4302#define A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK 0xfffffffe
3390#define A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT 1 4303#define A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT 1
3391static inline uint32_t A5XX_HLSQ_FS_CNTL_INSTRLEN(uint32_t val) 4304static inline uint32_t A5XX_HLSQ_FS_CNTL_INSTRLEN(uint32_t val)
@@ -3394,6 +4307,7 @@ static inline uint32_t A5XX_HLSQ_FS_CNTL_INSTRLEN(uint32_t val)
3394} 4307}
3395 4308
3396#define REG_A5XX_HLSQ_HS_CNTL 0x0000e793 4309#define REG_A5XX_HLSQ_HS_CNTL 0x0000e793
4310#define A5XX_HLSQ_HS_CNTL_SSBO_ENABLE 0x00000001
3397#define A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK 0xfffffffe 4311#define A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK 0xfffffffe
3398#define A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT 1 4312#define A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT 1
3399static inline uint32_t A5XX_HLSQ_HS_CNTL_INSTRLEN(uint32_t val) 4313static inline uint32_t A5XX_HLSQ_HS_CNTL_INSTRLEN(uint32_t val)
@@ -3402,6 +4316,7 @@ static inline uint32_t A5XX_HLSQ_HS_CNTL_INSTRLEN(uint32_t val)
3402} 4316}
3403 4317
3404#define REG_A5XX_HLSQ_DS_CNTL 0x0000e794 4318#define REG_A5XX_HLSQ_DS_CNTL 0x0000e794
4319#define A5XX_HLSQ_DS_CNTL_SSBO_ENABLE 0x00000001
3405#define A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK 0xfffffffe 4320#define A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK 0xfffffffe
3406#define A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT 1 4321#define A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT 1
3407static inline uint32_t A5XX_HLSQ_DS_CNTL_INSTRLEN(uint32_t val) 4322static inline uint32_t A5XX_HLSQ_DS_CNTL_INSTRLEN(uint32_t val)
@@ -3410,6 +4325,7 @@ static inline uint32_t A5XX_HLSQ_DS_CNTL_INSTRLEN(uint32_t val)
3410} 4325}
3411 4326
3412#define REG_A5XX_HLSQ_GS_CNTL 0x0000e795 4327#define REG_A5XX_HLSQ_GS_CNTL 0x0000e795
4328#define A5XX_HLSQ_GS_CNTL_SSBO_ENABLE 0x00000001
3413#define A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK 0xfffffffe 4329#define A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK 0xfffffffe
3414#define A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT 1 4330#define A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT 1
3415static inline uint32_t A5XX_HLSQ_GS_CNTL_INSTRLEN(uint32_t val) 4331static inline uint32_t A5XX_HLSQ_GS_CNTL_INSTRLEN(uint32_t val)
@@ -3418,6 +4334,7 @@ static inline uint32_t A5XX_HLSQ_GS_CNTL_INSTRLEN(uint32_t val)
3418} 4334}
3419 4335
3420#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796 4336#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796
4337#define A5XX_HLSQ_CS_CNTL_SSBO_ENABLE 0x00000001
3421#define A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK 0xfffffffe 4338#define A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK 0xfffffffe
3422#define A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT 1 4339#define A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT 1
3423static inline uint32_t A5XX_HLSQ_CS_CNTL_INSTRLEN(uint32_t val) 4340static inline uint32_t A5XX_HLSQ_CS_CNTL_INSTRLEN(uint32_t val)
@@ -3432,20 +4349,86 @@ static inline uint32_t A5XX_HLSQ_CS_CNTL_INSTRLEN(uint32_t val)
3432#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb 4349#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb
3433 4350
3434#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0 4351#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0
4352#define A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
4353#define A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0
4354static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val)
4355{
4356 return ((val) << A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK;
4357}
4358#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
4359#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2
4360static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val)
4361{
4362 return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK;
4363}
4364#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
4365#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12
4366static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val)
4367{
4368 return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK;
4369}
4370#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
4371#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22
4372static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
4373{
4374 return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK;
4375}
3435 4376
3436#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1 4377#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
4378#define A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__MASK 0xffffffff
4379#define A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__SHIFT 0
4380static inline uint32_t A5XX_HLSQ_CS_NDRANGE_1_SIZE_X(uint32_t val)
4381{
4382 return ((val) << A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__MASK;
4383}
3437 4384
3438#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2 4385#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
3439 4386
3440#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3 4387#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
4388#define A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__MASK 0xffffffff
4389#define A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__SHIFT 0
4390static inline uint32_t A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y(uint32_t val)
4391{
4392 return ((val) << A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__MASK;
4393}
3441 4394
3442#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4 4395#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
3443 4396
3444#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5 4397#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
4398#define A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__MASK 0xffffffff
4399#define A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__SHIFT 0
4400static inline uint32_t A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z(uint32_t val)
4401{
4402 return ((val) << A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__MASK;
4403}
3445 4404
3446#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6 4405#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
3447 4406
3448#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7 4407#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
4408#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
4409#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT 0
4410static inline uint32_t A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val)
4411{
4412 return ((val) << A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK;
4413}
4414#define A5XX_HLSQ_CS_CNTL_0_UNK0__MASK 0x0000ff00
4415#define A5XX_HLSQ_CS_CNTL_0_UNK0__SHIFT 8
4416static inline uint32_t A5XX_HLSQ_CS_CNTL_0_UNK0(uint32_t val)
4417{
4418 return ((val) << A5XX_HLSQ_CS_CNTL_0_UNK0__SHIFT) & A5XX_HLSQ_CS_CNTL_0_UNK0__MASK;
4419}
4420#define A5XX_HLSQ_CS_CNTL_0_UNK1__MASK 0x00ff0000
4421#define A5XX_HLSQ_CS_CNTL_0_UNK1__SHIFT 16
4422static inline uint32_t A5XX_HLSQ_CS_CNTL_0_UNK1(uint32_t val)
4423{
4424 return ((val) << A5XX_HLSQ_CS_CNTL_0_UNK1__SHIFT) & A5XX_HLSQ_CS_CNTL_0_UNK1__MASK;
4425}
4426#define A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
4427#define A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24
4428static inline uint32_t A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
4429{
4430 return ((val) << A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
4431}
3449 4432
3450#define REG_A5XX_HLSQ_CS_CNTL_1 0x0000e7b8 4433#define REG_A5XX_HLSQ_CS_CNTL_1 0x0000e7b8
3451 4434
@@ -3457,16 +4440,12 @@ static inline uint32_t A5XX_HLSQ_CS_CNTL_INSTRLEN(uint32_t val)
3457 4440
3458#define REG_A5XX_UNKNOWN_E7C5 0x0000e7c5 4441#define REG_A5XX_UNKNOWN_E7C5 0x0000e7c5
3459 4442
3460#define REG_A5XX_UNKNOWN_E7CA 0x0000e7ca
3461
3462#define REG_A5XX_HLSQ_FS_CONSTLEN 0x0000e7d7
3463
3464#define REG_A5XX_HLSQ_FS_INSTRLEN 0x0000e7d8
3465
3466#define REG_A5XX_HLSQ_HS_CONSTLEN 0x0000e7c8 4443#define REG_A5XX_HLSQ_HS_CONSTLEN 0x0000e7c8
3467 4444
3468#define REG_A5XX_HLSQ_HS_INSTRLEN 0x0000e7c9 4445#define REG_A5XX_HLSQ_HS_INSTRLEN 0x0000e7c9
3469 4446
4447#define REG_A5XX_UNKNOWN_E7CA 0x0000e7ca
4448
3470#define REG_A5XX_HLSQ_DS_CONSTLEN 0x0000e7cd 4449#define REG_A5XX_HLSQ_DS_CONSTLEN 0x0000e7cd
3471 4450
3472#define REG_A5XX_HLSQ_DS_INSTRLEN 0x0000e7ce 4451#define REG_A5XX_HLSQ_DS_INSTRLEN 0x0000e7ce
@@ -3479,13 +4458,23 @@ static inline uint32_t A5XX_HLSQ_CS_CNTL_INSTRLEN(uint32_t val)
3479 4458
3480#define REG_A5XX_UNKNOWN_E7D4 0x0000e7d4 4459#define REG_A5XX_UNKNOWN_E7D4 0x0000e7d4
3481 4460
4461#define REG_A5XX_HLSQ_FS_CONSTLEN 0x0000e7d7
4462
4463#define REG_A5XX_HLSQ_FS_INSTRLEN 0x0000e7d8
4464
3482#define REG_A5XX_UNKNOWN_E7D9 0x0000e7d9 4465#define REG_A5XX_UNKNOWN_E7D9 0x0000e7d9
3483 4466
3484#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_3 0x0000e7dc 4467#define REG_A5XX_HLSQ_CS_CONSTLEN 0x0000e7dc
4468
4469#define REG_A5XX_HLSQ_CS_INSTRLEN 0x0000e7dd
4470
4471#define REG_A5XX_RB_2D_SRC_SOLID_DW0 0x00002101
3485 4472
3486#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_4 0x0000e7dd 4473#define REG_A5XX_RB_2D_SRC_SOLID_DW1 0x00002102
3487 4474
3488#define REG_A5XX_RB_2D_DST_FILL 0x00002101 4475#define REG_A5XX_RB_2D_SRC_SOLID_DW2 0x00002103
4476
4477#define REG_A5XX_RB_2D_SRC_SOLID_DW3 0x00002104
3489 4478
3490#define REG_A5XX_RB_2D_SRC_INFO 0x00002107 4479#define REG_A5XX_RB_2D_SRC_INFO 0x00002107
3491#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff 4480#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
@@ -3505,6 +4494,20 @@ static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
3505 4494
3506#define REG_A5XX_RB_2D_SRC_HI 0x00002109 4495#define REG_A5XX_RB_2D_SRC_HI 0x00002109
3507 4496
4497#define REG_A5XX_RB_2D_SRC_SIZE 0x0000210a
4498#define A5XX_RB_2D_SRC_SIZE_PITCH__MASK 0x0000ffff
4499#define A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT 0
4500static inline uint32_t A5XX_RB_2D_SRC_SIZE_PITCH(uint32_t val)
4501{
4502 return ((val >> 6) << A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_PITCH__MASK;
4503}
4504#define A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK 0xffff0000
4505#define A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT 16
4506static inline uint32_t A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH(uint32_t val)
4507{
4508 return ((val >> 6) << A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK;
4509}
4510
3508#define REG_A5XX_RB_2D_DST_INFO 0x00002110 4511#define REG_A5XX_RB_2D_DST_INFO 0x00002110
3509#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff 4512#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
3510#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0 4513#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
@@ -3519,14 +4522,28 @@ static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
3519 return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK; 4522 return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
3520} 4523}
3521 4524
3522#define REG_A5XX_RB_2D_SRC_FLAGS_LO 0x00002140
3523
3524#define REG_A5XX_RB_2D_SRC_FLAGS_HI 0x00002141
3525
3526#define REG_A5XX_RB_2D_DST_LO 0x00002111 4525#define REG_A5XX_RB_2D_DST_LO 0x00002111
3527 4526
3528#define REG_A5XX_RB_2D_DST_HI 0x00002112 4527#define REG_A5XX_RB_2D_DST_HI 0x00002112
3529 4528
4529#define REG_A5XX_RB_2D_DST_SIZE 0x00002113
4530#define A5XX_RB_2D_DST_SIZE_PITCH__MASK 0x0000ffff
4531#define A5XX_RB_2D_DST_SIZE_PITCH__SHIFT 0
4532static inline uint32_t A5XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
4533{
4534 return ((val >> 6) << A5XX_RB_2D_DST_SIZE_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_PITCH__MASK;
4535}
4536#define A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK 0xffff0000
4537#define A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT 16
4538static inline uint32_t A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(uint32_t val)
4539{
4540 return ((val >> 6) << A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK;
4541}
4542
4543#define REG_A5XX_RB_2D_SRC_FLAGS_LO 0x00002140
4544
4545#define REG_A5XX_RB_2D_SRC_FLAGS_HI 0x00002141
4546
3530#define REG_A5XX_RB_2D_DST_FLAGS_LO 0x00002143 4547#define REG_A5XX_RB_2D_DST_FLAGS_LO 0x00002143
3531 4548
3532#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144 4549#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144
@@ -3559,6 +4576,12 @@ static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val
3559 return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK; 4576 return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK;
3560} 4577}
3561 4578
4579#define REG_A5XX_UNKNOWN_2100 0x00002100
4580
4581#define REG_A5XX_UNKNOWN_2180 0x00002180
4582
4583#define REG_A5XX_UNKNOWN_2184 0x00002184
4584
3562#define REG_A5XX_TEX_SAMP_0 0x00000000 4585#define REG_A5XX_TEX_SAMP_0 0x00000000
3563#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001 4586#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
3564#define A5XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006 4587#define A5XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
@@ -3628,6 +4651,12 @@ static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
3628} 4651}
3629 4652
3630#define REG_A5XX_TEX_SAMP_2 0x00000002 4653#define REG_A5XX_TEX_SAMP_2 0x00000002
4654#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK 0xfffffff0
4655#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 4
4656static inline uint32_t A5XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val)
4657{
4658 return ((val) << A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
4659}
3631 4660
3632#define REG_A5XX_TEX_SAMP_3 0x00000003 4661#define REG_A5XX_TEX_SAMP_3 0x00000003
3633 4662
@@ -3663,6 +4692,12 @@ static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val)
3663{ 4692{
3664 return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK; 4693 return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK;
3665} 4694}
4695#define A5XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
4696#define A5XX_TEX_CONST_0_MIPLVLS__SHIFT 16
4697static inline uint32_t A5XX_TEX_CONST_0_MIPLVLS(uint32_t val)
4698{
4699 return ((val) << A5XX_TEX_CONST_0_MIPLVLS__SHIFT) & A5XX_TEX_CONST_0_MIPLVLS__MASK;
4700}
3666#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000 4701#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
3667#define A5XX_TEX_CONST_0_FMT__SHIFT 22 4702#define A5XX_TEX_CONST_0_FMT__SHIFT 22
3668static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val) 4703static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 31a9bceed32c..b4b54f1c24bc 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -11,6 +11,12 @@
11 * 11 *
12 */ 12 */
13 13
14#include <linux/types.h>
15#include <linux/cpumask.h>
16#include <linux/qcom_scm.h>
17#include <linux/dma-mapping.h>
18#include <linux/of_reserved_mem.h>
19#include <linux/soc/qcom/mdt_loader.h>
14#include "msm_gem.h" 20#include "msm_gem.h"
15#include "msm_mmu.h" 21#include "msm_mmu.h"
16#include "a5xx_gpu.h" 22#include "a5xx_gpu.h"
@@ -18,6 +24,62 @@
18extern bool hang_debug; 24extern bool hang_debug;
19static void a5xx_dump(struct msm_gpu *gpu); 25static void a5xx_dump(struct msm_gpu *gpu);
20 26
27#define GPU_PAS_ID 13
28
29#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
30
31static int zap_shader_load_mdt(struct device *dev, const char *fwname)
32{
33 const struct firmware *fw;
34 phys_addr_t mem_phys;
35 ssize_t mem_size;
36 void *mem_region = NULL;
37 int ret;
38
39 /* Request the MDT file for the firmware */
40 ret = request_firmware(&fw, fwname, dev);
41 if (ret) {
42 DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
43 return ret;
44 }
45
46 /* Figure out how much memory we need */
47 mem_size = qcom_mdt_get_size(fw);
48 if (mem_size < 0) {
49 ret = mem_size;
50 goto out;
51 }
52
53 /* Allocate memory for the firmware image */
54 mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
55 if (!mem_region) {
56 ret = -ENOMEM;
57 goto out;
58 }
59
60 /* Load the rest of the MDT */
61 ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID, mem_region, mem_phys,
62 mem_size);
63 if (ret)
64 goto out;
65
66 /* Send the image to the secure world */
67 ret = qcom_scm_pas_auth_and_reset(GPU_PAS_ID);
68 if (ret)
69 DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
70
71out:
72 release_firmware(fw);
73
74 return ret;
75}
76#else
77static int zap_shader_load_mdt(struct device *dev, const char *fwname)
78{
79 return -ENODEV;
80}
81#endif
82
21static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, 83static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
22 struct msm_file_private *ctx) 84 struct msm_file_private *ctx)
23{ 85{
@@ -225,7 +287,7 @@ static int a5xx_me_init(struct msm_gpu *gpu)
225 287
226 gpu->funcs->flush(gpu); 288 gpu->funcs->flush(gpu);
227 289
228 return gpu->funcs->idle(gpu) ? 0 : -EINVAL; 290 return a5xx_idle(gpu) ? 0 : -EINVAL;
229} 291}
230 292
231static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu, 293static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
@@ -235,24 +297,21 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
235 struct drm_gem_object *bo; 297 struct drm_gem_object *bo;
236 void *ptr; 298 void *ptr;
237 299
238 mutex_lock(&drm->struct_mutex); 300 bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED);
239 bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED);
240 mutex_unlock(&drm->struct_mutex);
241
242 if (IS_ERR(bo)) 301 if (IS_ERR(bo))
243 return bo; 302 return bo;
244 303
245 ptr = msm_gem_get_vaddr(bo); 304 ptr = msm_gem_get_vaddr(bo);
246 if (!ptr) { 305 if (!ptr) {
247 drm_gem_object_unreference_unlocked(bo); 306 drm_gem_object_unreference(bo);
248 return ERR_PTR(-ENOMEM); 307 return ERR_PTR(-ENOMEM);
249 } 308 }
250 309
251 if (iova) { 310 if (iova) {
252 int ret = msm_gem_get_iova(bo, gpu->id, iova); 311 int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
253 312
254 if (ret) { 313 if (ret) {
255 drm_gem_object_unreference_unlocked(bo); 314 drm_gem_object_unreference(bo);
256 return ERR_PTR(ret); 315 return ERR_PTR(ret);
257 } 316 }
258 } 317 }
@@ -304,6 +363,98 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
304 return 0; 363 return 0;
305} 364}
306 365
366#define SCM_GPU_ZAP_SHADER_RESUME 0
367
368static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
369{
370 int ret;
371
372 ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
373 if (ret)
374 DRM_ERROR("%s: zap-shader resume failed: %d\n",
375 gpu->name, ret);
376
377 return ret;
378}
379
380/* Set up a child device to "own" the zap shader */
381static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
382{
383 struct device_node *node;
384 int ret;
385
386 if (dev->parent)
387 return 0;
388
389 /* Find the sub-node for the zap shader */
390 node = of_get_child_by_name(parent->of_node, "zap-shader");
391 if (!node) {
392 DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
393 return -ENODEV;
394 }
395
396 dev->parent = parent;
397 dev->of_node = node;
398 dev_set_name(dev, "adreno_zap_shader");
399
400 ret = device_register(dev);
401 if (ret) {
402 DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
403 goto out;
404 }
405
406 ret = of_reserved_mem_device_init(dev);
407 if (ret) {
408 DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
409 device_unregister(dev);
410 }
411
412out:
413 if (ret)
414 dev->parent = NULL;
415
416 return ret;
417}
418
419static int a5xx_zap_shader_init(struct msm_gpu *gpu)
420{
421 static bool loaded;
422 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
423 struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
424 struct platform_device *pdev = a5xx_gpu->pdev;
425 int ret;
426
427 /*
428 * If the zap shader is already loaded into memory we just need to kick
429 * the remote processor to reinitialize it
430 */
431 if (loaded)
432 return a5xx_zap_shader_resume(gpu);
433
434 /* We need SCM to be able to load the firmware */
435 if (!qcom_scm_is_available()) {
436 DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
437 return -EPROBE_DEFER;
438 }
439
440 /* Each GPU has a target specific zap shader firmware name to use */
441 if (!adreno_gpu->info->zapfw) {
442 DRM_DEV_ERROR(&pdev->dev,
443 "Zap shader firmware file not specified for this target\n");
444 return -ENODEV;
445 }
446
447 ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);
448
449 if (!ret)
450 ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
451 adreno_gpu->info->zapfw);
452
453 loaded = !ret;
454
455 return ret;
456}
457
307#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \ 458#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
308 A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \ 459 A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
309 A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \ 460 A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
@@ -484,12 +635,31 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
484 OUT_RING(gpu->rb, 0x0F); 635 OUT_RING(gpu->rb, 0x0F);
485 636
486 gpu->funcs->flush(gpu); 637 gpu->funcs->flush(gpu);
487 if (!gpu->funcs->idle(gpu)) 638 if (!a5xx_idle(gpu))
488 return -EINVAL; 639 return -EINVAL;
489 } 640 }
490 641
491 /* Put the GPU into unsecure mode */ 642 /*
492 gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0); 643 * Try to load a zap shader into the secure world. If successful
644 * we can use the CP to switch out of secure mode. If not then we
 645 * have no recourse but to try to switch ourselves out manually. If we
646 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
647 * be blocked and a permissions violation will soon follow.
648 */
649 ret = a5xx_zap_shader_init(gpu);
650 if (!ret) {
651 OUT_PKT7(gpu->rb, CP_SET_SECURE_MODE, 1);
652 OUT_RING(gpu->rb, 0x00000000);
653
654 gpu->funcs->flush(gpu);
655 if (!a5xx_idle(gpu))
656 return -EINVAL;
657 } else {
658 /* Print a warning so if we die, we know why */
659 dev_warn_once(gpu->dev->dev,
660 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
661 gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
662 }
493 663
494 return 0; 664 return 0;
495} 665}
@@ -521,21 +691,24 @@ static void a5xx_destroy(struct msm_gpu *gpu)
521 691
522 DBG("%s", gpu->name); 692 DBG("%s", gpu->name);
523 693
694 if (a5xx_gpu->zap_dev.parent)
695 device_unregister(&a5xx_gpu->zap_dev);
696
524 if (a5xx_gpu->pm4_bo) { 697 if (a5xx_gpu->pm4_bo) {
525 if (a5xx_gpu->pm4_iova) 698 if (a5xx_gpu->pm4_iova)
526 msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->id); 699 msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
527 drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo); 700 drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
528 } 701 }
529 702
530 if (a5xx_gpu->pfp_bo) { 703 if (a5xx_gpu->pfp_bo) {
531 if (a5xx_gpu->pfp_iova) 704 if (a5xx_gpu->pfp_iova)
532 msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->id); 705 msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
533 drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo); 706 drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
534 } 707 }
535 708
536 if (a5xx_gpu->gpmu_bo) { 709 if (a5xx_gpu->gpmu_bo) {
537 if (a5xx_gpu->gpmu_iova) 710 if (a5xx_gpu->gpmu_iova)
538 msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id); 711 msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
539 drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo); 712 drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
540 } 713 }
541 714
@@ -556,7 +729,7 @@ static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
556 A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT); 729 A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
557} 730}
558 731
559static bool a5xx_idle(struct msm_gpu *gpu) 732bool a5xx_idle(struct msm_gpu *gpu)
560{ 733{
561 /* wait for CP to drain ringbuffer: */ 734 /* wait for CP to drain ringbuffer: */
562 if (!adreno_idle(gpu)) 735 if (!adreno_idle(gpu))
@@ -861,7 +1034,6 @@ static const struct adreno_gpu_funcs funcs = {
861 .last_fence = adreno_last_fence, 1034 .last_fence = adreno_last_fence,
862 .submit = a5xx_submit, 1035 .submit = a5xx_submit,
863 .flush = adreno_flush, 1036 .flush = adreno_flush,
864 .idle = a5xx_idle,
865 .irq = a5xx_irq, 1037 .irq = a5xx_irq,
866 .destroy = a5xx_destroy, 1038 .destroy = a5xx_destroy,
867#ifdef CONFIG_DEBUG_FS 1039#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 1590f845d554..6638bc85645d 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -36,6 +36,8 @@ struct a5xx_gpu {
36 uint32_t gpmu_dwords; 36 uint32_t gpmu_dwords;
37 37
38 uint32_t lm_leakage; 38 uint32_t lm_leakage;
39
40 struct device zap_dev;
39}; 41};
40 42
41#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) 43#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -56,5 +58,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
56 return -ETIMEDOUT; 58 return -ETIMEDOUT;
57} 59}
58 60
61bool a5xx_idle(struct msm_gpu *gpu);
59 62
60#endif /* __A5XX_GPU_H__ */ 63#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 72d52c71f769..87af6eea0483 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -194,7 +194,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
194 194
195 gpu->funcs->flush(gpu); 195 gpu->funcs->flush(gpu);
196 196
197 if (!gpu->funcs->idle(gpu)) { 197 if (!a5xx_idle(gpu)) {
198 DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n", 198 DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
199 gpu->name); 199 gpu->name);
200 return -EINVAL; 200 return -EINVAL;
@@ -294,14 +294,12 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
294 */ 294 */
295 bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2; 295 bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
296 296
297 mutex_lock(&drm->struct_mutex); 297 a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED);
298 a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED);
299 mutex_unlock(&drm->struct_mutex);
300
301 if (IS_ERR(a5xx_gpu->gpmu_bo)) 298 if (IS_ERR(a5xx_gpu->gpmu_bo))
302 goto err; 299 goto err;
303 300
304 if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova)) 301 if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
302 &a5xx_gpu->gpmu_iova))
305 goto err; 303 goto err;
306 304
307 ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo); 305 ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
@@ -330,9 +328,9 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
330 328
331err: 329err:
332 if (a5xx_gpu->gpmu_iova) 330 if (a5xx_gpu->gpmu_iova)
333 msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id); 331 msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
334 if (a5xx_gpu->gpmu_bo) 332 if (a5xx_gpu->gpmu_bo)
335 drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo); 333 drm_gem_object_unreference(a5xx_gpu->gpmu_bo);
336 334
337 a5xx_gpu->gpmu_bo = NULL; 335 a5xx_gpu->gpmu_bo = NULL;
338 a5xx_gpu->gpmu_iova = 0; 336 a5xx_gpu->gpmu_iova = 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 4a33ba6f1244..b634cf71352b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
18- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05) 18- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
19- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) 19- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
20 20
21Copyright (C) 2013-2016 by the following authors: 21Copyright (C) 2013-2017 by the following authors:
22- Rob Clark <robdclark@gmail.com> (robclark) 22- Rob Clark <robdclark@gmail.com> (robclark)
23- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 23- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
24 24
@@ -421,6 +421,35 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
421#define REG_AXXX_CP_IB2_BUFSZ 0x0000045b 421#define REG_AXXX_CP_IB2_BUFSZ 0x0000045b
422 422
423#define REG_AXXX_CP_STAT 0x0000047f 423#define REG_AXXX_CP_STAT 0x0000047f
424#define AXXX_CP_STAT_CP_BUSY 0x80000000
425#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY 0x40000000
426#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY 0x20000000
427#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY 0x10000000
428#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY 0x08000000
429#define AXXX_CP_STAT_ME_BUSY 0x04000000
430#define AXXX_CP_STAT_MIU_WR_C_BUSY 0x02000000
431#define AXXX_CP_STAT_CP_3D_BUSY 0x00800000
432#define AXXX_CP_STAT_CP_NRT_BUSY 0x00400000
433#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY 0x00200000
434#define AXXX_CP_STAT_RCIU_ME_BUSY 0x00100000
435#define AXXX_CP_STAT_RCIU_PFP_BUSY 0x00080000
436#define AXXX_CP_STAT_MEQ_RING_BUSY 0x00040000
437#define AXXX_CP_STAT_PFP_BUSY 0x00020000
438#define AXXX_CP_STAT_ST_QUEUE_BUSY 0x00010000
439#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY 0x00002000
440#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY 0x00001000
441#define AXXX_CP_STAT_RING_QUEUE_BUSY 0x00000800
442#define AXXX_CP_STAT_CSF_BUSY 0x00000400
443#define AXXX_CP_STAT_CSF_ST_BUSY 0x00000200
444#define AXXX_CP_STAT_EVENT_BUSY 0x00000100
445#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY 0x00000080
446#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY 0x00000040
447#define AXXX_CP_STAT_CSF_RING_BUSY 0x00000020
448#define AXXX_CP_STAT_RCIU_BUSY 0x00000010
449#define AXXX_CP_STAT_RBIU_BUSY 0x00000008
450#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY 0x00000004
451#define AXXX_CP_STAT_MIU_RD_REQ_BUSY 0x00000002
452#define AXXX_CP_STAT_MIU_WR_BUSY 0x00000001
424 453
425#define REG_AXXX_CP_SCRATCH_REG0 0x00000578 454#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
426 455
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index c0fa5d1c75ff..c75c4df4bc39 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -86,6 +86,7 @@ static const struct adreno_info gpulist[] = {
86 ADRENO_QUIRK_FAULT_DETECT_MASK, 86 ADRENO_QUIRK_FAULT_DETECT_MASK,
87 .init = a5xx_gpu_init, 87 .init = a5xx_gpu_init,
88 .gpmufw = "a530v3_gpmu.fw2", 88 .gpmufw = "a530v3_gpmu.fw2",
89 .zapfw = "a530_zap.mdt",
89 }, 90 },
90}; 91};
91 92
@@ -158,7 +159,9 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
158 int ret; 159 int ret;
159 160
160 pm_runtime_get_sync(&pdev->dev); 161 pm_runtime_get_sync(&pdev->dev);
162 mutex_lock(&dev->struct_mutex);
161 ret = msm_gpu_hw_init(gpu); 163 ret = msm_gpu_hw_init(gpu);
164 mutex_unlock(&dev->struct_mutex);
162 pm_runtime_put_sync(&pdev->dev); 165 pm_runtime_put_sync(&pdev->dev);
163 if (ret) { 166 if (ret) {
164 dev_err(dev->dev, "gpu hw init failed: %d\n", ret); 167 dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 5b63fc649dcc..f1ab2703674a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
64 64
65 DBG("%s", gpu->name); 65 DBG("%s", gpu->name);
66 66
67 ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova); 67 ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
68 if (ret) { 68 if (ret) {
69 gpu->rb_iova = 0; 69 gpu->rb_iova = 0;
70 dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret); 70 dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -77,7 +77,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
77 /* reset completed fence seqno: */ 77 /* reset completed fence seqno: */
78 adreno_gpu->memptrs->fence = gpu->fctx->completed_fence; 78 adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
79 adreno_gpu->memptrs->rptr = 0; 79 adreno_gpu->memptrs->rptr = 0;
80 adreno_gpu->memptrs->wptr = 0;
81 80
82 /* Setup REG_CP_RB_CNTL: */ 81 /* Setup REG_CP_RB_CNTL: */
83 adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL, 82 adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
@@ -258,7 +257,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
258 seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence, 257 seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
259 gpu->fctx->last_fence); 258 gpu->fctx->last_fence);
260 seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu)); 259 seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu));
261 seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
262 seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); 260 seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
263 261
264 /* dump these out in a form that can be parsed by demsm: */ 262 /* dump these out in a form that can be parsed by demsm: */
@@ -294,7 +292,6 @@ void adreno_dump_info(struct msm_gpu *gpu)
294 printk("fence: %d/%d\n", adreno_gpu->memptrs->fence, 292 printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
295 gpu->fctx->last_fence); 293 gpu->fctx->last_fence);
296 printk("rptr: %d\n", get_rptr(adreno_gpu)); 294 printk("rptr: %d\n", get_rptr(adreno_gpu));
297 printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
298 printk("rb wptr: %d\n", get_wptr(gpu->rb)); 295 printk("rb wptr: %d\n", get_wptr(gpu->rb));
299} 296}
300 297
@@ -342,6 +339,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
342 struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs) 339 struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
343{ 340{
344 struct adreno_platform_config *config = pdev->dev.platform_data; 341 struct adreno_platform_config *config = pdev->dev.platform_data;
342 struct msm_gpu_config adreno_gpu_config = { 0 };
345 struct msm_gpu *gpu = &adreno_gpu->base; 343 struct msm_gpu *gpu = &adreno_gpu->base;
346 int ret; 344 int ret;
347 345
@@ -360,9 +358,16 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
360 DBG("fast_rate=%u, slow_rate=27000000, bus_freq=%u", 358 DBG("fast_rate=%u, slow_rate=27000000, bus_freq=%u",
361 gpu->fast_rate, gpu->bus_freq); 359 gpu->fast_rate, gpu->bus_freq);
362 360
361 adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
362 adreno_gpu_config.irqname = "kgsl_3d0_irq";
363
364 adreno_gpu_config.va_start = SZ_16M;
365 adreno_gpu_config.va_end = 0xffffffff;
366
367 adreno_gpu_config.ringsz = RB_SIZE;
368
363 ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base, 369 ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
364 adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq", 370 adreno_gpu->info->name, &adreno_gpu_config);
365 RB_SIZE);
366 if (ret) 371 if (ret)
367 return ret; 372 return ret;
368 373
@@ -392,10 +397,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
392 return ret; 397 return ret;
393 } 398 }
394 399
395 mutex_lock(&drm->struct_mutex);
396 adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs), 400 adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
397 MSM_BO_UNCACHED); 401 MSM_BO_UNCACHED);
398 mutex_unlock(&drm->struct_mutex);
399 if (IS_ERR(adreno_gpu->memptrs_bo)) { 402 if (IS_ERR(adreno_gpu->memptrs_bo)) {
400 ret = PTR_ERR(adreno_gpu->memptrs_bo); 403 ret = PTR_ERR(adreno_gpu->memptrs_bo);
401 adreno_gpu->memptrs_bo = NULL; 404 adreno_gpu->memptrs_bo = NULL;
@@ -409,7 +412,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
409 return -ENOMEM; 412 return -ENOMEM;
410 } 413 }
411 414
412 ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id, 415 ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
413 &adreno_gpu->memptrs_iova); 416 &adreno_gpu->memptrs_iova);
414 if (ret) { 417 if (ret) {
415 dev_err(drm->dev, "could not map memptrs: %d\n", ret); 418 dev_err(drm->dev, "could not map memptrs: %d\n", ret);
@@ -428,7 +431,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
428 msm_gem_put_vaddr(adreno_gpu->memptrs_bo); 431 msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
429 432
430 if (adreno_gpu->memptrs_iova) 433 if (adreno_gpu->memptrs_iova)
431 msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id); 434 msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->aspace);
432 435
433 drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo); 436 drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
434 } 437 }
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index fb4831f9f80b..4d9165f29f43 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -77,6 +77,7 @@ struct adreno_info {
77 uint32_t gmem; 77 uint32_t gmem;
78 enum adreno_quirks quirks; 78 enum adreno_quirks quirks;
79 struct msm_gpu *(*init)(struct drm_device *dev); 79 struct msm_gpu *(*init)(struct drm_device *dev);
80 const char *zapfw;
80}; 81};
81 82
82const struct adreno_info *adreno_info(struct adreno_rev rev); 83const struct adreno_info *adreno_info(struct adreno_rev rev);
@@ -86,7 +87,6 @@ const struct adreno_info *adreno_info(struct adreno_rev rev);
86 87
87struct adreno_rbmemptrs { 88struct adreno_rbmemptrs {
88 volatile uint32_t rptr; 89 volatile uint32_t rptr;
89 volatile uint32_t wptr;
90 volatile uint32_t fence; 90 volatile uint32_t fence;
91}; 91};
92 92
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 6a2930e75503..fb605a3534cf 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
18- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05) 18- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
19- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) 19- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
20 20
21Copyright (C) 2013-2016 by the following authors: 21Copyright (C) 2013-2017 by the following authors:
22- Rob Clark <robdclark@gmail.com> (robclark) 22- Rob Clark <robdclark@gmail.com> (robclark)
23- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 23- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
24 24
@@ -67,10 +67,18 @@ enum vgt_event_type {
67 PERFCOUNTER_STOP = 24, 67 PERFCOUNTER_STOP = 24,
68 VS_FETCH_DONE = 27, 68 VS_FETCH_DONE = 27,
69 FACENESS_FLUSH = 28, 69 FACENESS_FLUSH = 28,
70 FLUSH_SO_0 = 17,
71 FLUSH_SO_1 = 18,
72 FLUSH_SO_2 = 19,
73 FLUSH_SO_3 = 20,
74 UNK_19 = 25,
70 UNK_1C = 28, 75 UNK_1C = 28,
71 UNK_1D = 29, 76 UNK_1D = 29,
72 BLIT = 30, 77 BLIT = 30,
73 UNK_26 = 38, 78 UNK_25 = 37,
79 LRZ_FLUSH = 38,
80 UNK_2C = 44,
81 UNK_2D = 45,
74}; 82};
75 83
76enum pc_di_primtype { 84enum pc_di_primtype {
@@ -134,11 +142,13 @@ enum adreno_pm4_type3_packets {
134 CP_WAIT_IB_PFD_COMPLETE = 93, 142 CP_WAIT_IB_PFD_COMPLETE = 93,
135 CP_REG_RMW = 33, 143 CP_REG_RMW = 33,
136 CP_SET_BIN_DATA = 47, 144 CP_SET_BIN_DATA = 47,
145 CP_SET_BIN_DATA5 = 47,
137 CP_REG_TO_MEM = 62, 146 CP_REG_TO_MEM = 62,
138 CP_MEM_WRITE = 61, 147 CP_MEM_WRITE = 61,
139 CP_MEM_WRITE_CNTR = 79, 148 CP_MEM_WRITE_CNTR = 79,
140 CP_COND_EXEC = 68, 149 CP_COND_EXEC = 68,
141 CP_COND_WRITE = 69, 150 CP_COND_WRITE = 69,
151 CP_COND_WRITE5 = 69,
142 CP_EVENT_WRITE = 70, 152 CP_EVENT_WRITE = 70,
143 CP_EVENT_WRITE_SHD = 88, 153 CP_EVENT_WRITE_SHD = 88,
144 CP_EVENT_WRITE_CFL = 89, 154 CP_EVENT_WRITE_CFL = 89,
@@ -165,6 +175,7 @@ enum adreno_pm4_type3_packets {
165 CP_SET_PROTECTED_MODE = 95, 175 CP_SET_PROTECTED_MODE = 95,
166 CP_BOOTSTRAP_UCODE = 111, 176 CP_BOOTSTRAP_UCODE = 111,
167 CP_LOAD_STATE = 48, 177 CP_LOAD_STATE = 48,
178 CP_LOAD_STATE4 = 48,
168 CP_COND_INDIRECT_BUFFER_PFE = 58, 179 CP_COND_INDIRECT_BUFFER_PFE = 58,
169 CP_COND_INDIRECT_BUFFER_PFD = 50, 180 CP_COND_INDIRECT_BUFFER_PFD = 50,
170 CP_INDIRECT_BUFFER_PFE = 63, 181 CP_INDIRECT_BUFFER_PFE = 63,
@@ -204,6 +215,7 @@ enum adreno_pm4_type3_packets {
204 CP_COMPUTE_CHECKPOINT = 110, 215 CP_COMPUTE_CHECKPOINT = 110,
205 CP_MEM_TO_MEM = 115, 216 CP_MEM_TO_MEM = 115,
206 CP_BLIT = 44, 217 CP_BLIT = 44,
218 CP_UNK_39 = 57,
207 IN_IB_PREFETCH_END = 23, 219 IN_IB_PREFETCH_END = 23,
208 IN_SUBBLK_PREFETCH = 31, 220 IN_SUBBLK_PREFETCH = 31,
209 IN_INSTR_PREFETCH = 32, 221 IN_INSTR_PREFETCH = 32,
@@ -239,21 +251,61 @@ enum adreno_state_src {
239 SS_INDIRECT_STM = 6, 251 SS_INDIRECT_STM = 6,
240}; 252};
241 253
254enum a4xx_state_block {
255 SB4_VS_TEX = 0,
256 SB4_HS_TEX = 1,
257 SB4_DS_TEX = 2,
258 SB4_GS_TEX = 3,
259 SB4_FS_TEX = 4,
260 SB4_CS_TEX = 5,
261 SB4_VS_SHADER = 8,
262 SB4_HS_SHADER = 9,
263 SB4_DS_SHADER = 10,
264 SB4_GS_SHADER = 11,
265 SB4_FS_SHADER = 12,
266 SB4_CS_SHADER = 13,
267 SB4_SSBO = 14,
268 SB4_CS_SSBO = 15,
269};
270
271enum a4xx_state_type {
272 ST4_SHADER = 0,
273 ST4_CONSTANTS = 1,
274};
275
276enum a4xx_state_src {
277 SS4_DIRECT = 0,
278 SS4_INDIRECT = 2,
279};
280
242enum a4xx_index_size { 281enum a4xx_index_size {
243 INDEX4_SIZE_8_BIT = 0, 282 INDEX4_SIZE_8_BIT = 0,
244 INDEX4_SIZE_16_BIT = 1, 283 INDEX4_SIZE_16_BIT = 1,
245 INDEX4_SIZE_32_BIT = 2, 284 INDEX4_SIZE_32_BIT = 2,
246}; 285};
247 286
287enum cp_cond_function {
288 WRITE_ALWAYS = 0,
289 WRITE_LT = 1,
290 WRITE_LE = 2,
291 WRITE_EQ = 3,
292 WRITE_NE = 4,
293 WRITE_GE = 5,
294 WRITE_GT = 6,
295};
296
248enum render_mode_cmd { 297enum render_mode_cmd {
249 BYPASS = 1, 298 BYPASS = 1,
299 BINNING = 2,
250 GMEM = 3, 300 GMEM = 3,
251 BLIT2D = 5, 301 BLIT2D = 5,
302 BLIT2DSCALE = 7,
252}; 303};
253 304
254enum cp_blit_cmd { 305enum cp_blit_cmd {
255 BLIT_OP_FILL = 0, 306 BLIT_OP_FILL = 0,
256 BLIT_OP_BLIT = 1, 307 BLIT_OP_COPY = 1,
308 BLIT_OP_SCALE = 3,
257}; 309};
258 310
259#define REG_CP_LOAD_STATE_0 0x00000000 311#define REG_CP_LOAD_STATE_0 0x00000000
@@ -296,12 +348,52 @@ static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
296 return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK; 348 return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
297} 349}
298 350
299#define REG_CP_LOAD_STATE_2 0x00000002 351#define REG_CP_LOAD_STATE4_0 0x00000000
300#define CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__MASK 0xffffffff 352#define CP_LOAD_STATE4_0_DST_OFF__MASK 0x0000ffff
301#define CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__SHIFT 0 353#define CP_LOAD_STATE4_0_DST_OFF__SHIFT 0
302static inline uint32_t CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(uint32_t val) 354static inline uint32_t CP_LOAD_STATE4_0_DST_OFF(uint32_t val)
355{
356 return ((val) << CP_LOAD_STATE4_0_DST_OFF__SHIFT) & CP_LOAD_STATE4_0_DST_OFF__MASK;
357}
358#define CP_LOAD_STATE4_0_STATE_SRC__MASK 0x00030000
359#define CP_LOAD_STATE4_0_STATE_SRC__SHIFT 16
360static inline uint32_t CP_LOAD_STATE4_0_STATE_SRC(enum a4xx_state_src val)
361{
362 return ((val) << CP_LOAD_STATE4_0_STATE_SRC__SHIFT) & CP_LOAD_STATE4_0_STATE_SRC__MASK;
363}
364#define CP_LOAD_STATE4_0_STATE_BLOCK__MASK 0x003c0000
365#define CP_LOAD_STATE4_0_STATE_BLOCK__SHIFT 18
366static inline uint32_t CP_LOAD_STATE4_0_STATE_BLOCK(enum a4xx_state_block val)
367{
368 return ((val) << CP_LOAD_STATE4_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE4_0_STATE_BLOCK__MASK;
369}
370#define CP_LOAD_STATE4_0_NUM_UNIT__MASK 0xffc00000
371#define CP_LOAD_STATE4_0_NUM_UNIT__SHIFT 22
372static inline uint32_t CP_LOAD_STATE4_0_NUM_UNIT(uint32_t val)
373{
374 return ((val) << CP_LOAD_STATE4_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE4_0_NUM_UNIT__MASK;
375}
376
377#define REG_CP_LOAD_STATE4_1 0x00000001
378#define CP_LOAD_STATE4_1_STATE_TYPE__MASK 0x00000003
379#define CP_LOAD_STATE4_1_STATE_TYPE__SHIFT 0
380static inline uint32_t CP_LOAD_STATE4_1_STATE_TYPE(enum a4xx_state_type val)
381{
382 return ((val) << CP_LOAD_STATE4_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE4_1_STATE_TYPE__MASK;
383}
384#define CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK 0xfffffffc
385#define CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT 2
386static inline uint32_t CP_LOAD_STATE4_1_EXT_SRC_ADDR(uint32_t val)
387{
388 return ((val >> 2) << CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK;
389}
390
391#define REG_CP_LOAD_STATE4_2 0x00000002
392#define CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK 0xffffffff
393#define CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT 0
394static inline uint32_t CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(uint32_t val)
303{ 395{
304 return ((val) << CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__MASK; 396 return ((val) << CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK;
305} 397}
306 398
307#define REG_CP_DRAW_INDX_0 0x00000000 399#define REG_CP_DRAW_INDX_0 0x00000000
@@ -570,6 +662,52 @@ static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val)
570 return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK; 662 return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK;
571} 663}
572 664
665#define REG_CP_SET_BIN_DATA5_0 0x00000000
666#define CP_SET_BIN_DATA5_0_VSC_SIZE__MASK 0x003f0000
667#define CP_SET_BIN_DATA5_0_VSC_SIZE__SHIFT 16
668static inline uint32_t CP_SET_BIN_DATA5_0_VSC_SIZE(uint32_t val)
669{
670 return ((val) << CP_SET_BIN_DATA5_0_VSC_SIZE__SHIFT) & CP_SET_BIN_DATA5_0_VSC_SIZE__MASK;
671}
672#define CP_SET_BIN_DATA5_0_VSC_N__MASK 0x07c00000
673#define CP_SET_BIN_DATA5_0_VSC_N__SHIFT 22
674static inline uint32_t CP_SET_BIN_DATA5_0_VSC_N(uint32_t val)
675{
676 return ((val) << CP_SET_BIN_DATA5_0_VSC_N__SHIFT) & CP_SET_BIN_DATA5_0_VSC_N__MASK;
677}
678
679#define REG_CP_SET_BIN_DATA5_1 0x00000001
680#define CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__MASK 0xffffffff
681#define CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__SHIFT 0
682static inline uint32_t CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO(uint32_t val)
683{
684 return ((val) << CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__SHIFT) & CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__MASK;
685}
686
687#define REG_CP_SET_BIN_DATA5_2 0x00000002
688#define CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__MASK 0xffffffff
689#define CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__SHIFT 0
690static inline uint32_t CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI(uint32_t val)
691{
692 return ((val) << CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__SHIFT) & CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__MASK;
693}
694
695#define REG_CP_SET_BIN_DATA5_3 0x00000003
696#define CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__MASK 0xffffffff
697#define CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__SHIFT 0
698static inline uint32_t CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO(uint32_t val)
699{
700 return ((val) << CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__MASK;
701}
702
703#define REG_CP_SET_BIN_DATA5_4 0x00000004
704#define CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK 0xffffffff
705#define CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT 0
706static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val)
707{
708 return ((val) << CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK;
709}
710
573#define REG_CP_REG_TO_MEM_0 0x00000000 711#define REG_CP_REG_TO_MEM_0 0x00000000
574#define CP_REG_TO_MEM_0_REG__MASK 0x0000ffff 712#define CP_REG_TO_MEM_0_REG__MASK 0x0000ffff
575#define CP_REG_TO_MEM_0_REG__SHIFT 0 713#define CP_REG_TO_MEM_0_REG__SHIFT 0
@@ -594,6 +732,128 @@ static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
594 return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK; 732 return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
595} 733}
596 734
735#define REG_CP_MEM_TO_MEM_0 0x00000000
736#define CP_MEM_TO_MEM_0_NEG_A 0x00000001
737#define CP_MEM_TO_MEM_0_NEG_B 0x00000002
738#define CP_MEM_TO_MEM_0_NEG_C 0x00000004
739#define CP_MEM_TO_MEM_0_DOUBLE 0x20000000
740
741#define REG_CP_COND_WRITE_0 0x00000000
742#define CP_COND_WRITE_0_FUNCTION__MASK 0x00000007
743#define CP_COND_WRITE_0_FUNCTION__SHIFT 0
744static inline uint32_t CP_COND_WRITE_0_FUNCTION(enum cp_cond_function val)
745{
746 return ((val) << CP_COND_WRITE_0_FUNCTION__SHIFT) & CP_COND_WRITE_0_FUNCTION__MASK;
747}
748#define CP_COND_WRITE_0_POLL_MEMORY 0x00000010
749#define CP_COND_WRITE_0_WRITE_MEMORY 0x00000100
750
751#define REG_CP_COND_WRITE_1 0x00000001
752#define CP_COND_WRITE_1_POLL_ADDR__MASK 0xffffffff
753#define CP_COND_WRITE_1_POLL_ADDR__SHIFT 0
754static inline uint32_t CP_COND_WRITE_1_POLL_ADDR(uint32_t val)
755{
756 return ((val) << CP_COND_WRITE_1_POLL_ADDR__SHIFT) & CP_COND_WRITE_1_POLL_ADDR__MASK;
757}
758
759#define REG_CP_COND_WRITE_2 0x00000002
760#define CP_COND_WRITE_2_REF__MASK 0xffffffff
761#define CP_COND_WRITE_2_REF__SHIFT 0
762static inline uint32_t CP_COND_WRITE_2_REF(uint32_t val)
763{
764 return ((val) << CP_COND_WRITE_2_REF__SHIFT) & CP_COND_WRITE_2_REF__MASK;
765}
766
767#define REG_CP_COND_WRITE_3 0x00000003
768#define CP_COND_WRITE_3_MASK__MASK 0xffffffff
769#define CP_COND_WRITE_3_MASK__SHIFT 0
770static inline uint32_t CP_COND_WRITE_3_MASK(uint32_t val)
771{
772 return ((val) << CP_COND_WRITE_3_MASK__SHIFT) & CP_COND_WRITE_3_MASK__MASK;
773}
774
775#define REG_CP_COND_WRITE_4 0x00000004
776#define CP_COND_WRITE_4_WRITE_ADDR__MASK 0xffffffff
777#define CP_COND_WRITE_4_WRITE_ADDR__SHIFT 0
778static inline uint32_t CP_COND_WRITE_4_WRITE_ADDR(uint32_t val)
779{
780 return ((val) << CP_COND_WRITE_4_WRITE_ADDR__SHIFT) & CP_COND_WRITE_4_WRITE_ADDR__MASK;
781}
782
783#define REG_CP_COND_WRITE_5 0x00000005
784#define CP_COND_WRITE_5_WRITE_DATA__MASK 0xffffffff
785#define CP_COND_WRITE_5_WRITE_DATA__SHIFT 0
786static inline uint32_t CP_COND_WRITE_5_WRITE_DATA(uint32_t val)
787{
788 return ((val) << CP_COND_WRITE_5_WRITE_DATA__SHIFT) & CP_COND_WRITE_5_WRITE_DATA__MASK;
789}
790
791#define REG_CP_COND_WRITE5_0 0x00000000
792#define CP_COND_WRITE5_0_FUNCTION__MASK 0x00000007
793#define CP_COND_WRITE5_0_FUNCTION__SHIFT 0
794static inline uint32_t CP_COND_WRITE5_0_FUNCTION(enum cp_cond_function val)
795{
796 return ((val) << CP_COND_WRITE5_0_FUNCTION__SHIFT) & CP_COND_WRITE5_0_FUNCTION__MASK;
797}
798#define CP_COND_WRITE5_0_POLL_MEMORY 0x00000010
799#define CP_COND_WRITE5_0_WRITE_MEMORY 0x00000100
800
801#define REG_CP_COND_WRITE5_1 0x00000001
802#define CP_COND_WRITE5_1_POLL_ADDR_LO__MASK 0xffffffff
803#define CP_COND_WRITE5_1_POLL_ADDR_LO__SHIFT 0
804static inline uint32_t CP_COND_WRITE5_1_POLL_ADDR_LO(uint32_t val)
805{
806 return ((val) << CP_COND_WRITE5_1_POLL_ADDR_LO__SHIFT) & CP_COND_WRITE5_1_POLL_ADDR_LO__MASK;
807}
808
809#define REG_CP_COND_WRITE5_2 0x00000002
810#define CP_COND_WRITE5_2_POLL_ADDR_HI__MASK 0xffffffff
811#define CP_COND_WRITE5_2_POLL_ADDR_HI__SHIFT 0
812static inline uint32_t CP_COND_WRITE5_2_POLL_ADDR_HI(uint32_t val)
813{
814 return ((val) << CP_COND_WRITE5_2_POLL_ADDR_HI__SHIFT) & CP_COND_WRITE5_2_POLL_ADDR_HI__MASK;
815}
816
817#define REG_CP_COND_WRITE5_3 0x00000003
818#define CP_COND_WRITE5_3_REF__MASK 0xffffffff
819#define CP_COND_WRITE5_3_REF__SHIFT 0
820static inline uint32_t CP_COND_WRITE5_3_REF(uint32_t val)
821{
822 return ((val) << CP_COND_WRITE5_3_REF__SHIFT) & CP_COND_WRITE5_3_REF__MASK;
823}
824
825#define REG_CP_COND_WRITE5_4 0x00000004
826#define CP_COND_WRITE5_4_MASK__MASK 0xffffffff
827#define CP_COND_WRITE5_4_MASK__SHIFT 0
828static inline uint32_t CP_COND_WRITE5_4_MASK(uint32_t val)
829{
830 return ((val) << CP_COND_WRITE5_4_MASK__SHIFT) & CP_COND_WRITE5_4_MASK__MASK;
831}
832
833#define REG_CP_COND_WRITE5_5 0x00000005
834#define CP_COND_WRITE5_5_WRITE_ADDR_LO__MASK 0xffffffff
835#define CP_COND_WRITE5_5_WRITE_ADDR_LO__SHIFT 0
836static inline uint32_t CP_COND_WRITE5_5_WRITE_ADDR_LO(uint32_t val)
837{
838 return ((val) << CP_COND_WRITE5_5_WRITE_ADDR_LO__SHIFT) & CP_COND_WRITE5_5_WRITE_ADDR_LO__MASK;
839}
840
841#define REG_CP_COND_WRITE5_6 0x00000006
842#define CP_COND_WRITE5_6_WRITE_ADDR_HI__MASK 0xffffffff
843#define CP_COND_WRITE5_6_WRITE_ADDR_HI__SHIFT 0
844static inline uint32_t CP_COND_WRITE5_6_WRITE_ADDR_HI(uint32_t val)
845{
846 return ((val) << CP_COND_WRITE5_6_WRITE_ADDR_HI__SHIFT) & CP_COND_WRITE5_6_WRITE_ADDR_HI__MASK;
847}
848
849#define REG_CP_COND_WRITE5_7 0x00000007
850#define CP_COND_WRITE5_7_WRITE_DATA__MASK 0xffffffff
851#define CP_COND_WRITE5_7_WRITE_DATA__SHIFT 0
852static inline uint32_t CP_COND_WRITE5_7_WRITE_DATA(uint32_t val)
853{
854 return ((val) << CP_COND_WRITE5_7_WRITE_DATA__SHIFT) & CP_COND_WRITE5_7_WRITE_DATA__MASK;
855}
856
597#define REG_CP_DISPATCH_COMPUTE_0 0x00000000 857#define REG_CP_DISPATCH_COMPUTE_0 0x00000000
598 858
599#define REG_CP_DISPATCH_COMPUTE_1 0x00000001 859#define REG_CP_DISPATCH_COMPUTE_1 0x00000001
@@ -645,6 +905,7 @@ static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val)
645} 905}
646 906
647#define REG_CP_SET_RENDER_MODE_3 0x00000003 907#define REG_CP_SET_RENDER_MODE_3 0x00000003
908#define CP_SET_RENDER_MODE_3_VSC_ENABLE 0x00000008
648#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010 909#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010
649 910
650#define REG_CP_SET_RENDER_MODE_4 0x00000004 911#define REG_CP_SET_RENDER_MODE_4 0x00000004
@@ -673,6 +934,50 @@ static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val)
673 return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK; 934 return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK;
674} 935}
675 936
937#define REG_CP_COMPUTE_CHECKPOINT_0 0x00000000
938#define CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__MASK 0xffffffff
939#define CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__SHIFT 0
940static inline uint32_t CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO(uint32_t val)
941{
942 return ((val) << CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__SHIFT) & CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__MASK;
943}
944
945#define REG_CP_COMPUTE_CHECKPOINT_1 0x00000001
946#define CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__MASK 0xffffffff
947#define CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__SHIFT 0
948static inline uint32_t CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI(uint32_t val)
949{
950 return ((val) << CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__MASK;
951}
952
953#define REG_CP_COMPUTE_CHECKPOINT_2 0x00000002
954
955#define REG_CP_COMPUTE_CHECKPOINT_3 0x00000003
956
957#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004
958#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK 0xffffffff
959#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT 0
960static inline uint32_t CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN(uint32_t val)
961{
962 return ((val) << CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK;
963}
964
965#define REG_CP_COMPUTE_CHECKPOINT_5 0x00000005
966#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK 0xffffffff
967#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT 0
968static inline uint32_t CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO(uint32_t val)
969{
970 return ((val) << CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT) & CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK;
971}
972
973#define REG_CP_COMPUTE_CHECKPOINT_6 0x00000006
974#define CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK 0xffffffff
975#define CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT 0
976static inline uint32_t CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI(uint32_t val)
977{
978 return ((val) << CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK;
979}
980
676#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000 981#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000
677 982
678#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001 983#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001
@@ -698,6 +1003,7 @@ static inline uint32_t CP_EVENT_WRITE_0_EVENT(enum vgt_event_type val)
698{ 1003{
699 return ((val) << CP_EVENT_WRITE_0_EVENT__SHIFT) & CP_EVENT_WRITE_0_EVENT__MASK; 1004 return ((val) << CP_EVENT_WRITE_0_EVENT__SHIFT) & CP_EVENT_WRITE_0_EVENT__MASK;
700} 1005}
1006#define CP_EVENT_WRITE_0_TIMESTAMP 0x40000000
701 1007
702#define REG_CP_EVENT_WRITE_1 0x00000001 1008#define REG_CP_EVENT_WRITE_1 0x00000001
703#define CP_EVENT_WRITE_1_ADDR_0_LO__MASK 0xffffffff 1009#define CP_EVENT_WRITE_1_ADDR_0_LO__MASK 0xffffffff
@@ -781,5 +1087,31 @@ static inline uint32_t CP_BLIT_4_DST_Y2(uint32_t val)
781 return ((val) << CP_BLIT_4_DST_Y2__SHIFT) & CP_BLIT_4_DST_Y2__MASK; 1087 return ((val) << CP_BLIT_4_DST_Y2__SHIFT) & CP_BLIT_4_DST_Y2__MASK;
782} 1088}
783 1089
1090#define REG_CP_EXEC_CS_0 0x00000000
1091
1092#define REG_CP_EXEC_CS_1 0x00000001
1093#define CP_EXEC_CS_1_NGROUPS_X__MASK 0xffffffff
1094#define CP_EXEC_CS_1_NGROUPS_X__SHIFT 0
1095static inline uint32_t CP_EXEC_CS_1_NGROUPS_X(uint32_t val)
1096{
1097 return ((val) << CP_EXEC_CS_1_NGROUPS_X__SHIFT) & CP_EXEC_CS_1_NGROUPS_X__MASK;
1098}
1099
1100#define REG_CP_EXEC_CS_2 0x00000002
1101#define CP_EXEC_CS_2_NGROUPS_Y__MASK 0xffffffff
1102#define CP_EXEC_CS_2_NGROUPS_Y__SHIFT 0
1103static inline uint32_t CP_EXEC_CS_2_NGROUPS_Y(uint32_t val)
1104{
1105 return ((val) << CP_EXEC_CS_2_NGROUPS_Y__SHIFT) & CP_EXEC_CS_2_NGROUPS_Y__MASK;
1106}
1107
1108#define REG_CP_EXEC_CS_3 0x00000003
1109#define CP_EXEC_CS_3_NGROUPS_Z__MASK 0xffffffff
1110#define CP_EXEC_CS_3_NGROUPS_Z__SHIFT 0
1111static inline uint32_t CP_EXEC_CS_3_NGROUPS_Z(uint32_t val)
1112{
1113 return ((val) << CP_EXEC_CS_3_NGROUPS_Z__SHIFT) & CP_EXEC_CS_3_NGROUPS_Z__MASK;
1114}
1115
784 1116
785#endif /* ADRENO_PM4_XML */ 1117#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index b3d70ea42891..479086ccf180 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,8 +8,17 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-01-11 05:19:19) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
12- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
13 22
14Copyright (C) 2013-2017 by the following authors: 23Copyright (C) 2013-2017 by the following authors:
15- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index f97a7803a02d..9e9c5696bc03 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -32,6 +32,7 @@
32#include "dsi.xml.h" 32#include "dsi.xml.h"
33#include "sfpb.xml.h" 33#include "sfpb.xml.h"
34#include "dsi_cfg.h" 34#include "dsi_cfg.h"
35#include "msm_kms.h"
35 36
36static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor) 37static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
37{ 38{
@@ -975,23 +976,23 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
975static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size) 976static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
976{ 977{
977 struct drm_device *dev = msm_host->dev; 978 struct drm_device *dev = msm_host->dev;
979 struct msm_drm_private *priv = dev->dev_private;
978 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 980 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
979 int ret; 981 int ret;
980 uint64_t iova; 982 uint64_t iova;
981 983
982 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { 984 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
983 mutex_lock(&dev->struct_mutex);
984 msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED); 985 msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
985 if (IS_ERR(msm_host->tx_gem_obj)) { 986 if (IS_ERR(msm_host->tx_gem_obj)) {
986 ret = PTR_ERR(msm_host->tx_gem_obj); 987 ret = PTR_ERR(msm_host->tx_gem_obj);
987 pr_err("%s: failed to allocate gem, %d\n", 988 pr_err("%s: failed to allocate gem, %d\n",
988 __func__, ret); 989 __func__, ret);
989 msm_host->tx_gem_obj = NULL; 990 msm_host->tx_gem_obj = NULL;
990 mutex_unlock(&dev->struct_mutex);
991 return ret; 991 return ret;
992 } 992 }
993 993
994 ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova); 994 ret = msm_gem_get_iova(msm_host->tx_gem_obj,
995 priv->kms->aspace, &iova);
995 mutex_unlock(&dev->struct_mutex); 996 mutex_unlock(&dev->struct_mutex);
996 if (ret) { 997 if (ret) {
997 pr_err("%s: failed to get iova, %d\n", __func__, ret); 998 pr_err("%s: failed to get iova, %d\n", __func__, ret);
@@ -1141,12 +1142,15 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1141static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len) 1142static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
1142{ 1143{
1143 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 1144 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1145 struct drm_device *dev = msm_host->dev;
1146 struct msm_drm_private *priv = dev->dev_private;
1144 int ret; 1147 int ret;
1145 uint64_t dma_base; 1148 uint64_t dma_base;
1146 bool triggered; 1149 bool triggered;
1147 1150
1148 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { 1151 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
1149 ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &dma_base); 1152 ret = msm_gem_get_iova(msm_host->tx_gem_obj,
1153 priv->kms->aspace, &dma_base);
1150 if (ret) { 1154 if (ret) {
1151 pr_err("%s: failed to get iova: %d\n", __func__, ret); 1155 pr_err("%s: failed to get iova: %d\n", __func__, ret);
1152 return ret; 1156 return ret;
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 8b9f3ebaeba7..57cf7fa7f1c4 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) 21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
22 22
23Copyright (C) 2013-2015 by the following authors: 23Copyright (C) 2013-2017 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
26 26
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 3fcbb30dc241..9d4d1feaefd7 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) 21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
22 22
23Copyright (C) 2013-2015 by the following authors: 23Copyright (C) 2013-2017 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
26 26
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index d7bf3232dc88..f150d4a47707 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) 21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
22 22
23Copyright (C) 2013-2015 by the following authors: 23Copyright (C) 2013-2017 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
26 26
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 0a97ff75ed6f..ecebf8b623ab 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) 21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
22 22
23Copyright (C) 2013-2016 by the following authors: 23Copyright (C) 2013-2017 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
26 26
@@ -111,6 +111,32 @@ static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val)
111#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040 111#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040
112#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080 112#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080
113 113
114#define REG_HDMI_INFOFRAME_CTRL1 0x00000030
115#define HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK 0x0000003f
116#define HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__SHIFT 0
117static inline uint32_t HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(uint32_t val)
118{
119 return ((val) << HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
120}
121#define HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__MASK 0x00003f00
122#define HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__SHIFT 8
123static inline uint32_t HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE(uint32_t val)
124{
125 return ((val) << HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__MASK;
126}
127#define HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__MASK 0x003f0000
128#define HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__SHIFT 16
129static inline uint32_t HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE(uint32_t val)
130{
131 return ((val) << HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__MASK;
132}
133#define HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__MASK 0x3f000000
134#define HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__SHIFT 24
135static inline uint32_t HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE(uint32_t val)
136{
137 return ((val) << HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__MASK;
138}
139
114#define REG_HDMI_GEN_PKT_CTRL 0x00000034 140#define REG_HDMI_GEN_PKT_CTRL 0x00000034
115#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001 141#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001
116#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002 142#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002
@@ -463,7 +489,7 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
463#define REG_HDMI_CEC_RD_FILTER 0x000002b0 489#define REG_HDMI_CEC_RD_FILTER 0x000002b0
464 490
465#define REG_HDMI_ACTIVE_HSYNC 0x000002b4 491#define REG_HDMI_ACTIVE_HSYNC 0x000002b4
466#define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff 492#define HDMI_ACTIVE_HSYNC_START__MASK 0x00001fff
467#define HDMI_ACTIVE_HSYNC_START__SHIFT 0 493#define HDMI_ACTIVE_HSYNC_START__SHIFT 0
468static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val) 494static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val)
469{ 495{
@@ -477,13 +503,13 @@ static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
477} 503}
478 504
479#define REG_HDMI_ACTIVE_VSYNC 0x000002b8 505#define REG_HDMI_ACTIVE_VSYNC 0x000002b8
480#define HDMI_ACTIVE_VSYNC_START__MASK 0x00000fff 506#define HDMI_ACTIVE_VSYNC_START__MASK 0x00001fff
481#define HDMI_ACTIVE_VSYNC_START__SHIFT 0 507#define HDMI_ACTIVE_VSYNC_START__SHIFT 0
482static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val) 508static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val)
483{ 509{
484 return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK; 510 return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK;
485} 511}
486#define HDMI_ACTIVE_VSYNC_END__MASK 0x0fff0000 512#define HDMI_ACTIVE_VSYNC_END__MASK 0x1fff0000
487#define HDMI_ACTIVE_VSYNC_END__SHIFT 16 513#define HDMI_ACTIVE_VSYNC_END__SHIFT 16
488static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val) 514static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
489{ 515{
@@ -491,13 +517,13 @@ static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
491} 517}
492 518
493#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc 519#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc
494#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00000fff 520#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00001fff
495#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0 521#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0
496static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val) 522static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
497{ 523{
498 return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK; 524 return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK;
499} 525}
500#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x0fff0000 526#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x1fff0000
501#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16 527#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16
502static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val) 528static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
503{ 529{
@@ -505,13 +531,13 @@ static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
505} 531}
506 532
507#define REG_HDMI_TOTAL 0x000002c0 533#define REG_HDMI_TOTAL 0x000002c0
508#define HDMI_TOTAL_H_TOTAL__MASK 0x00000fff 534#define HDMI_TOTAL_H_TOTAL__MASK 0x00001fff
509#define HDMI_TOTAL_H_TOTAL__SHIFT 0 535#define HDMI_TOTAL_H_TOTAL__SHIFT 0
510static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val) 536static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val)
511{ 537{
512 return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK; 538 return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK;
513} 539}
514#define HDMI_TOTAL_V_TOTAL__MASK 0x0fff0000 540#define HDMI_TOTAL_V_TOTAL__MASK 0x1fff0000
515#define HDMI_TOTAL_V_TOTAL__SHIFT 16 541#define HDMI_TOTAL_V_TOTAL__SHIFT 16
516static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val) 542static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
517{ 543{
@@ -519,7 +545,7 @@ static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
519} 545}
520 546
521#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4 547#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4
522#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00000fff 548#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00001fff
523#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0 549#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0
524static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val) 550static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
525{ 551{
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 8177e8511afd..9c34b91ae329 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -175,10 +175,10 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
175 /* configure infoframe: */ 175 /* configure infoframe: */
176 hdmi_audio_infoframe_pack(info, buf, sizeof(buf)); 176 hdmi_audio_infoframe_pack(info, buf, sizeof(buf));
177 hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0, 177 hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
178 (buf[3] << 0) || (buf[4] << 8) || 178 (buf[3] << 0) | (buf[4] << 8) |
179 (buf[5] << 16) || (buf[6] << 24)); 179 (buf[5] << 16) | (buf[6] << 24));
180 hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1, 180 hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
181 (buf[7] << 0) || (buf[8] << 8)); 181 (buf[7] << 0) | (buf[8] << 8));
182 182
183 hdmi_write(hdmi, REG_HDMI_GC, 0); 183 hdmi_write(hdmi, REG_HDMI_GC, 0);
184 184
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 4e6d1bf27474..ae40e7179d4f 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -86,6 +86,65 @@ static void power_off(struct drm_bridge *bridge)
86 } 86 }
87} 87}
88 88
89#define AVI_IFRAME_LINE_NUMBER 1
90
91static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
92{
93 struct drm_crtc *crtc = hdmi->encoder->crtc;
94 const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
95 union hdmi_infoframe frame;
96 u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
97 u32 val;
98 int len;
99
100 drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
101
102 len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
103 if (len < 0) {
104 dev_err(&hdmi->pdev->dev,
105 "failed to configure avi infoframe\n");
106 return;
107 }
108
109 /*
110 * the AVI_INFOx registers don't map exactly to how the AVI infoframes
111 * are packed according to the spec. The checksum from the header is
112 * written to the LSB byte of AVI_INFO0 and the version is written to
113 * the third byte from the LSB of AVI_INFO3
114 */
115 hdmi_write(hdmi, REG_HDMI_AVI_INFO(0),
116 buffer[3] |
117 buffer[4] << 8 |
118 buffer[5] << 16 |
119 buffer[6] << 24);
120
121 hdmi_write(hdmi, REG_HDMI_AVI_INFO(1),
122 buffer[7] |
123 buffer[8] << 8 |
124 buffer[9] << 16 |
125 buffer[10] << 24);
126
127 hdmi_write(hdmi, REG_HDMI_AVI_INFO(2),
128 buffer[11] |
129 buffer[12] << 8 |
130 buffer[13] << 16 |
131 buffer[14] << 24);
132
133 hdmi_write(hdmi, REG_HDMI_AVI_INFO(3),
134 buffer[15] |
135 buffer[16] << 8 |
136 buffer[1] << 24);
137
138 hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
139 HDMI_INFOFRAME_CTRL0_AVI_SEND |
140 HDMI_INFOFRAME_CTRL0_AVI_CONT);
141
142 val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
143 val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
144 val |= HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(AVI_IFRAME_LINE_NUMBER);
145 hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
146}
147
89static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge) 148static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
90{ 149{
91 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); 150 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
@@ -98,7 +157,10 @@ static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
98 msm_hdmi_phy_resource_enable(phy); 157 msm_hdmi_phy_resource_enable(phy);
99 msm_hdmi_power_on(bridge); 158 msm_hdmi_power_on(bridge);
100 hdmi->power_on = true; 159 hdmi->power_on = true;
101 msm_hdmi_audio_update(hdmi); 160 if (hdmi->hdmi_mode) {
161 msm_hdmi_config_avi_infoframe(hdmi);
162 msm_hdmi_audio_update(hdmi);
163 }
102 } 164 }
103 165
104 msm_hdmi_phy_powerup(phy, hdmi->pixclock); 166 msm_hdmi_phy_powerup(phy, hdmi->pixclock);
@@ -134,7 +196,8 @@ static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
134 if (hdmi->power_on) { 196 if (hdmi->power_on) {
135 power_off(bridge); 197 power_off(bridge);
136 hdmi->power_on = false; 198 hdmi->power_on = false;
137 msm_hdmi_audio_update(hdmi); 199 if (hdmi->hdmi_mode)
200 msm_hdmi_audio_update(hdmi);
138 msm_hdmi_phy_resource_disable(phy); 201 msm_hdmi_phy_resource_disable(phy);
139 } 202 }
140} 203}
@@ -196,7 +259,8 @@ static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
196 DBG("frame_ctrl=%08x", frame_ctrl); 259 DBG("frame_ctrl=%08x", frame_ctrl);
197 hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl); 260 hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
198 261
199 msm_hdmi_audio_update(hdmi); 262 if (hdmi->hdmi_mode)
263 msm_hdmi_audio_update(hdmi);
200} 264}
201 265
202static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = { 266static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index 143eab46ba68..1fb7645cc721 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -670,6 +670,11 @@ static unsigned long hdmi_8996_pll_recalc_rate(struct clk_hw *hw,
670 670
671static void hdmi_8996_pll_unprepare(struct clk_hw *hw) 671static void hdmi_8996_pll_unprepare(struct clk_hw *hw)
672{ 672{
673 struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
674 struct hdmi_phy *phy = pll_get_phy(pll);
675
676 hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x6);
677 usleep_range(100, 150);
673} 678}
674 679
675static int hdmi_8996_pll_is_enabled(struct clk_hw *hw) 680static int hdmi_8996_pll_is_enabled(struct clk_hw *hw)
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 1b996ede7a65..da646deedf4b 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) 21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
22 22
23Copyright (C) 2013-2015 by the following authors: 23Copyright (C) 2013-2017 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
26 26
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 88037889589b..576cea30d391 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) 21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
22 22
23Copyright (C) 2013-2015 by the following authors: 23Copyright (C) 2013-2017 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
26 26
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 698e514203c6..615e1def64d9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -126,8 +126,9 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
126 struct mdp4_crtc *mdp4_crtc = 126 struct mdp4_crtc *mdp4_crtc =
127 container_of(work, struct mdp4_crtc, unref_cursor_work); 127 container_of(work, struct mdp4_crtc, unref_cursor_work);
128 struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base); 128 struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
129 struct msm_kms *kms = &mdp4_kms->base.base;
129 130
130 msm_gem_put_iova(val, mdp4_kms->id); 131 msm_gem_put_iova(val, kms->aspace);
131 drm_gem_object_unreference_unlocked(val); 132 drm_gem_object_unreference_unlocked(val);
132} 133}
133 134
@@ -360,6 +361,7 @@ static void update_cursor(struct drm_crtc *crtc)
360{ 361{
361 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 362 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
362 struct mdp4_kms *mdp4_kms = get_kms(crtc); 363 struct mdp4_kms *mdp4_kms = get_kms(crtc);
364 struct msm_kms *kms = &mdp4_kms->base.base;
363 enum mdp4_dma dma = mdp4_crtc->dma; 365 enum mdp4_dma dma = mdp4_crtc->dma;
364 unsigned long flags; 366 unsigned long flags;
365 367
@@ -372,7 +374,7 @@ static void update_cursor(struct drm_crtc *crtc)
372 if (next_bo) { 374 if (next_bo) {
373 /* take a obj ref + iova ref when we start scanning out: */ 375 /* take a obj ref + iova ref when we start scanning out: */
374 drm_gem_object_reference(next_bo); 376 drm_gem_object_reference(next_bo);
375 msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova); 377 msm_gem_get_iova(next_bo, kms->aspace, &iova);
376 378
377 /* enable cursor: */ 379 /* enable cursor: */
378 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma), 380 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -409,6 +411,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
409{ 411{
410 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 412 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
411 struct mdp4_kms *mdp4_kms = get_kms(crtc); 413 struct mdp4_kms *mdp4_kms = get_kms(crtc);
414 struct msm_kms *kms = &mdp4_kms->base.base;
412 struct drm_device *dev = crtc->dev; 415 struct drm_device *dev = crtc->dev;
413 struct drm_gem_object *cursor_bo, *old_bo; 416 struct drm_gem_object *cursor_bo, *old_bo;
414 unsigned long flags; 417 unsigned long flags;
@@ -429,7 +432,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
429 } 432 }
430 433
431 if (cursor_bo) { 434 if (cursor_bo) {
432 ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova); 435 ret = msm_gem_get_iova(cursor_bo, kms->aspace, &iova);
433 if (ret) 436 if (ret)
434 goto fail; 437 goto fail;
435 } else { 438 } else {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 3d26d7774c08..bcd1f5cac72c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -160,10 +160,10 @@ static void mdp4_destroy(struct msm_kms *kms)
160{ 160{
161 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); 161 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
162 struct device *dev = mdp4_kms->dev->dev; 162 struct device *dev = mdp4_kms->dev->dev;
163 struct msm_gem_address_space *aspace = mdp4_kms->aspace; 163 struct msm_gem_address_space *aspace = kms->aspace;
164 164
165 if (mdp4_kms->blank_cursor_iova) 165 if (mdp4_kms->blank_cursor_iova)
166 msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id); 166 msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
167 drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo); 167 drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
168 168
169 if (aspace) { 169 if (aspace) {
@@ -510,7 +510,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
510 goto fail; 510 goto fail;
511 } 511 }
512 512
513 mdp4_kms->aspace = aspace; 513 kms->aspace = aspace;
514 514
515 ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, 515 ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
516 ARRAY_SIZE(iommu_ports)); 516 ARRAY_SIZE(iommu_ports));
@@ -522,22 +522,13 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
522 aspace = NULL; 522 aspace = NULL;
523 } 523 }
524 524
525 mdp4_kms->id = msm_register_address_space(dev, aspace);
526 if (mdp4_kms->id < 0) {
527 ret = mdp4_kms->id;
528 dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
529 goto fail;
530 }
531
532 ret = modeset_init(mdp4_kms); 525 ret = modeset_init(mdp4_kms);
533 if (ret) { 526 if (ret) {
534 dev_err(dev->dev, "modeset_init failed: %d\n", ret); 527 dev_err(dev->dev, "modeset_init failed: %d\n", ret);
535 goto fail; 528 goto fail;
536 } 529 }
537 530
538 mutex_lock(&dev->struct_mutex);
539 mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC); 531 mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
540 mutex_unlock(&dev->struct_mutex);
541 if (IS_ERR(mdp4_kms->blank_cursor_bo)) { 532 if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
542 ret = PTR_ERR(mdp4_kms->blank_cursor_bo); 533 ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
543 dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret); 534 dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
@@ -545,7 +536,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
545 goto fail; 536 goto fail;
546 } 537 }
547 538
548 ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id, 539 ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
549 &mdp4_kms->blank_cursor_iova); 540 &mdp4_kms->blank_cursor_iova);
550 if (ret) { 541 if (ret) {
551 dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret); 542 dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index c413779d488a..940de51ac5cd 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -34,9 +34,6 @@ struct mdp4_kms {
34 34
35 int rev; 35 int rev;
36 36
37 /* mapper-id used to request GEM buffer mapped for scanout: */
38 int id;
39
40 void __iomem *mmio; 37 void __iomem *mmio;
41 38
42 struct regulator *vdd; 39 struct regulator *vdd;
@@ -45,7 +42,6 @@ struct mdp4_kms {
45 struct clk *pclk; 42 struct clk *pclk;
46 struct clk *lut_clk; 43 struct clk *lut_clk;
47 struct clk *axi_clk; 44 struct clk *axi_clk;
48 struct msm_gem_address_space *aspace;
49 45
50 struct mdp_irq error_handler; 46 struct mdp_irq error_handler;
51 47
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 53619d07677e..a20e3d644523 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -103,13 +103,14 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
103{ 103{
104 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 104 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
105 struct mdp4_kms *mdp4_kms = get_kms(plane); 105 struct mdp4_kms *mdp4_kms = get_kms(plane);
106 struct msm_kms *kms = &mdp4_kms->base.base;
106 struct drm_framebuffer *fb = new_state->fb; 107 struct drm_framebuffer *fb = new_state->fb;
107 108
108 if (!fb) 109 if (!fb)
109 return 0; 110 return 0;
110 111
111 DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id); 112 DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
112 return msm_framebuffer_prepare(fb, mdp4_kms->id); 113 return msm_framebuffer_prepare(fb, kms->aspace);
113} 114}
114 115
115static void mdp4_plane_cleanup_fb(struct drm_plane *plane, 116static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
@@ -117,13 +118,14 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
117{ 118{
118 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 119 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
119 struct mdp4_kms *mdp4_kms = get_kms(plane); 120 struct mdp4_kms *mdp4_kms = get_kms(plane);
121 struct msm_kms *kms = &mdp4_kms->base.base;
120 struct drm_framebuffer *fb = old_state->fb; 122 struct drm_framebuffer *fb = old_state->fb;
121 123
122 if (!fb) 124 if (!fb)
123 return; 125 return;
124 126
125 DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id); 127 DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
126 msm_framebuffer_cleanup(fb, mdp4_kms->id); 128 msm_framebuffer_cleanup(fb, kms->aspace);
127} 129}
128 130
129 131
@@ -161,6 +163,7 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
161{ 163{
162 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 164 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
163 struct mdp4_kms *mdp4_kms = get_kms(plane); 165 struct mdp4_kms *mdp4_kms = get_kms(plane);
166 struct msm_kms *kms = &mdp4_kms->base.base;
164 enum mdp4_pipe pipe = mdp4_plane->pipe; 167 enum mdp4_pipe pipe = mdp4_plane->pipe;
165 168
166 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe), 169 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
@@ -172,13 +175,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
172 MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); 175 MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
173 176
174 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), 177 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
175 msm_framebuffer_iova(fb, mdp4_kms->id, 0)); 178 msm_framebuffer_iova(fb, kms->aspace, 0));
176 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe), 179 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
177 msm_framebuffer_iova(fb, mdp4_kms->id, 1)); 180 msm_framebuffer_iova(fb, kms->aspace, 1));
178 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe), 181 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
179 msm_framebuffer_iova(fb, mdp4_kms->id, 2)); 182 msm_framebuffer_iova(fb, kms->aspace, 2));
180 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe), 183 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
181 msm_framebuffer_iova(fb, mdp4_kms->id, 3)); 184 msm_framebuffer_iova(fb, kms->aspace, 3));
182 185
183 plane->fb = fb; 186 plane->fb = fb;
184} 187}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index e6dfc518d4db..d9c10e02ee41 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,9 +8,17 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-01-11 05:19:19) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
12- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
14 22
15Copyright (C) 2013-2017 by the following authors: 23Copyright (C) 2013-2017 by the following authors:
16- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 0764a6498110..cb5415d6c04b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -160,8 +160,9 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
160 struct mdp5_crtc *mdp5_crtc = 160 struct mdp5_crtc *mdp5_crtc =
161 container_of(work, struct mdp5_crtc, unref_cursor_work); 161 container_of(work, struct mdp5_crtc, unref_cursor_work);
162 struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base); 162 struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
163 struct msm_kms *kms = &mdp5_kms->base.base;
163 164
164 msm_gem_put_iova(val, mdp5_kms->id); 165 msm_gem_put_iova(val, kms->aspace);
165 drm_gem_object_unreference_unlocked(val); 166 drm_gem_object_unreference_unlocked(val);
166} 167}
167 168
@@ -724,6 +725,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
724 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; 725 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
725 struct drm_device *dev = crtc->dev; 726 struct drm_device *dev = crtc->dev;
726 struct mdp5_kms *mdp5_kms = get_kms(crtc); 727 struct mdp5_kms *mdp5_kms = get_kms(crtc);
728 struct msm_kms *kms = &mdp5_kms->base.base;
727 struct drm_gem_object *cursor_bo, *old_bo = NULL; 729 struct drm_gem_object *cursor_bo, *old_bo = NULL;
728 uint32_t blendcfg, stride; 730 uint32_t blendcfg, stride;
729 uint64_t cursor_addr; 731 uint64_t cursor_addr;
@@ -758,7 +760,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
758 if (!cursor_bo) 760 if (!cursor_bo)
759 return -ENOENT; 761 return -ENOENT;
760 762
761 ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr); 763 ret = msm_gem_get_iova(cursor_bo, kms->aspace, &cursor_addr);
762 if (ret) 764 if (ret)
763 return -EINVAL; 765 return -EINVAL;
764 766
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index e2b3346ead48..5d13fa5381ee 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -163,7 +163,7 @@ static void mdp5_set_encoder_mode(struct msm_kms *kms,
163static void mdp5_kms_destroy(struct msm_kms *kms) 163static void mdp5_kms_destroy(struct msm_kms *kms)
164{ 164{
165 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 165 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
166 struct msm_gem_address_space *aspace = mdp5_kms->aspace; 166 struct msm_gem_address_space *aspace = kms->aspace;
167 int i; 167 int i;
168 168
169 for (i = 0; i < mdp5_kms->num_hwmixers; i++) 169 for (i = 0; i < mdp5_kms->num_hwmixers; i++)
@@ -663,7 +663,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
663 goto fail; 663 goto fail;
664 } 664 }
665 665
666 mdp5_kms->aspace = aspace; 666 kms->aspace = aspace;
667 667
668 ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, 668 ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
669 ARRAY_SIZE(iommu_ports)); 669 ARRAY_SIZE(iommu_ports));
@@ -678,13 +678,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
678 aspace = NULL;; 678 aspace = NULL;;
679 } 679 }
680 680
681 mdp5_kms->id = msm_register_address_space(dev, aspace);
682 if (mdp5_kms->id < 0) {
683 ret = mdp5_kms->id;
684 dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
685 goto fail;
686 }
687
688 ret = modeset_init(mdp5_kms); 681 ret = modeset_init(mdp5_kms);
689 if (ret) { 682 if (ret) {
690 dev_err(&pdev->dev, "modeset_init failed: %d\n", ret); 683 dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 8bdb7ee4983b..17caa0e8c8ae 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -55,10 +55,6 @@ struct mdp5_kms {
55 struct mdp5_state *state; 55 struct mdp5_state *state;
56 struct drm_modeset_lock state_lock; 56 struct drm_modeset_lock state_lock;
57 57
58 /* mapper-id used to request GEM buffer mapped for scanout: */
59 int id;
60 struct msm_gem_address_space *aspace;
61
62 struct mdp5_smp *smp; 58 struct mdp5_smp *smp;
63 struct mdp5_ctl_manager *ctlm; 59 struct mdp5_ctl_manager *ctlm;
64 60
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index abaaac7fea1a..fe3a4de1a433 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -22,8 +22,6 @@
22struct mdp5_plane { 22struct mdp5_plane {
23 struct drm_plane base; 23 struct drm_plane base;
24 24
25 spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
26
27 uint32_t nformats; 25 uint32_t nformats;
28 uint32_t formats[32]; 26 uint32_t formats[32];
29}; 27};
@@ -274,26 +272,28 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
274 struct drm_plane_state *new_state) 272 struct drm_plane_state *new_state)
275{ 273{
276 struct mdp5_kms *mdp5_kms = get_kms(plane); 274 struct mdp5_kms *mdp5_kms = get_kms(plane);
275 struct msm_kms *kms = &mdp5_kms->base.base;
277 struct drm_framebuffer *fb = new_state->fb; 276 struct drm_framebuffer *fb = new_state->fb;
278 277
279 if (!new_state->fb) 278 if (!new_state->fb)
280 return 0; 279 return 0;
281 280
282 DBG("%s: prepare: FB[%u]", plane->name, fb->base.id); 281 DBG("%s: prepare: FB[%u]", plane->name, fb->base.id);
283 return msm_framebuffer_prepare(fb, mdp5_kms->id); 282 return msm_framebuffer_prepare(fb, kms->aspace);
284} 283}
285 284
286static void mdp5_plane_cleanup_fb(struct drm_plane *plane, 285static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
287 struct drm_plane_state *old_state) 286 struct drm_plane_state *old_state)
288{ 287{
289 struct mdp5_kms *mdp5_kms = get_kms(plane); 288 struct mdp5_kms *mdp5_kms = get_kms(plane);
289 struct msm_kms *kms = &mdp5_kms->base.base;
290 struct drm_framebuffer *fb = old_state->fb; 290 struct drm_framebuffer *fb = old_state->fb;
291 291
292 if (!fb) 292 if (!fb)
293 return; 293 return;
294 294
295 DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id); 295 DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
296 msm_framebuffer_cleanup(fb, mdp5_kms->id); 296 msm_framebuffer_cleanup(fb, kms->aspace);
297} 297}
298 298
299#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) 299#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
@@ -500,6 +500,8 @@ static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
500 enum mdp5_pipe pipe, 500 enum mdp5_pipe pipe,
501 struct drm_framebuffer *fb) 501 struct drm_framebuffer *fb)
502{ 502{
503 struct msm_kms *kms = &mdp5_kms->base.base;
504
503 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe), 505 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
504 MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | 506 MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
505 MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1])); 507 MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
@@ -509,13 +511,13 @@ static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
509 MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); 511 MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
510 512
511 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), 513 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
512 msm_framebuffer_iova(fb, mdp5_kms->id, 0)); 514 msm_framebuffer_iova(fb, kms->aspace, 0));
513 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), 515 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
514 msm_framebuffer_iova(fb, mdp5_kms->id, 1)); 516 msm_framebuffer_iova(fb, kms->aspace, 1));
515 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), 517 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
516 msm_framebuffer_iova(fb, mdp5_kms->id, 2)); 518 msm_framebuffer_iova(fb, kms->aspace, 2));
517 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), 519 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
518 msm_framebuffer_iova(fb, mdp5_kms->id, 3)); 520 msm_framebuffer_iova(fb, kms->aspace, 3));
519} 521}
520 522
521/* Note: mdp5_plane->pipe_lock must be locked */ 523/* Note: mdp5_plane->pipe_lock must be locked */
@@ -881,7 +883,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
881 struct drm_crtc *crtc, struct drm_framebuffer *fb, 883 struct drm_crtc *crtc, struct drm_framebuffer *fb,
882 struct drm_rect *src, struct drm_rect *dest) 884 struct drm_rect *src, struct drm_rect *dest)
883{ 885{
884 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
885 struct drm_plane_state *pstate = plane->state; 886 struct drm_plane_state *pstate = plane->state;
886 struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe; 887 struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
887 struct mdp5_kms *mdp5_kms = get_kms(plane); 888 struct mdp5_kms *mdp5_kms = get_kms(plane);
@@ -902,7 +903,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
902 uint32_t src_img_w, src_img_h; 903 uint32_t src_img_w, src_img_h;
903 uint32_t src_x_r; 904 uint32_t src_x_r;
904 int crtc_x_r; 905 int crtc_x_r;
905 unsigned long flags;
906 int ret; 906 int ret;
907 907
908 nplanes = fb->format->num_planes; 908 nplanes = fb->format->num_planes;
@@ -981,8 +981,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
981 hflip = !!(rotation & DRM_MODE_REFLECT_X); 981 hflip = !!(rotation & DRM_MODE_REFLECT_X);
982 vflip = !!(rotation & DRM_MODE_REFLECT_Y); 982 vflip = !!(rotation & DRM_MODE_REFLECT_Y);
983 983
984 spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
985
986 mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe, 984 mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
987 config, hdecm, vdecm, hflip, vflip, 985 config, hdecm, vdecm, hflip, vflip,
988 crtc_x, crtc_y, crtc_w, crtc_h, 986 crtc_x, crtc_y, crtc_w, crtc_h,
@@ -995,8 +993,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
995 src_img_w, src_img_h, 993 src_img_w, src_img_h,
996 src_x_r, src_y, src_w, src_h); 994 src_x_r, src_y, src_w, src_h);
997 995
998 spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
999
1000 plane->fb = fb; 996 plane->fb = fb;
1001 997
1002 return ret; 998 return ret;
@@ -1139,8 +1135,6 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
1139 mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, 1135 mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
1140 ARRAY_SIZE(mdp5_plane->formats), false); 1136 ARRAY_SIZE(mdp5_plane->formats), false);
1141 1137
1142 spin_lock_init(&mdp5_plane->pipe_lock);
1143
1144 if (type == DRM_PLANE_TYPE_CURSOR) 1138 if (type == DRM_PLANE_TYPE_CURSOR)
1145 ret = drm_universal_plane_init(dev, plane, 0xff, 1139 ret = drm_universal_plane_init(dev, plane, 0xff,
1146 &mdp5_cursor_plane_funcs, 1140 &mdp5_cursor_plane_funcs,
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 8994c365e218..1494c407be44 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) 21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
22 22
23Copyright (C) 2013-2015 by the following authors: 23Copyright (C) 2013-2017 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
26 26
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9d498eb81906..f49f6ac5585c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -51,19 +51,6 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
51 .atomic_state_free = msm_atomic_state_free, 51 .atomic_state_free = msm_atomic_state_free,
52}; 52};
53 53
54int msm_register_address_space(struct drm_device *dev,
55 struct msm_gem_address_space *aspace)
56{
57 struct msm_drm_private *priv = dev->dev_private;
58
59 if (WARN_ON(priv->num_aspaces >= ARRAY_SIZE(priv->aspace)))
60 return -EINVAL;
61
62 priv->aspace[priv->num_aspaces] = aspace;
63
64 return priv->num_aspaces++;
65}
66
67#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING 54#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
68static bool reglog = false; 55static bool reglog = false;
69MODULE_PARM_DESC(reglog, "Enable register read/write logging"); 56MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -349,6 +336,7 @@ static int msm_init_vram(struct drm_device *dev)
349 priv->vram.size = size; 336 priv->vram.size = size;
350 337
351 drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); 338 drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
339 spin_lock_init(&priv->vram.lock);
352 340
353 attrs |= DMA_ATTR_NO_KERNEL_MAPPING; 341 attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
354 attrs |= DMA_ATTR_WRITE_COMBINE; 342 attrs |= DMA_ATTR_WRITE_COMBINE;
@@ -699,6 +687,17 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
699 return ret; 687 return ret;
700} 688}
701 689
690static int msm_ioctl_gem_info_iova(struct drm_device *dev,
691 struct drm_gem_object *obj, uint64_t *iova)
692{
693 struct msm_drm_private *priv = dev->dev_private;
694
695 if (!priv->gpu)
696 return -EINVAL;
697
698 return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
699}
700
702static int msm_ioctl_gem_info(struct drm_device *dev, void *data, 701static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
703 struct drm_file *file) 702 struct drm_file *file)
704{ 703{
@@ -706,14 +705,22 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
706 struct drm_gem_object *obj; 705 struct drm_gem_object *obj;
707 int ret = 0; 706 int ret = 0;
708 707
709 if (args->pad) 708 if (args->flags & ~MSM_INFO_FLAGS)
710 return -EINVAL; 709 return -EINVAL;
711 710
712 obj = drm_gem_object_lookup(file, args->handle); 711 obj = drm_gem_object_lookup(file, args->handle);
713 if (!obj) 712 if (!obj)
714 return -ENOENT; 713 return -ENOENT;
715 714
716 args->offset = msm_gem_mmap_offset(obj); 715 if (args->flags & MSM_INFO_IOVA) {
716 uint64_t iova;
717
718 ret = msm_ioctl_gem_info_iova(dev, obj, &iova);
719 if (!ret)
720 args->offset = iova;
721 } else {
722 args->offset = msm_gem_mmap_offset(obj);
723 }
717 724
718 drm_gem_object_unreference_unlocked(obj); 725 drm_gem_object_unreference_unlocked(obj);
719 726
@@ -842,7 +849,7 @@ static struct drm_driver msm_driver = {
842 .debugfs_init = msm_debugfs_init, 849 .debugfs_init = msm_debugfs_init,
843#endif 850#endif
844 .ioctls = msm_ioctls, 851 .ioctls = msm_ioctls,
845 .num_ioctls = DRM_MSM_NUM_IOCTLS, 852 .num_ioctls = ARRAY_SIZE(msm_ioctls),
846 .fops = &fops, 853 .fops = &fops,
847 .name = "msm", 854 .name = "msm",
848 .desc = "MSM Snapdragon DRM", 855 .desc = "MSM Snapdragon DRM",
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 1b26ca626528..fc8d24f7c084 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -123,14 +123,6 @@ struct msm_drm_private {
123 uint32_t pending_crtcs; 123 uint32_t pending_crtcs;
124 wait_queue_head_t pending_crtcs_event; 124 wait_queue_head_t pending_crtcs_event;
125 125
126 /* Registered address spaces.. currently this is fixed per # of
127 * iommu's. Ie. one for display block and one for gpu block.
128 * Eventually, to do per-process gpu pagetables, we'll want one
129 * of these per-process.
130 */
131 unsigned int num_aspaces;
132 struct msm_gem_address_space *aspace[NUM_DOMAINS];
133
134 unsigned int num_planes; 126 unsigned int num_planes;
135 struct drm_plane *planes[16]; 127 struct drm_plane *planes[16];
136 128
@@ -157,6 +149,7 @@ struct msm_drm_private {
157 * and position mm_node->start is in # of pages: 149 * and position mm_node->start is in # of pages:
158 */ 150 */
159 struct drm_mm mm; 151 struct drm_mm mm;
152 spinlock_t lock; /* Protects drm_mm node allocation/removal */
160 } vram; 153 } vram;
161 154
162 struct notifier_block vmap_notifier; 155 struct notifier_block vmap_notifier;
@@ -183,9 +176,6 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
183void msm_atomic_state_clear(struct drm_atomic_state *state); 176void msm_atomic_state_clear(struct drm_atomic_state *state);
184void msm_atomic_state_free(struct drm_atomic_state *state); 177void msm_atomic_state_free(struct drm_atomic_state *state);
185 178
186int msm_register_address_space(struct drm_device *dev,
187 struct msm_gem_address_space *aspace);
188
189void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, 179void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
190 struct msm_gem_vma *vma, struct sg_table *sgt); 180 struct msm_gem_vma *vma, struct sg_table *sgt);
191int msm_gem_map_vma(struct msm_gem_address_space *aspace, 181int msm_gem_map_vma(struct msm_gem_address_space *aspace,
@@ -209,13 +199,14 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
209int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 199int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
210int msm_gem_fault(struct vm_fault *vmf); 200int msm_gem_fault(struct vm_fault *vmf);
211uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); 201uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
212int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, 202int msm_gem_get_iova(struct drm_gem_object *obj,
213 uint64_t *iova); 203 struct msm_gem_address_space *aspace, uint64_t *iova);
214int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova); 204uint64_t msm_gem_iova(struct drm_gem_object *obj,
215uint64_t msm_gem_iova(struct drm_gem_object *obj, int id); 205 struct msm_gem_address_space *aspace);
216struct page **msm_gem_get_pages(struct drm_gem_object *obj); 206struct page **msm_gem_get_pages(struct drm_gem_object *obj);
217void msm_gem_put_pages(struct drm_gem_object *obj); 207void msm_gem_put_pages(struct drm_gem_object *obj);
218void msm_gem_put_iova(struct drm_gem_object *obj, int id); 208void msm_gem_put_iova(struct drm_gem_object *obj,
209 struct msm_gem_address_space *aspace);
219int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 210int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
220 struct drm_mode_create_dumb *args); 211 struct drm_mode_create_dumb *args);
221int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 212int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -229,13 +220,9 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
229 struct dma_buf_attachment *attach, struct sg_table *sg); 220 struct dma_buf_attachment *attach, struct sg_table *sg);
230int msm_gem_prime_pin(struct drm_gem_object *obj); 221int msm_gem_prime_pin(struct drm_gem_object *obj);
231void msm_gem_prime_unpin(struct drm_gem_object *obj); 222void msm_gem_prime_unpin(struct drm_gem_object *obj);
232void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
233void *msm_gem_get_vaddr(struct drm_gem_object *obj); 223void *msm_gem_get_vaddr(struct drm_gem_object *obj);
234void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
235void msm_gem_put_vaddr(struct drm_gem_object *obj); 224void msm_gem_put_vaddr(struct drm_gem_object *obj);
236int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv); 225int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
237void msm_gem_purge(struct drm_gem_object *obj);
238void msm_gem_vunmap(struct drm_gem_object *obj);
239int msm_gem_sync_object(struct drm_gem_object *obj, 226int msm_gem_sync_object(struct drm_gem_object *obj,
240 struct msm_fence_context *fctx, bool exclusive); 227 struct msm_fence_context *fctx, bool exclusive);
241void msm_gem_move_to_active(struct drm_gem_object *obj, 228void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -248,12 +235,17 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
248 uint32_t size, uint32_t flags, uint32_t *handle); 235 uint32_t size, uint32_t flags, uint32_t *handle);
249struct drm_gem_object *msm_gem_new(struct drm_device *dev, 236struct drm_gem_object *msm_gem_new(struct drm_device *dev,
250 uint32_t size, uint32_t flags); 237 uint32_t size, uint32_t flags);
238struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
239 uint32_t size, uint32_t flags);
251struct drm_gem_object *msm_gem_import(struct drm_device *dev, 240struct drm_gem_object *msm_gem_import(struct drm_device *dev,
252 struct dma_buf *dmabuf, struct sg_table *sgt); 241 struct dma_buf *dmabuf, struct sg_table *sgt);
253 242
254int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id); 243int msm_framebuffer_prepare(struct drm_framebuffer *fb,
255void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id); 244 struct msm_gem_address_space *aspace);
256uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane); 245void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
246 struct msm_gem_address_space *aspace);
247uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
248 struct msm_gem_address_space *aspace, int plane);
257struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); 249struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
258const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); 250const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
259struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, 251struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index ba2733a95a4f..6ecb7b170316 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -84,14 +84,15 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
84 * should be fine, since only the scanout (mdpN) side of things needs 84 * should be fine, since only the scanout (mdpN) side of things needs
85 * this, the gpu doesn't care about fb's. 85 * this, the gpu doesn't care about fb's.
86 */ 86 */
87int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id) 87int msm_framebuffer_prepare(struct drm_framebuffer *fb,
88 struct msm_gem_address_space *aspace)
88{ 89{
89 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); 90 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
90 int ret, i, n = fb->format->num_planes; 91 int ret, i, n = fb->format->num_planes;
91 uint64_t iova; 92 uint64_t iova;
92 93
93 for (i = 0; i < n; i++) { 94 for (i = 0; i < n; i++) {
94 ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova); 95 ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
95 DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret); 96 DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
96 if (ret) 97 if (ret)
97 return ret; 98 return ret;
@@ -100,21 +101,23 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
100 return 0; 101 return 0;
101} 102}
102 103
103void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id) 104void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
105 struct msm_gem_address_space *aspace)
104{ 106{
105 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); 107 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
106 int i, n = fb->format->num_planes; 108 int i, n = fb->format->num_planes;
107 109
108 for (i = 0; i < n; i++) 110 for (i = 0; i < n; i++)
109 msm_gem_put_iova(msm_fb->planes[i], id); 111 msm_gem_put_iova(msm_fb->planes[i], aspace);
110} 112}
111 113
112uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane) 114uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
115 struct msm_gem_address_space *aspace, int plane)
113{ 116{
114 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); 117 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
115 if (!msm_fb->planes[plane]) 118 if (!msm_fb->planes[plane])
116 return 0; 119 return 0;
117 return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane]; 120 return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
118} 121}
119 122
120struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane) 123struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index feea8ba4e05b..5ecf4ff9a059 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -20,6 +20,7 @@
20 20
21#include "msm_drv.h" 21#include "msm_drv.h"
22#include "msm_gem.h" 22#include "msm_gem.h"
23#include "msm_kms.h"
23 24
24extern int msm_gem_mmap_obj(struct drm_gem_object *obj, 25extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
25 struct vm_area_struct *vma); 26 struct vm_area_struct *vma);
@@ -73,6 +74,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
73{ 74{
74 struct msm_fbdev *fbdev = to_msm_fbdev(helper); 75 struct msm_fbdev *fbdev = to_msm_fbdev(helper);
75 struct drm_device *dev = helper->dev; 76 struct drm_device *dev = helper->dev;
77 struct msm_drm_private *priv = dev->dev_private;
76 struct drm_framebuffer *fb = NULL; 78 struct drm_framebuffer *fb = NULL;
77 struct fb_info *fbi = NULL; 79 struct fb_info *fbi = NULL;
78 struct drm_mode_fb_cmd2 mode_cmd = {0}; 80 struct drm_mode_fb_cmd2 mode_cmd = {0};
@@ -95,10 +97,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
95 /* allocate backing bo */ 97 /* allocate backing bo */
96 size = mode_cmd.pitches[0] * mode_cmd.height; 98 size = mode_cmd.pitches[0] * mode_cmd.height;
97 DBG("allocating %d bytes for fb %d", size, dev->primary->index); 99 DBG("allocating %d bytes for fb %d", size, dev->primary->index);
98 mutex_lock(&dev->struct_mutex);
99 fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | 100 fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
100 MSM_BO_WC | MSM_BO_STOLEN); 101 MSM_BO_WC | MSM_BO_STOLEN);
101 mutex_unlock(&dev->struct_mutex);
102 if (IS_ERR(fbdev->bo)) { 102 if (IS_ERR(fbdev->bo)) {
103 ret = PTR_ERR(fbdev->bo); 103 ret = PTR_ERR(fbdev->bo);
104 fbdev->bo = NULL; 104 fbdev->bo = NULL;
@@ -124,7 +124,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
124 * in panic (ie. lock-safe, etc) we could avoid pinning the 124 * in panic (ie. lock-safe, etc) we could avoid pinning the
125 * buffer now: 125 * buffer now:
126 */ 126 */
127 ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr); 127 ret = msm_gem_get_iova(fbdev->bo, priv->kms->aspace, &paddr);
128 if (ret) { 128 if (ret) {
129 dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret); 129 dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
130 goto fail_unlock; 130 goto fail_unlock;
@@ -153,7 +153,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
153 153
154 dev->mode_config.fb_base = paddr; 154 dev->mode_config.fb_base = paddr;
155 155
156 fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo); 156 fbi->screen_base = msm_gem_get_vaddr(fbdev->bo);
157 if (IS_ERR(fbi->screen_base)) { 157 if (IS_ERR(fbi->screen_base)) {
158 ret = PTR_ERR(fbi->screen_base); 158 ret = PTR_ERR(fbi->screen_base);
159 goto fail_unlock; 159 goto fail_unlock;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index be77a35a7a8e..65f35544c1ec 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -26,6 +26,9 @@
26#include "msm_gpu.h" 26#include "msm_gpu.h"
27#include "msm_mmu.h" 27#include "msm_mmu.h"
28 28
29static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
30
31
29static dma_addr_t physaddr(struct drm_gem_object *obj) 32static dma_addr_t physaddr(struct drm_gem_object *obj)
30{ 33{
31 struct msm_gem_object *msm_obj = to_msm_bo(obj); 34 struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -41,8 +44,7 @@ static bool use_pages(struct drm_gem_object *obj)
41} 44}
42 45
43/* allocate pages from VRAM carveout, used when no IOMMU: */ 46/* allocate pages from VRAM carveout, used when no IOMMU: */
44static struct page **get_pages_vram(struct drm_gem_object *obj, 47static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
45 int npages)
46{ 48{
47 struct msm_gem_object *msm_obj = to_msm_bo(obj); 49 struct msm_gem_object *msm_obj = to_msm_bo(obj);
48 struct msm_drm_private *priv = obj->dev->dev_private; 50 struct msm_drm_private *priv = obj->dev->dev_private;
@@ -54,7 +56,9 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
54 if (!p) 56 if (!p)
55 return ERR_PTR(-ENOMEM); 57 return ERR_PTR(-ENOMEM);
56 58
59 spin_lock(&priv->vram.lock);
57 ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages); 60 ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
61 spin_unlock(&priv->vram.lock);
58 if (ret) { 62 if (ret) {
59 kvfree(p); 63 kvfree(p);
60 return ERR_PTR(ret); 64 return ERR_PTR(ret);
@@ -69,7 +73,6 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
69 return p; 73 return p;
70} 74}
71 75
72/* called with dev->struct_mutex held */
73static struct page **get_pages(struct drm_gem_object *obj) 76static struct page **get_pages(struct drm_gem_object *obj)
74{ 77{
75 struct msm_gem_object *msm_obj = to_msm_bo(obj); 78 struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -109,6 +112,18 @@ static struct page **get_pages(struct drm_gem_object *obj)
109 return msm_obj->pages; 112 return msm_obj->pages;
110} 113}
111 114
115static void put_pages_vram(struct drm_gem_object *obj)
116{
117 struct msm_gem_object *msm_obj = to_msm_bo(obj);
118 struct msm_drm_private *priv = obj->dev->dev_private;
119
120 spin_lock(&priv->vram.lock);
121 drm_mm_remove_node(msm_obj->vram_node);
122 spin_unlock(&priv->vram.lock);
123
124 kvfree(msm_obj->pages);
125}
126
112static void put_pages(struct drm_gem_object *obj) 127static void put_pages(struct drm_gem_object *obj)
113{ 128{
114 struct msm_gem_object *msm_obj = to_msm_bo(obj); 129 struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -125,10 +140,8 @@ static void put_pages(struct drm_gem_object *obj)
125 140
126 if (use_pages(obj)) 141 if (use_pages(obj))
127 drm_gem_put_pages(obj, msm_obj->pages, true, false); 142 drm_gem_put_pages(obj, msm_obj->pages, true, false);
128 else { 143 else
129 drm_mm_remove_node(msm_obj->vram_node); 144 put_pages_vram(obj);
130 kvfree(msm_obj->pages);
131 }
132 145
133 msm_obj->pages = NULL; 146 msm_obj->pages = NULL;
134 } 147 }
@@ -136,11 +149,18 @@ static void put_pages(struct drm_gem_object *obj)
136 149
137struct page **msm_gem_get_pages(struct drm_gem_object *obj) 150struct page **msm_gem_get_pages(struct drm_gem_object *obj)
138{ 151{
139 struct drm_device *dev = obj->dev; 152 struct msm_gem_object *msm_obj = to_msm_bo(obj);
140 struct page **p; 153 struct page **p;
141 mutex_lock(&dev->struct_mutex); 154
155 mutex_lock(&msm_obj->lock);
156
157 if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
158 mutex_unlock(&msm_obj->lock);
159 return ERR_PTR(-EBUSY);
160 }
161
142 p = get_pages(obj); 162 p = get_pages(obj);
143 mutex_unlock(&dev->struct_mutex); 163 mutex_unlock(&msm_obj->lock);
144 return p; 164 return p;
145} 165}
146 166
@@ -195,28 +215,25 @@ int msm_gem_fault(struct vm_fault *vmf)
195{ 215{
196 struct vm_area_struct *vma = vmf->vma; 216 struct vm_area_struct *vma = vmf->vma;
197 struct drm_gem_object *obj = vma->vm_private_data; 217 struct drm_gem_object *obj = vma->vm_private_data;
198 struct drm_device *dev = obj->dev; 218 struct msm_gem_object *msm_obj = to_msm_bo(obj);
199 struct msm_drm_private *priv = dev->dev_private;
200 struct page **pages; 219 struct page **pages;
201 unsigned long pfn; 220 unsigned long pfn;
202 pgoff_t pgoff; 221 pgoff_t pgoff;
203 int ret; 222 int ret;
204 223
205 /* This should only happen if userspace tries to pass a mmap'd 224 /*
206 * but unfaulted gem bo vaddr into submit ioctl, triggering 225 * vm_ops.open/drm_gem_mmap_obj and close get and put
207 * a page fault while struct_mutex is already held. This is 226 * a reference on obj. So, we dont need to hold one here.
208 * not a valid use-case so just bail.
209 */
210 if (priv->struct_mutex_task == current)
211 return VM_FAULT_SIGBUS;
212
213 /* Make sure we don't parallel update on a fault, nor move or remove
214 * something from beneath our feet
215 */ 227 */
216 ret = mutex_lock_interruptible(&dev->struct_mutex); 228 ret = mutex_lock_interruptible(&msm_obj->lock);
217 if (ret) 229 if (ret)
218 goto out; 230 goto out;
219 231
232 if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
233 mutex_unlock(&msm_obj->lock);
234 return VM_FAULT_SIGBUS;
235 }
236
220 /* make sure we have pages attached now */ 237 /* make sure we have pages attached now */
221 pages = get_pages(obj); 238 pages = get_pages(obj);
222 if (IS_ERR(pages)) { 239 if (IS_ERR(pages)) {
@@ -235,7 +252,7 @@ int msm_gem_fault(struct vm_fault *vmf)
235 ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); 252 ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
236 253
237out_unlock: 254out_unlock:
238 mutex_unlock(&dev->struct_mutex); 255 mutex_unlock(&msm_obj->lock);
239out: 256out:
240 switch (ret) { 257 switch (ret) {
241 case -EAGAIN: 258 case -EAGAIN:
@@ -259,9 +276,10 @@ out:
259static uint64_t mmap_offset(struct drm_gem_object *obj) 276static uint64_t mmap_offset(struct drm_gem_object *obj)
260{ 277{
261 struct drm_device *dev = obj->dev; 278 struct drm_device *dev = obj->dev;
279 struct msm_gem_object *msm_obj = to_msm_bo(obj);
262 int ret; 280 int ret;
263 281
264 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 282 WARN_ON(!mutex_is_locked(&msm_obj->lock));
265 283
266 /* Make it mmapable */ 284 /* Make it mmapable */
267 ret = drm_gem_create_mmap_offset(obj); 285 ret = drm_gem_create_mmap_offset(obj);
@@ -277,95 +295,140 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
277uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) 295uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
278{ 296{
279 uint64_t offset; 297 uint64_t offset;
280 mutex_lock(&obj->dev->struct_mutex); 298 struct msm_gem_object *msm_obj = to_msm_bo(obj);
299
300 mutex_lock(&msm_obj->lock);
281 offset = mmap_offset(obj); 301 offset = mmap_offset(obj);
282 mutex_unlock(&obj->dev->struct_mutex); 302 mutex_unlock(&msm_obj->lock);
283 return offset; 303 return offset;
284} 304}
285 305
286static void 306static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
287put_iova(struct drm_gem_object *obj) 307 struct msm_gem_address_space *aspace)
288{ 308{
289 struct drm_device *dev = obj->dev;
290 struct msm_drm_private *priv = obj->dev->dev_private;
291 struct msm_gem_object *msm_obj = to_msm_bo(obj); 309 struct msm_gem_object *msm_obj = to_msm_bo(obj);
292 int id; 310 struct msm_gem_vma *vma;
293 311
294 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 312 WARN_ON(!mutex_is_locked(&msm_obj->lock));
295 313
296 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { 314 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
297 if (!priv->aspace[id]) 315 if (!vma)
298 continue; 316 return ERR_PTR(-ENOMEM);
299 msm_gem_unmap_vma(priv->aspace[id], 317
300 &msm_obj->domain[id], msm_obj->sgt); 318 vma->aspace = aspace;
301 } 319
320 list_add_tail(&vma->list, &msm_obj->vmas);
321
322 return vma;
302} 323}
303 324
304/* should be called under struct_mutex.. although it can be called 325static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
305 * from atomic context without struct_mutex to acquire an extra 326 struct msm_gem_address_space *aspace)
306 * iova ref if you know one is already held.
307 *
308 * That means when I do eventually need to add support for unpinning
309 * the refcnt counter needs to be atomic_t.
310 */
311int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
312 uint64_t *iova)
313{ 327{
314 struct msm_gem_object *msm_obj = to_msm_bo(obj); 328 struct msm_gem_object *msm_obj = to_msm_bo(obj);
315 int ret = 0; 329 struct msm_gem_vma *vma;
316 330
317 if (!msm_obj->domain[id].iova) { 331 WARN_ON(!mutex_is_locked(&msm_obj->lock));
318 struct msm_drm_private *priv = obj->dev->dev_private;
319 struct page **pages = get_pages(obj);
320 332
321 if (IS_ERR(pages)) 333 list_for_each_entry(vma, &msm_obj->vmas, list) {
322 return PTR_ERR(pages); 334 if (vma->aspace == aspace)
323 335 return vma;
324 if (iommu_present(&platform_bus_type)) {
325 ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
326 msm_obj->sgt, obj->size >> PAGE_SHIFT);
327 } else {
328 msm_obj->domain[id].iova = physaddr(obj);
329 }
330 } 336 }
331 337
332 if (!ret) 338 return NULL;
333 *iova = msm_obj->domain[id].iova; 339}
334 340
335 return ret; 341static void del_vma(struct msm_gem_vma *vma)
342{
343 if (!vma)
344 return;
345
346 list_del(&vma->list);
347 kfree(vma);
348}
349
350/* Called with msm_obj->lock locked */
351static void
352put_iova(struct drm_gem_object *obj)
353{
354 struct msm_gem_object *msm_obj = to_msm_bo(obj);
355 struct msm_gem_vma *vma, *tmp;
356
357 WARN_ON(!mutex_is_locked(&msm_obj->lock));
358
359 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
360 msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
361 del_vma(vma);
362 }
336} 363}
337 364
338/* get iova, taking a reference. Should have a matching put */ 365/* get iova, taking a reference. Should have a matching put */
339int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova) 366int msm_gem_get_iova(struct drm_gem_object *obj,
367 struct msm_gem_address_space *aspace, uint64_t *iova)
340{ 368{
341 struct msm_gem_object *msm_obj = to_msm_bo(obj); 369 struct msm_gem_object *msm_obj = to_msm_bo(obj);
342 int ret; 370 struct msm_gem_vma *vma;
371 int ret = 0;
343 372
344 /* this is safe right now because we don't unmap until the 373 mutex_lock(&msm_obj->lock);
345 * bo is deleted: 374
346 */ 375 if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
347 if (msm_obj->domain[id].iova) { 376 mutex_unlock(&msm_obj->lock);
348 *iova = msm_obj->domain[id].iova; 377 return -EBUSY;
349 return 0;
350 } 378 }
351 379
352 mutex_lock(&obj->dev->struct_mutex); 380 vma = lookup_vma(obj, aspace);
353 ret = msm_gem_get_iova_locked(obj, id, iova); 381
354 mutex_unlock(&obj->dev->struct_mutex); 382 if (!vma) {
383 struct page **pages;
384
385 vma = add_vma(obj, aspace);
386 if (IS_ERR(vma))
387 return PTR_ERR(vma);
388
389 pages = get_pages(obj);
390 if (IS_ERR(pages)) {
391 ret = PTR_ERR(pages);
392 goto fail;
393 }
394
395 ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
396 obj->size >> PAGE_SHIFT);
397 if (ret)
398 goto fail;
399 }
400
401 *iova = vma->iova;
402
403 mutex_unlock(&msm_obj->lock);
404 return 0;
405
406fail:
407 del_vma(vma);
408
409 mutex_unlock(&msm_obj->lock);
355 return ret; 410 return ret;
356} 411}
357 412
358/* get iova without taking a reference, used in places where you have 413/* get iova without taking a reference, used in places where you have
359 * already done a 'msm_gem_get_iova()'. 414 * already done a 'msm_gem_get_iova()'.
360 */ 415 */
361uint64_t msm_gem_iova(struct drm_gem_object *obj, int id) 416uint64_t msm_gem_iova(struct drm_gem_object *obj,
417 struct msm_gem_address_space *aspace)
362{ 418{
363 struct msm_gem_object *msm_obj = to_msm_bo(obj); 419 struct msm_gem_object *msm_obj = to_msm_bo(obj);
364 WARN_ON(!msm_obj->domain[id].iova); 420 struct msm_gem_vma *vma;
365 return msm_obj->domain[id].iova; 421
422 mutex_lock(&msm_obj->lock);
423 vma = lookup_vma(obj, aspace);
424 mutex_unlock(&msm_obj->lock);
425 WARN_ON(!vma);
426
427 return vma ? vma->iova : 0;
366} 428}
367 429
368void msm_gem_put_iova(struct drm_gem_object *obj, int id) 430void msm_gem_put_iova(struct drm_gem_object *obj,
431 struct msm_gem_address_space *aspace)
369{ 432{
370 // XXX TODO .. 433 // XXX TODO ..
371 // NOTE: probably don't need a _locked() version.. we wouldn't 434 // NOTE: probably don't need a _locked() version.. we wouldn't
@@ -405,45 +468,57 @@ fail:
405 return ret; 468 return ret;
406} 469}
407 470
408void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj) 471void *msm_gem_get_vaddr(struct drm_gem_object *obj)
409{ 472{
410 struct msm_gem_object *msm_obj = to_msm_bo(obj); 473 struct msm_gem_object *msm_obj = to_msm_bo(obj);
411 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); 474 int ret = 0;
475
476 mutex_lock(&msm_obj->lock);
477
478 if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
479 mutex_unlock(&msm_obj->lock);
480 return ERR_PTR(-EBUSY);
481 }
482
483 /* increment vmap_count *before* vmap() call, so shrinker can
484 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
485 * This guarantees that we won't try to msm_gem_vunmap() this
486 * same object from within the vmap() call (while we already
487 * hold msm_obj->lock)
488 */
489 msm_obj->vmap_count++;
490
412 if (!msm_obj->vaddr) { 491 if (!msm_obj->vaddr) {
413 struct page **pages = get_pages(obj); 492 struct page **pages = get_pages(obj);
414 if (IS_ERR(pages)) 493 if (IS_ERR(pages)) {
415 return ERR_CAST(pages); 494 ret = PTR_ERR(pages);
495 goto fail;
496 }
416 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, 497 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
417 VM_MAP, pgprot_writecombine(PAGE_KERNEL)); 498 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
418 if (msm_obj->vaddr == NULL) 499 if (msm_obj->vaddr == NULL) {
419 return ERR_PTR(-ENOMEM); 500 ret = -ENOMEM;
501 goto fail;
502 }
420 } 503 }
421 msm_obj->vmap_count++; 504
505 mutex_unlock(&msm_obj->lock);
422 return msm_obj->vaddr; 506 return msm_obj->vaddr;
423}
424 507
425void *msm_gem_get_vaddr(struct drm_gem_object *obj) 508fail:
426{ 509 msm_obj->vmap_count--;
427 void *ret; 510 mutex_unlock(&msm_obj->lock);
428 mutex_lock(&obj->dev->struct_mutex); 511 return ERR_PTR(ret);
429 ret = msm_gem_get_vaddr_locked(obj);
430 mutex_unlock(&obj->dev->struct_mutex);
431 return ret;
432} 512}
433 513
434void msm_gem_put_vaddr_locked(struct drm_gem_object *obj) 514void msm_gem_put_vaddr(struct drm_gem_object *obj)
435{ 515{
436 struct msm_gem_object *msm_obj = to_msm_bo(obj); 516 struct msm_gem_object *msm_obj = to_msm_bo(obj);
437 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); 517
518 mutex_lock(&msm_obj->lock);
438 WARN_ON(msm_obj->vmap_count < 1); 519 WARN_ON(msm_obj->vmap_count < 1);
439 msm_obj->vmap_count--; 520 msm_obj->vmap_count--;
440} 521 mutex_unlock(&msm_obj->lock);
441
442void msm_gem_put_vaddr(struct drm_gem_object *obj)
443{
444 mutex_lock(&obj->dev->struct_mutex);
445 msm_gem_put_vaddr_locked(obj);
446 mutex_unlock(&obj->dev->struct_mutex);
447} 522}
448 523
449/* Update madvise status, returns true if not purged, else 524/* Update madvise status, returns true if not purged, else
@@ -453,15 +528,21 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
453{ 528{
454 struct msm_gem_object *msm_obj = to_msm_bo(obj); 529 struct msm_gem_object *msm_obj = to_msm_bo(obj);
455 530
531 mutex_lock(&msm_obj->lock);
532
456 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); 533 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
457 534
458 if (msm_obj->madv != __MSM_MADV_PURGED) 535 if (msm_obj->madv != __MSM_MADV_PURGED)
459 msm_obj->madv = madv; 536 msm_obj->madv = madv;
460 537
461 return (msm_obj->madv != __MSM_MADV_PURGED); 538 madv = msm_obj->madv;
539
540 mutex_unlock(&msm_obj->lock);
541
542 return (madv != __MSM_MADV_PURGED);
462} 543}
463 544
464void msm_gem_purge(struct drm_gem_object *obj) 545void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
465{ 546{
466 struct drm_device *dev = obj->dev; 547 struct drm_device *dev = obj->dev;
467 struct msm_gem_object *msm_obj = to_msm_bo(obj); 548 struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -470,9 +551,11 @@ void msm_gem_purge(struct drm_gem_object *obj)
470 WARN_ON(!is_purgeable(msm_obj)); 551 WARN_ON(!is_purgeable(msm_obj));
471 WARN_ON(obj->import_attach); 552 WARN_ON(obj->import_attach);
472 553
554 mutex_lock_nested(&msm_obj->lock, subclass);
555
473 put_iova(obj); 556 put_iova(obj);
474 557
475 msm_gem_vunmap(obj); 558 msm_gem_vunmap_locked(obj);
476 559
477 put_pages(obj); 560 put_pages(obj);
478 561
@@ -490,12 +573,16 @@ void msm_gem_purge(struct drm_gem_object *obj)
490 573
491 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 574 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
492 0, (loff_t)-1); 575 0, (loff_t)-1);
576
577 mutex_unlock(&msm_obj->lock);
493} 578}
494 579
495void msm_gem_vunmap(struct drm_gem_object *obj) 580static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
496{ 581{
497 struct msm_gem_object *msm_obj = to_msm_bo(obj); 582 struct msm_gem_object *msm_obj = to_msm_bo(obj);
498 583
584 WARN_ON(!mutex_is_locked(&msm_obj->lock));
585
499 if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj))) 586 if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
500 return; 587 return;
501 588
@@ -503,6 +590,15 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
503 msm_obj->vaddr = NULL; 590 msm_obj->vaddr = NULL;
504} 591}
505 592
593void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
594{
595 struct msm_gem_object *msm_obj = to_msm_bo(obj);
596
597 mutex_lock_nested(&msm_obj->lock, subclass);
598 msm_gem_vunmap_locked(obj);
599 mutex_unlock(&msm_obj->lock);
600}
601
506/* must be called before _move_to_active().. */ 602/* must be called before _move_to_active().. */
507int msm_gem_sync_object(struct drm_gem_object *obj, 603int msm_gem_sync_object(struct drm_gem_object *obj,
508 struct msm_fence_context *fctx, bool exclusive) 604 struct msm_fence_context *fctx, bool exclusive)
@@ -619,13 +715,12 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
619 struct msm_gem_object *msm_obj = to_msm_bo(obj); 715 struct msm_gem_object *msm_obj = to_msm_bo(obj);
620 struct reservation_object *robj = msm_obj->resv; 716 struct reservation_object *robj = msm_obj->resv;
621 struct reservation_object_list *fobj; 717 struct reservation_object_list *fobj;
622 struct msm_drm_private *priv = obj->dev->dev_private;
623 struct dma_fence *fence; 718 struct dma_fence *fence;
719 struct msm_gem_vma *vma;
624 uint64_t off = drm_vma_node_start(&obj->vma_node); 720 uint64_t off = drm_vma_node_start(&obj->vma_node);
625 const char *madv; 721 const char *madv;
626 unsigned id;
627 722
628 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); 723 mutex_lock(&msm_obj->lock);
629 724
630 switch (msm_obj->madv) { 725 switch (msm_obj->madv) {
631 case __MSM_MADV_PURGED: 726 case __MSM_MADV_PURGED:
@@ -645,8 +740,9 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
645 obj->name, kref_read(&obj->refcount), 740 obj->name, kref_read(&obj->refcount),
646 off, msm_obj->vaddr); 741 off, msm_obj->vaddr);
647 742
648 for (id = 0; id < priv->num_aspaces; id++) 743 /* FIXME: we need to print the address space here too */
649 seq_printf(m, " %08llx", msm_obj->domain[id].iova); 744 list_for_each_entry(vma, &msm_obj->vmas, list)
745 seq_printf(m, " %08llx", vma->iova);
650 746
651 seq_printf(m, " %zu%s\n", obj->size, madv); 747 seq_printf(m, " %zu%s\n", obj->size, madv);
652 748
@@ -665,6 +761,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
665 if (fence) 761 if (fence)
666 describe_fence(fence, "Exclusive", m); 762 describe_fence(fence, "Exclusive", m);
667 rcu_read_unlock(); 763 rcu_read_unlock();
764
765 mutex_unlock(&msm_obj->lock);
668} 766}
669 767
670void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) 768void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -697,6 +795,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
697 795
698 list_del(&msm_obj->mm_list); 796 list_del(&msm_obj->mm_list);
699 797
798 mutex_lock(&msm_obj->lock);
799
700 put_iova(obj); 800 put_iova(obj);
701 801
702 if (obj->import_attach) { 802 if (obj->import_attach) {
@@ -711,7 +811,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
711 811
712 drm_prime_gem_destroy(obj, msm_obj->sgt); 812 drm_prime_gem_destroy(obj, msm_obj->sgt);
713 } else { 813 } else {
714 msm_gem_vunmap(obj); 814 msm_gem_vunmap_locked(obj);
715 put_pages(obj); 815 put_pages(obj);
716 } 816 }
717 817
@@ -720,6 +820,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
720 820
721 drm_gem_object_release(obj); 821 drm_gem_object_release(obj);
722 822
823 mutex_unlock(&msm_obj->lock);
723 kfree(msm_obj); 824 kfree(msm_obj);
724} 825}
725 826
@@ -730,14 +831,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
730 struct drm_gem_object *obj; 831 struct drm_gem_object *obj;
731 int ret; 832 int ret;
732 833
733 ret = mutex_lock_interruptible(&dev->struct_mutex);
734 if (ret)
735 return ret;
736
737 obj = msm_gem_new(dev, size, flags); 834 obj = msm_gem_new(dev, size, flags);
738 835
739 mutex_unlock(&dev->struct_mutex);
740
741 if (IS_ERR(obj)) 836 if (IS_ERR(obj))
742 return PTR_ERR(obj); 837 return PTR_ERR(obj);
743 838
@@ -752,13 +847,11 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
752static int msm_gem_new_impl(struct drm_device *dev, 847static int msm_gem_new_impl(struct drm_device *dev,
753 uint32_t size, uint32_t flags, 848 uint32_t size, uint32_t flags,
754 struct reservation_object *resv, 849 struct reservation_object *resv,
755 struct drm_gem_object **obj) 850 struct drm_gem_object **obj,
851 bool struct_mutex_locked)
756{ 852{
757 struct msm_drm_private *priv = dev->dev_private; 853 struct msm_drm_private *priv = dev->dev_private;
758 struct msm_gem_object *msm_obj; 854 struct msm_gem_object *msm_obj;
759 bool use_vram = false;
760
761 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
762 855
763 switch (flags & MSM_BO_CACHE_MASK) { 856 switch (flags & MSM_BO_CACHE_MASK) {
764 case MSM_BO_UNCACHED: 857 case MSM_BO_UNCACHED:
@@ -771,20 +864,11 @@ static int msm_gem_new_impl(struct drm_device *dev,
771 return -EINVAL; 864 return -EINVAL;
772 } 865 }
773 866
774 if (!iommu_present(&platform_bus_type))
775 use_vram = true;
776 else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
777 use_vram = true;
778
779 if (WARN_ON(use_vram && !priv->vram.size))
780 return -EINVAL;
781
782 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); 867 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
783 if (!msm_obj) 868 if (!msm_obj)
784 return -ENOMEM; 869 return -ENOMEM;
785 870
786 if (use_vram) 871 mutex_init(&msm_obj->lock);
787 msm_obj->vram_node = &msm_obj->domain[0].node;
788 872
789 msm_obj->flags = flags; 873 msm_obj->flags = flags;
790 msm_obj->madv = MSM_MADV_WILLNEED; 874 msm_obj->madv = MSM_MADV_WILLNEED;
@@ -797,48 +881,96 @@ static int msm_gem_new_impl(struct drm_device *dev,
797 } 881 }
798 882
799 INIT_LIST_HEAD(&msm_obj->submit_entry); 883 INIT_LIST_HEAD(&msm_obj->submit_entry);
800 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); 884 INIT_LIST_HEAD(&msm_obj->vmas);
885
886 if (struct_mutex_locked) {
887 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
888 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
889 } else {
890 mutex_lock(&dev->struct_mutex);
891 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
892 mutex_unlock(&dev->struct_mutex);
893 }
801 894
802 *obj = &msm_obj->base; 895 *obj = &msm_obj->base;
803 896
804 return 0; 897 return 0;
805} 898}
806 899
807struct drm_gem_object *msm_gem_new(struct drm_device *dev, 900static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
808 uint32_t size, uint32_t flags) 901 uint32_t size, uint32_t flags, bool struct_mutex_locked)
809{ 902{
903 struct msm_drm_private *priv = dev->dev_private;
810 struct drm_gem_object *obj = NULL; 904 struct drm_gem_object *obj = NULL;
905 bool use_vram = false;
811 int ret; 906 int ret;
812 907
813 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
814
815 size = PAGE_ALIGN(size); 908 size = PAGE_ALIGN(size);
816 909
910 if (!iommu_present(&platform_bus_type))
911 use_vram = true;
912 else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
913 use_vram = true;
914
915 if (WARN_ON(use_vram && !priv->vram.size))
916 return ERR_PTR(-EINVAL);
917
817 /* Disallow zero sized objects as they make the underlying 918 /* Disallow zero sized objects as they make the underlying
818 * infrastructure grumpy 919 * infrastructure grumpy
819 */ 920 */
820 if (size == 0) 921 if (size == 0)
821 return ERR_PTR(-EINVAL); 922 return ERR_PTR(-EINVAL);
822 923
823 ret = msm_gem_new_impl(dev, size, flags, NULL, &obj); 924 ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
824 if (ret) 925 if (ret)
825 goto fail; 926 goto fail;
826 927
827 if (use_pages(obj)) { 928 if (use_vram) {
929 struct msm_gem_vma *vma;
930 struct page **pages;
931
932 vma = add_vma(obj, NULL);
933 if (IS_ERR(vma)) {
934 ret = PTR_ERR(vma);
935 goto fail;
936 }
937
938 to_msm_bo(obj)->vram_node = &vma->node;
939
940 drm_gem_private_object_init(dev, obj, size);
941
942 pages = get_pages(obj);
943 if (IS_ERR(pages)) {
944 ret = PTR_ERR(pages);
945 goto fail;
946 }
947
948 vma->iova = physaddr(obj);
949 } else {
828 ret = drm_gem_object_init(dev, obj, size); 950 ret = drm_gem_object_init(dev, obj, size);
829 if (ret) 951 if (ret)
830 goto fail; 952 goto fail;
831 } else {
832 drm_gem_private_object_init(dev, obj, size);
833 } 953 }
834 954
835 return obj; 955 return obj;
836 956
837fail: 957fail:
838 drm_gem_object_unreference(obj); 958 drm_gem_object_unreference_unlocked(obj);
839 return ERR_PTR(ret); 959 return ERR_PTR(ret);
840} 960}
841 961
962struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
963 uint32_t size, uint32_t flags)
964{
965 return _msm_gem_new(dev, size, flags, true);
966}
967
968struct drm_gem_object *msm_gem_new(struct drm_device *dev,
969 uint32_t size, uint32_t flags)
970{
971 return _msm_gem_new(dev, size, flags, false);
972}
973
842struct drm_gem_object *msm_gem_import(struct drm_device *dev, 974struct drm_gem_object *msm_gem_import(struct drm_device *dev,
843 struct dma_buf *dmabuf, struct sg_table *sgt) 975 struct dma_buf *dmabuf, struct sg_table *sgt)
844{ 976{
@@ -855,11 +987,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
855 987
856 size = PAGE_ALIGN(dmabuf->size); 988 size = PAGE_ALIGN(dmabuf->size);
857 989
858 /* Take mutex so we can modify the inactive list in msm_gem_new_impl */ 990 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
859 mutex_lock(&dev->struct_mutex);
860 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
861 mutex_unlock(&dev->struct_mutex);
862
863 if (ret) 991 if (ret)
864 goto fail; 992 goto fail;
865 993
@@ -868,17 +996,22 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
868 npages = size / PAGE_SIZE; 996 npages = size / PAGE_SIZE;
869 997
870 msm_obj = to_msm_bo(obj); 998 msm_obj = to_msm_bo(obj);
999 mutex_lock(&msm_obj->lock);
871 msm_obj->sgt = sgt; 1000 msm_obj->sgt = sgt;
872 msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 1001 msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
873 if (!msm_obj->pages) { 1002 if (!msm_obj->pages) {
1003 mutex_unlock(&msm_obj->lock);
874 ret = -ENOMEM; 1004 ret = -ENOMEM;
875 goto fail; 1005 goto fail;
876 } 1006 }
877 1007
878 ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); 1008 ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
879 if (ret) 1009 if (ret) {
1010 mutex_unlock(&msm_obj->lock);
880 goto fail; 1011 goto fail;
1012 }
881 1013
1014 mutex_unlock(&msm_obj->lock);
882 return obj; 1015 return obj;
883 1016
884fail: 1017fail:
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 1b4cf20043ea..91c210d2359c 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -31,6 +31,7 @@ struct msm_gem_address_space {
31 * and position mm_node->start is in # of pages: 31 * and position mm_node->start is in # of pages:
32 */ 32 */
33 struct drm_mm mm; 33 struct drm_mm mm;
34 spinlock_t lock; /* Protects drm_mm node allocation/removal */
34 struct msm_mmu *mmu; 35 struct msm_mmu *mmu;
35 struct kref kref; 36 struct kref kref;
36}; 37};
@@ -38,6 +39,8 @@ struct msm_gem_address_space {
38struct msm_gem_vma { 39struct msm_gem_vma {
39 struct drm_mm_node node; 40 struct drm_mm_node node;
40 uint64_t iova; 41 uint64_t iova;
42 struct msm_gem_address_space *aspace;
43 struct list_head list; /* node in msm_gem_object::vmas */
41}; 44};
42 45
43struct msm_gem_object { 46struct msm_gem_object {
@@ -77,7 +80,7 @@ struct msm_gem_object {
77 struct sg_table *sgt; 80 struct sg_table *sgt;
78 void *vaddr; 81 void *vaddr;
79 82
80 struct msm_gem_vma domain[NUM_DOMAINS]; 83 struct list_head vmas; /* list of msm_gem_vma */
81 84
82 /* normally (resv == &_resv) except for imported bo's */ 85 /* normally (resv == &_resv) except for imported bo's */
83 struct reservation_object *resv; 86 struct reservation_object *resv;
@@ -87,6 +90,7 @@ struct msm_gem_object {
87 * an IOMMU. Also used for stolen/splashscreen buffer. 90 * an IOMMU. Also used for stolen/splashscreen buffer.
88 */ 91 */
89 struct drm_mm_node *vram_node; 92 struct drm_mm_node *vram_node;
93 struct mutex lock; /* Protects resources associated with bo */
90}; 94};
91#define to_msm_bo(x) container_of(x, struct msm_gem_object, base) 95#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
92 96
@@ -97,6 +101,7 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
97 101
98static inline bool is_purgeable(struct msm_gem_object *msm_obj) 102static inline bool is_purgeable(struct msm_gem_object *msm_obj)
99{ 103{
104 WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
100 return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt && 105 return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
101 !msm_obj->base.dma_buf && !msm_obj->base.import_attach; 106 !msm_obj->base.dma_buf && !msm_obj->base.import_attach;
102} 107}
@@ -106,6 +111,25 @@ static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
106 return (msm_obj->vmap_count == 0) && msm_obj->vaddr; 111 return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
107} 112}
108 113
114/* The shrinker can be triggered while we hold objA->lock, and need
115 * to grab objB->lock to purge it. Lockdep just sees these as a single
116 * class of lock, so we use subclasses to teach it the difference.
117 *
118 * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
119 * OBJ_LOCK_SHRINKER is used by shrinker.
120 *
121 * It is *essential* that we never go down paths that could trigger the
122 * shrinker for a purgable object. This is ensured by checking that
123 * msm_obj->madv == MSM_MADV_WILLNEED.
124 */
125enum msm_gem_lock {
126 OBJ_LOCK_NORMAL,
127 OBJ_LOCK_SHRINKER,
128};
129
130void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
131void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
132
109/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, 133/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
110 * associated with the cmdstream submission for synchronization (and 134 * associated with the cmdstream submission for synchronization (and
111 * make it easier to unwind when things go wrong, etc). This only 135 * make it easier to unwind when things go wrong, etc). This only
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index ab1dd020eb04..b72d8e6cd51d 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -20,6 +20,18 @@
20 20
21static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock) 21static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
22{ 22{
23 /* NOTE: we are *closer* to being able to get rid of
24 * mutex_trylock_recursive().. the msm_gem code itself does
25 * not need struct_mutex, although codepaths that can trigger
26 * shrinker are still called in code-paths that hold the
27 * struct_mutex.
28 *
29 * Also, msm_obj->madv is protected by struct_mutex.
30 *
31 * The next step is probably split out a seperate lock for
32 * protecting inactive_list, so that shrinker does not need
33 * struct_mutex.
34 */
23 switch (mutex_trylock_recursive(&dev->struct_mutex)) { 35 switch (mutex_trylock_recursive(&dev->struct_mutex)) {
24 case MUTEX_TRYLOCK_FAILED: 36 case MUTEX_TRYLOCK_FAILED:
25 return false; 37 return false;
@@ -77,7 +89,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
77 if (freed >= sc->nr_to_scan) 89 if (freed >= sc->nr_to_scan)
78 break; 90 break;
79 if (is_purgeable(msm_obj)) { 91 if (is_purgeable(msm_obj)) {
80 msm_gem_purge(&msm_obj->base); 92 msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
81 freed += msm_obj->base.size >> PAGE_SHIFT; 93 freed += msm_obj->base.size >> PAGE_SHIFT;
82 } 94 }
83 } 95 }
@@ -106,7 +118,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
106 118
107 list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) { 119 list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
108 if (is_vunmapable(msm_obj)) { 120 if (is_vunmapable(msm_obj)) {
109 msm_gem_vunmap(&msm_obj->base); 121 msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
110 /* since we don't know any better, lets bail after a few 122 /* since we don't know any better, lets bail after a few
111 * and if necessary the shrinker will be invoked again. 123 * and if necessary the shrinker will be invoked again.
112 * Seems better than unmapping *everything* 124 * Seems better than unmapping *everything*
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 7832e6421d25..6bfca7470141 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -31,11 +31,14 @@
31#define BO_PINNED 0x2000 31#define BO_PINNED 0x2000
32 32
33static struct msm_gem_submit *submit_create(struct drm_device *dev, 33static struct msm_gem_submit *submit_create(struct drm_device *dev,
34 struct msm_gpu *gpu, int nr_bos, int nr_cmds) 34 struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
35{ 35{
36 struct msm_gem_submit *submit; 36 struct msm_gem_submit *submit;
37 int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) + 37 uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
38 (nr_cmds * sizeof(*submit->cmd)); 38 (nr_cmds * sizeof(submit->cmd[0]));
39
40 if (sz > SIZE_MAX)
41 return NULL;
39 42
40 submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 43 submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
41 if (!submit) 44 if (!submit)
@@ -158,7 +161,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
158 struct msm_gem_object *msm_obj = submit->bos[i].obj; 161 struct msm_gem_object *msm_obj = submit->bos[i].obj;
159 162
160 if (submit->bos[i].flags & BO_PINNED) 163 if (submit->bos[i].flags & BO_PINNED)
161 msm_gem_put_iova(&msm_obj->base, submit->gpu->id); 164 msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
162 165
163 if (submit->bos[i].flags & BO_LOCKED) 166 if (submit->bos[i].flags & BO_LOCKED)
164 ww_mutex_unlock(&msm_obj->resv->lock); 167 ww_mutex_unlock(&msm_obj->resv->lock);
@@ -245,8 +248,8 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
245 uint64_t iova; 248 uint64_t iova;
246 249
247 /* if locking succeeded, pin bo: */ 250 /* if locking succeeded, pin bo: */
248 ret = msm_gem_get_iova_locked(&msm_obj->base, 251 ret = msm_gem_get_iova(&msm_obj->base,
249 submit->gpu->id, &iova); 252 submit->gpu->aspace, &iova);
250 253
251 if (ret) 254 if (ret)
252 break; 255 break;
@@ -301,7 +304,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
301 /* For now, just map the entire thing. Eventually we probably 304 /* For now, just map the entire thing. Eventually we probably
302 * to do it page-by-page, w/ kmap() if not vmap()d.. 305 * to do it page-by-page, w/ kmap() if not vmap()d..
303 */ 306 */
304 ptr = msm_gem_get_vaddr_locked(&obj->base); 307 ptr = msm_gem_get_vaddr(&obj->base);
305 308
306 if (IS_ERR(ptr)) { 309 if (IS_ERR(ptr)) {
307 ret = PTR_ERR(ptr); 310 ret = PTR_ERR(ptr);
@@ -359,7 +362,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
359 } 362 }
360 363
361out: 364out:
362 msm_gem_put_vaddr_locked(&obj->base); 365 msm_gem_put_vaddr(&obj->base);
363 366
364 return ret; 367 return ret;
365} 368}
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index f285d7e210db..c36321bc8714 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -50,7 +50,9 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
50 aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size); 50 aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
51 } 51 }
52 52
53 spin_lock(&aspace->lock);
53 drm_mm_remove_node(&vma->node); 54 drm_mm_remove_node(&vma->node);
55 spin_unlock(&aspace->lock);
54 56
55 vma->iova = 0; 57 vma->iova = 0;
56 58
@@ -63,10 +65,15 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
63{ 65{
64 int ret; 66 int ret;
65 67
66 if (WARN_ON(drm_mm_node_allocated(&vma->node))) 68 spin_lock(&aspace->lock);
69 if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
70 spin_unlock(&aspace->lock);
67 return 0; 71 return 0;
72 }
68 73
69 ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages); 74 ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
75 spin_unlock(&aspace->lock);
76
70 if (ret) 77 if (ret)
71 return ret; 78 return ret;
72 79
@@ -94,6 +101,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
94 if (!aspace) 101 if (!aspace)
95 return ERR_PTR(-ENOMEM); 102 return ERR_PTR(-ENOMEM);
96 103
104 spin_lock_init(&aspace->lock);
97 aspace->name = name; 105 aspace->name = name;
98 aspace->mmu = msm_iommu_new(dev, domain); 106 aspace->mmu = msm_iommu_new(dev, domain);
99 107
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 0fdc88d79ca8..9f3dbc236ab3 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -203,6 +203,8 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
203{ 203{
204 int ret; 204 int ret;
205 205
206 WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
207
206 if (!gpu->needs_hw_init) 208 if (!gpu->needs_hw_init)
207 return 0; 209 return 0;
208 210
@@ -414,7 +416,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
414 struct msm_gem_object *msm_obj = submit->bos[i].obj; 416 struct msm_gem_object *msm_obj = submit->bos[i].obj;
415 /* move to inactive: */ 417 /* move to inactive: */
416 msm_gem_move_to_inactive(&msm_obj->base); 418 msm_gem_move_to_inactive(&msm_obj->base);
417 msm_gem_put_iova(&msm_obj->base, gpu->id); 419 msm_gem_put_iova(&msm_obj->base, gpu->aspace);
418 drm_gem_object_unreference(&msm_obj->base); 420 drm_gem_object_unreference(&msm_obj->base);
419 } 421 }
420 422
@@ -495,8 +497,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
495 497
496 /* submit takes a reference to the bo and iova until retired: */ 498 /* submit takes a reference to the bo and iova until retired: */
497 drm_gem_object_reference(&msm_obj->base); 499 drm_gem_object_reference(&msm_obj->base);
498 msm_gem_get_iova_locked(&msm_obj->base, 500 msm_gem_get_iova(&msm_obj->base,
499 submit->gpu->id, &iova); 501 submit->gpu->aspace, &iova);
500 502
501 if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) 503 if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
502 msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); 504 msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
@@ -562,7 +564,7 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
562 564
563int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, 565int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
564 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, 566 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
565 const char *name, const char *ioname, const char *irqname, int ringsz) 567 const char *name, struct msm_gpu_config *config)
566{ 568{
567 struct iommu_domain *iommu; 569 struct iommu_domain *iommu;
568 int ret; 570 int ret;
@@ -593,14 +595,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
593 595
594 596
595 /* Map registers: */ 597 /* Map registers: */
596 gpu->mmio = msm_ioremap(pdev, ioname, name); 598 gpu->mmio = msm_ioremap(pdev, config->ioname, name);
597 if (IS_ERR(gpu->mmio)) { 599 if (IS_ERR(gpu->mmio)) {
598 ret = PTR_ERR(gpu->mmio); 600 ret = PTR_ERR(gpu->mmio);
599 goto fail; 601 goto fail;
600 } 602 }
601 603
602 /* Get Interrupt: */ 604 /* Get Interrupt: */
603 gpu->irq = platform_get_irq_byname(pdev, irqname); 605 gpu->irq = platform_get_irq_byname(pdev, config->irqname);
604 if (gpu->irq < 0) { 606 if (gpu->irq < 0) {
605 ret = gpu->irq; 607 ret = gpu->irq;
606 dev_err(drm->dev, "failed to get irq: %d\n", ret); 608 dev_err(drm->dev, "failed to get irq: %d\n", ret);
@@ -640,9 +642,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
640 */ 642 */
641 iommu = iommu_domain_alloc(&platform_bus_type); 643 iommu = iommu_domain_alloc(&platform_bus_type);
642 if (iommu) { 644 if (iommu) {
643 /* TODO 32b vs 64b address space.. */ 645 iommu->geometry.aperture_start = config->va_start;
644 iommu->geometry.aperture_start = SZ_16M; 646 iommu->geometry.aperture_end = config->va_end;
645 iommu->geometry.aperture_end = 0xffffffff;
646 647
647 dev_info(drm->dev, "%s: using IOMMU\n", name); 648 dev_info(drm->dev, "%s: using IOMMU\n", name);
648 gpu->aspace = msm_gem_address_space_create(&pdev->dev, 649 gpu->aspace = msm_gem_address_space_create(&pdev->dev,
@@ -658,13 +659,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
658 } else { 659 } else {
659 dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); 660 dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
660 } 661 }
661 gpu->id = msm_register_address_space(drm, gpu->aspace);
662
663 662
664 /* Create ringbuffer: */ 663 /* Create ringbuffer: */
665 mutex_lock(&drm->struct_mutex); 664 gpu->rb = msm_ringbuffer_new(gpu, config->ringsz);
666 gpu->rb = msm_ringbuffer_new(gpu, ringsz);
667 mutex_unlock(&drm->struct_mutex);
668 if (IS_ERR(gpu->rb)) { 665 if (IS_ERR(gpu->rb)) {
669 ret = PTR_ERR(gpu->rb); 666 ret = PTR_ERR(gpu->rb);
670 gpu->rb = NULL; 667 gpu->rb = NULL;
@@ -693,7 +690,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
693 690
694 if (gpu->rb) { 691 if (gpu->rb) {
695 if (gpu->rb_iova) 692 if (gpu->rb_iova)
696 msm_gem_put_iova(gpu->rb->bo, gpu->id); 693 msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
697 msm_ringbuffer_destroy(gpu->rb); 694 msm_ringbuffer_destroy(gpu->rb);
698 } 695 }
699 696
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index aa3241000455..df4e2771fb85 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -28,6 +28,14 @@
28struct msm_gem_submit; 28struct msm_gem_submit;
29struct msm_gpu_perfcntr; 29struct msm_gpu_perfcntr;
30 30
31struct msm_gpu_config {
32 const char *ioname;
33 const char *irqname;
34 uint64_t va_start;
35 uint64_t va_end;
36 unsigned int ringsz;
37};
38
31/* So far, with hardware that I've seen to date, we can have: 39/* So far, with hardware that I've seen to date, we can have:
32 * + zero, one, or two z180 2d cores 40 * + zero, one, or two z180 2d cores
33 * + a3xx or a2xx 3d core, which share a common CP (the firmware 41 * + a3xx or a2xx 3d core, which share a common CP (the firmware
@@ -50,7 +58,6 @@ struct msm_gpu_funcs {
50 void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit, 58 void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
51 struct msm_file_private *ctx); 59 struct msm_file_private *ctx);
52 void (*flush)(struct msm_gpu *gpu); 60 void (*flush)(struct msm_gpu *gpu);
53 bool (*idle)(struct msm_gpu *gpu);
54 irqreturn_t (*irq)(struct msm_gpu *irq); 61 irqreturn_t (*irq)(struct msm_gpu *irq);
55 uint32_t (*last_fence)(struct msm_gpu *gpu); 62 uint32_t (*last_fence)(struct msm_gpu *gpu);
56 void (*recover)(struct msm_gpu *gpu); 63 void (*recover)(struct msm_gpu *gpu);
@@ -99,7 +106,6 @@ struct msm_gpu {
99 int irq; 106 int irq;
100 107
101 struct msm_gem_address_space *aspace; 108 struct msm_gem_address_space *aspace;
102 int id;
103 109
104 /* Power Control: */ 110 /* Power Control: */
105 struct regulator *gpu_reg, *gpu_cx; 111 struct regulator *gpu_reg, *gpu_cx;
@@ -209,7 +215,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
209 215
210int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, 216int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
211 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, 217 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
212 const char *name, const char *ioname, const char *irqname, int ringsz); 218 const char *name, struct msm_gpu_config *config);
219
213void msm_gpu_cleanup(struct msm_gpu *gpu); 220void msm_gpu_cleanup(struct msm_gpu *gpu);
214 221
215struct msm_gpu *adreno_load_gpu(struct drm_device *dev); 222struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index faa22c7c5423..a8f2ba5e5f07 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -72,6 +72,9 @@ struct msm_kms {
72 72
73 /* irq number to be passed on to drm_irq_install */ 73 /* irq number to be passed on to drm_irq_install */
74 int irq; 74 int irq;
75
76 /* mapper-id used to request GEM buffer mapped for scanout: */
77 struct msm_gem_address_space *aspace;
75}; 78};
76 79
77/** 80/**
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 0e81faab2c50..0366b8092f97 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -268,7 +268,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
268 struct msm_gem_object *obj = submit->bos[idx].obj; 268 struct msm_gem_object *obj = submit->bos[idx].obj;
269 const char *buf; 269 const char *buf;
270 270
271 buf = msm_gem_get_vaddr_locked(&obj->base); 271 buf = msm_gem_get_vaddr(&obj->base);
272 if (IS_ERR(buf)) 272 if (IS_ERR(buf))
273 return; 273 return;
274 274
@@ -283,7 +283,7 @@ static void snapshot_buf(struct msm_rd_state *rd,
283 (uint32_t[3]){ iova, size, iova >> 32 }, 12); 283 (uint32_t[3]){ iova, size, iova >> 32 }, 12);
284 rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size); 284 rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
285 285
286 msm_gem_put_vaddr_locked(&obj->base); 286 msm_gem_put_vaddr(&obj->base);
287} 287}
288 288
289/* called under struct_mutex */ 289/* called under struct_mutex */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 67b34e069abf..791bca3c6a9c 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -40,7 +40,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
40 goto fail; 40 goto fail;
41 } 41 }
42 42
43 ring->start = msm_gem_get_vaddr_locked(ring->bo); 43 ring->start = msm_gem_get_vaddr(ring->bo);
44 if (IS_ERR(ring->start)) { 44 if (IS_ERR(ring->start)) {
45 ret = PTR_ERR(ring->start); 45 ret = PTR_ERR(ring->start);
46 goto fail; 46 goto fail;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 432480ff9d22..3178ba0c537c 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
3393 rdev->pdev->subsystem_vendor == 0x103c && 3393 rdev->pdev->subsystem_vendor == 0x103c &&
3394 rdev->pdev->subsystem_device == 0x280a) 3394 rdev->pdev->subsystem_device == 0x280a)
3395 return; 3395 return;
3396 /* quirk for rs4xx Toshiba Sattellite L20-183 latop to make it resume
3397 * - it hangs on resume inside the dynclk 1 table.
3398 */
3399 if (rdev->family == CHIP_RS400 &&
3400 rdev->pdev->subsystem_vendor == 0x1179 &&
3401 rdev->pdev->subsystem_device == 0xff31)
3402 return;
3396 3403
3397 /* DYN CLK 1 */ 3404 /* DYN CLK 1 */
3398 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); 3405 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 6ecf42783d4b..0a6444d72000 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
136 * https://bugzilla.kernel.org/show_bug.cgi?id=51381 136 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
137 */ 137 */
138 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, 138 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
139 /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
140 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
141 */
142 { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
139 /* macbook pro 8.2 */ 143 /* macbook pro 8.2 */
140 { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, 144 { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
141 { 0, 0, 0, 0, 0 }, 145 { 0, 0, 0, 0, 0 },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 8ca1e8ce0af2..4f9a3938189a 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -319,6 +319,9 @@
319#define USB_VENDOR_ID_DELCOM 0x0fc5 319#define USB_VENDOR_ID_DELCOM 0x0fc5
320#define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080 320#define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080
321 321
322#define USB_VENDOR_ID_DELL 0x413c
323#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
324
322#define USB_VENDOR_ID_DELORME 0x1163 325#define USB_VENDOR_ID_DELORME 0x1163
323#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 326#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
324#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 327#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 1d6c997b3001..20b40ad26325 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -349,7 +349,6 @@ static int magicmouse_raw_event(struct hid_device *hdev,
349 349
350 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { 350 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
351 magicmouse_emit_buttons(msc, clicks & 3); 351 magicmouse_emit_buttons(msc, clicks & 3);
352 input_mt_report_pointer_emulation(input, true);
353 input_report_rel(input, REL_X, x); 352 input_report_rel(input, REL_X, x);
354 input_report_rel(input, REL_Y, y); 353 input_report_rel(input, REL_Y, y);
355 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ 354 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
@@ -389,16 +388,16 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
389 __clear_bit(BTN_RIGHT, input->keybit); 388 __clear_bit(BTN_RIGHT, input->keybit);
390 __clear_bit(BTN_MIDDLE, input->keybit); 389 __clear_bit(BTN_MIDDLE, input->keybit);
391 __set_bit(BTN_MOUSE, input->keybit); 390 __set_bit(BTN_MOUSE, input->keybit);
391 __set_bit(BTN_TOOL_FINGER, input->keybit);
392 __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
393 __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
394 __set_bit(BTN_TOOL_QUADTAP, input->keybit);
395 __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
396 __set_bit(BTN_TOUCH, input->keybit);
397 __set_bit(INPUT_PROP_POINTER, input->propbit);
392 __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); 398 __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
393 } 399 }
394 400
395 __set_bit(BTN_TOOL_FINGER, input->keybit);
396 __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
397 __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
398 __set_bit(BTN_TOOL_QUADTAP, input->keybit);
399 __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
400 __set_bit(BTN_TOUCH, input->keybit);
401 __set_bit(INPUT_PROP_POINTER, input->propbit);
402 401
403 __set_bit(EV_ABS, input->evbit); 402 __set_bit(EV_ABS, input->evbit);
404 403
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 6316498b7812..a88e7c7bea0a 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -85,6 +85,7 @@ static const struct hid_blacklist {
85 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 85 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
86 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 86 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
87 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, 87 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
88 { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
88 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 89 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
89 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, 90 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
90 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT }, 91 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 95ed17183e73..54a47b40546f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -734,9 +734,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
734 * the first read operation, otherwise the first read cost 734 * the first read operation, otherwise the first read cost
735 * one extra clock cycle. 735 * one extra clock cycle.
736 */ 736 */
737 temp = readb(i2c_imx->base + IMX_I2C_I2CR); 737 temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
738 temp |= I2CR_MTX; 738 temp |= I2CR_MTX;
739 writeb(temp, i2c_imx->base + IMX_I2C_I2CR); 739 imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
740 } 740 }
741 msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); 741 msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
742 742
@@ -857,9 +857,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
857 * the first read operation, otherwise the first read cost 857 * the first read operation, otherwise the first read cost
858 * one extra clock cycle. 858 * one extra clock cycle.
859 */ 859 */
860 temp = readb(i2c_imx->base + IMX_I2C_I2CR); 860 temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
861 temp |= I2CR_MTX; 861 temp |= I2CR_MTX;
862 writeb(temp, i2c_imx->base + IMX_I2C_I2CR); 862 imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
863 } 863 }
864 } else if (i == (msgs->len - 2)) { 864 } else if (i == (msgs->len - 2)) {
865 dev_dbg(&i2c_imx->adapter.dev, 865 dev_dbg(&i2c_imx->adapter.dev,
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index e37d37273182..f600f3a7a3c6 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -248,7 +248,8 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev)
248 248
249 if (!btns_desc) { 249 if (!btns_desc) {
250 dev_err(dev, "ACPI Button Descriptors not found\n"); 250 dev_err(dev, "ACPI Button Descriptors not found\n");
251 return ERR_PTR(-ENODEV); 251 button_info = ERR_PTR(-ENODEV);
252 goto out;
252 } 253 }
253 254
254 /* The first package describes the collection */ 255 /* The first package describes the collection */
@@ -264,24 +265,31 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev)
264 } 265 }
265 if (collection_uid == -1) { 266 if (collection_uid == -1) {
266 dev_err(dev, "Invalid Button Collection Descriptor\n"); 267 dev_err(dev, "Invalid Button Collection Descriptor\n");
267 return ERR_PTR(-ENODEV); 268 button_info = ERR_PTR(-ENODEV);
269 goto out;
268 } 270 }
269 271
270 /* There are package.count - 1 buttons + 1 terminating empty entry */ 272 /* There are package.count - 1 buttons + 1 terminating empty entry */
271 button_info = devm_kcalloc(dev, btns_desc->package.count, 273 button_info = devm_kcalloc(dev, btns_desc->package.count,
272 sizeof(*button_info), GFP_KERNEL); 274 sizeof(*button_info), GFP_KERNEL);
273 if (!button_info) 275 if (!button_info) {
274 return ERR_PTR(-ENOMEM); 276 button_info = ERR_PTR(-ENOMEM);
277 goto out;
278 }
275 279
276 /* Parse the button descriptors */ 280 /* Parse the button descriptors */
277 for (i = 1, btn = 0; i < btns_desc->package.count; i++, btn++) { 281 for (i = 1, btn = 0; i < btns_desc->package.count; i++, btn++) {
278 if (soc_button_parse_btn_desc(dev, 282 if (soc_button_parse_btn_desc(dev,
279 &btns_desc->package.elements[i], 283 &btns_desc->package.elements[i],
280 collection_uid, 284 collection_uid,
281 &button_info[btn])) 285 &button_info[btn])) {
282 return ERR_PTR(-ENODEV); 286 button_info = ERR_PTR(-ENODEV);
287 goto out;
288 }
283 } 289 }
284 290
291out:
292 kfree(buf.pointer);
285 return button_info; 293 return button_info;
286} 294}
287 295
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index dea63e2db3e6..f5206e2c767e 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -31,9 +31,6 @@
31#define F54_GET_REPORT 1 31#define F54_GET_REPORT 1
32#define F54_FORCE_CAL 2 32#define F54_FORCE_CAL 2
33 33
34/* Fixed sizes of reports */
35#define F54_QUERY_LEN 27
36
37/* F54 capabilities */ 34/* F54 capabilities */
38#define F54_CAP_BASELINE (1 << 2) 35#define F54_CAP_BASELINE (1 << 2)
39#define F54_CAP_IMAGE8 (1 << 3) 36#define F54_CAP_IMAGE8 (1 << 3)
@@ -95,7 +92,6 @@ struct rmi_f54_reports {
95struct f54_data { 92struct f54_data {
96 struct rmi_function *fn; 93 struct rmi_function *fn;
97 94
98 u8 qry[F54_QUERY_LEN];
99 u8 num_rx_electrodes; 95 u8 num_rx_electrodes;
100 u8 num_tx_electrodes; 96 u8 num_tx_electrodes;
101 u8 capabilities; 97 u8 capabilities;
@@ -632,22 +628,23 @@ static int rmi_f54_detect(struct rmi_function *fn)
632{ 628{
633 int error; 629 int error;
634 struct f54_data *f54; 630 struct f54_data *f54;
631 u8 buf[6];
635 632
636 f54 = dev_get_drvdata(&fn->dev); 633 f54 = dev_get_drvdata(&fn->dev);
637 634
638 error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr, 635 error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
639 &f54->qry, sizeof(f54->qry)); 636 buf, sizeof(buf));
640 if (error) { 637 if (error) {
641 dev_err(&fn->dev, "%s: Failed to query F54 properties\n", 638 dev_err(&fn->dev, "%s: Failed to query F54 properties\n",
642 __func__); 639 __func__);
643 return error; 640 return error;
644 } 641 }
645 642
646 f54->num_rx_electrodes = f54->qry[0]; 643 f54->num_rx_electrodes = buf[0];
647 f54->num_tx_electrodes = f54->qry[1]; 644 f54->num_tx_electrodes = buf[1];
648 f54->capabilities = f54->qry[2]; 645 f54->capabilities = buf[2];
649 f54->clock_rate = f54->qry[3] | (f54->qry[4] << 8); 646 f54->clock_rate = buf[3] | (buf[4] << 8);
650 f54->family = f54->qry[5]; 647 f54->family = buf[5];
651 648
652 rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_rx_electrodes: %d\n", 649 rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_rx_electrodes: %d\n",
653 f54->num_rx_electrodes); 650 f54->num_rx_electrodes);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 09720d950686..f932a83b4990 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -723,6 +723,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
723 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), 723 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
724 }, 724 },
725 }, 725 },
726 {
727 /* Fujitsu UH554 laptop */
728 .matches = {
729 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
730 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
731 },
732 },
726 { } 733 { }
727}; 734};
728 735
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index eb7fbe159963..929f8558bf1c 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -140,7 +140,7 @@ static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
140} 140}
141 141
142#ifdef CONFIG_CLKSRC_MIPS_GIC 142#ifdef CONFIG_CLKSRC_MIPS_GIC
143u64 gic_read_count(void) 143u64 notrace gic_read_count(void)
144{ 144{
145 unsigned int hi, hi2, lo; 145 unsigned int hi, hi2, lo;
146 146
@@ -167,7 +167,7 @@ unsigned int gic_get_count_width(void)
167 return bits; 167 return bits;
168} 168}
169 169
170void gic_write_compare(u64 cnt) 170void notrace gic_write_compare(u64 cnt)
171{ 171{
172 if (mips_cm_is64) { 172 if (mips_cm_is64) {
173 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt); 173 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
@@ -179,7 +179,7 @@ void gic_write_compare(u64 cnt)
179 } 179 }
180} 180}
181 181
182void gic_write_cpu_compare(u64 cnt, int cpu) 182void notrace gic_write_cpu_compare(u64 cnt, int cpu)
183{ 183{
184 unsigned long flags; 184 unsigned long flags;
185 185
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 7910bfe50da4..93b181088168 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1105,10 +1105,13 @@ static void schedule_autocommit(struct dm_integrity_c *ic)
1105static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) 1105static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1106{ 1106{
1107 struct bio *bio; 1107 struct bio *bio;
1108 spin_lock_irq(&ic->endio_wait.lock); 1108 unsigned long flags;
1109
1110 spin_lock_irqsave(&ic->endio_wait.lock, flags);
1109 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1111 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1110 bio_list_add(&ic->flush_bio_list, bio); 1112 bio_list_add(&ic->flush_bio_list, bio);
1111 spin_unlock_irq(&ic->endio_wait.lock); 1113 spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1114
1112 queue_work(ic->commit_wq, &ic->commit_work); 1115 queue_work(ic->commit_wq, &ic->commit_work);
1113} 1116}
1114 1117
@@ -3040,6 +3043,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3040 ti->error = "The device is too small"; 3043 ti->error = "The device is too small";
3041 goto bad; 3044 goto bad;
3042 } 3045 }
3046 if (ti->len > ic->provided_data_sectors) {
3047 r = -EINVAL;
3048 ti->error = "Not enough provided sectors for requested mapping size";
3049 goto bad;
3050 }
3043 3051
3044 if (!buffer_sectors) 3052 if (!buffer_sectors)
3045 buffer_sectors = 1; 3053 buffer_sectors = 1;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 3702e502466d..8d5ca30f6551 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -317,8 +317,8 @@ static void do_region(int op, int op_flags, unsigned region,
317 else if (op == REQ_OP_WRITE_SAME) 317 else if (op == REQ_OP_WRITE_SAME)
318 special_cmd_max_sectors = q->limits.max_write_same_sectors; 318 special_cmd_max_sectors = q->limits.max_write_same_sectors;
319 if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || 319 if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
320 op == REQ_OP_WRITE_SAME) && 320 op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
321 special_cmd_max_sectors == 0) { 321 atomic_inc(&io->count);
322 dec_count(io, region, -EOPNOTSUPP); 322 dec_count(io, region, -EOPNOTSUPP);
323 return; 323 return;
324 } 324 }
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index e61c45047c25..4da8858856fb 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,6 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
145 145
146struct dm_raid1_bio_record { 146struct dm_raid1_bio_record {
147 struct mirror *m; 147 struct mirror *m;
148 /* if details->bi_bdev == NULL, details were not saved */
148 struct dm_bio_details details; 149 struct dm_bio_details details;
149 region_t write_region; 150 region_t write_region;
150}; 151};
@@ -1198,6 +1199,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
1198 struct dm_raid1_bio_record *bio_record = 1199 struct dm_raid1_bio_record *bio_record =
1199 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); 1200 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
1200 1201
1202 bio_record->details.bi_bdev = NULL;
1203
1201 if (rw == WRITE) { 1204 if (rw == WRITE) {
1202 /* Save region for mirror_end_io() handler */ 1205 /* Save region for mirror_end_io() handler */
1203 bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); 1206 bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@@ -1256,12 +1259,22 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
1256 } 1259 }
1257 1260
1258 if (error == -EOPNOTSUPP) 1261 if (error == -EOPNOTSUPP)
1259 return error; 1262 goto out;
1260 1263
1261 if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD)) 1264 if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
1262 return error; 1265 goto out;
1263 1266
1264 if (unlikely(error)) { 1267 if (unlikely(error)) {
1268 if (!bio_record->details.bi_bdev) {
1269 /*
1270 * There wasn't enough memory to record necessary
1271 * information for a retry or there was no other
1272 * mirror in-sync.
1273 */
1274 DMERR_LIMIT("Mirror read failed.");
1275 return -EIO;
1276 }
1277
1265 m = bio_record->m; 1278 m = bio_record->m;
1266 1279
1267 DMERR("Mirror read failed from %s. Trying alternative device.", 1280 DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1277,6 +1290,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
1277 bd = &bio_record->details; 1290 bd = &bio_record->details;
1278 1291
1279 dm_bio_restore(bd, bio); 1292 dm_bio_restore(bd, bio);
1293 bio_record->details.bi_bdev = NULL;
1280 bio->bi_error = 0; 1294 bio->bi_error = 0;
1281 1295
1282 queue_bio(ms, bio, rw); 1296 queue_bio(ms, bio, rw);
@@ -1285,6 +1299,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
1285 DMERR("All replicated volumes dead, failing I/O"); 1299 DMERR("All replicated volumes dead, failing I/O");
1286 } 1300 }
1287 1301
1302out:
1303 bio_record->details.bi_bdev = NULL;
1304
1288 return error; 1305 return error;
1289} 1306}
1290 1307
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 75488e65cd96..8d46e3ad9529 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -245,8 +245,7 @@ static int arizona_poll_reg(struct arizona *arizona,
245 int ret; 245 int ret;
246 246
247 ret = regmap_read_poll_timeout(arizona->regmap, 247 ret = regmap_read_poll_timeout(arizona->regmap,
248 ARIZONA_INTERRUPT_RAW_STATUS_5, val, 248 reg, val, ((val & mask) == target),
249 ((val & mask) == target),
250 ARIZONA_REG_POLL_DELAY_US, 249 ARIZONA_REG_POLL_DELAY_US,
251 timeout_ms * 1000); 250 timeout_ms * 1000);
252 if (ret) 251 if (ret)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index ea1bfcf1870a..53309f659951 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2171,9 +2171,10 @@ static int cxgb_up(struct adapter *adap)
2171{ 2171{
2172 int err; 2172 int err;
2173 2173
2174 mutex_lock(&uld_mutex);
2174 err = setup_sge_queues(adap); 2175 err = setup_sge_queues(adap);
2175 if (err) 2176 if (err)
2176 goto out; 2177 goto rel_lock;
2177 err = setup_rss(adap); 2178 err = setup_rss(adap);
2178 if (err) 2179 if (err)
2179 goto freeq; 2180 goto freeq;
@@ -2197,7 +2198,6 @@ static int cxgb_up(struct adapter *adap)
2197 goto irq_err; 2198 goto irq_err;
2198 } 2199 }
2199 2200
2200 mutex_lock(&uld_mutex);
2201 enable_rx(adap); 2201 enable_rx(adap);
2202 t4_sge_start(adap); 2202 t4_sge_start(adap);
2203 t4_intr_enable(adap); 2203 t4_intr_enable(adap);
@@ -2210,13 +2210,15 @@ static int cxgb_up(struct adapter *adap)
2210#endif 2210#endif
2211 /* Initialize hash mac addr list*/ 2211 /* Initialize hash mac addr list*/
2212 INIT_LIST_HEAD(&adap->mac_hlist); 2212 INIT_LIST_HEAD(&adap->mac_hlist);
2213 out:
2214 return err; 2213 return err;
2214
2215 irq_err: 2215 irq_err:
2216 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); 2216 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2217 freeq: 2217 freeq:
2218 t4_free_sge_resources(adap); 2218 t4_free_sge_resources(adap);
2219 goto out; 2219 rel_lock:
2220 mutex_unlock(&uld_mutex);
2221 return err;
2220} 2222}
2221 2223
2222static void cxgb_down(struct adapter *adapter) 2224static void cxgb_down(struct adapter *adapter)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 9a520e4f0df9..290ad0563320 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2647,7 +2647,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
2647 priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ 2647 priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
2648 2648
2649 /* device used for DMA mapping */ 2649 /* device used for DMA mapping */
2650 arch_setup_dma_ops(dev, 0, 0, NULL, false); 2650 set_dma_ops(dev, get_dma_ops(&pdev->dev));
2651 err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); 2651 err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
2652 if (err) { 2652 if (err) {
2653 dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); 2653 dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 0b31f8502ada..6e67d22fd0d5 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
623 goto no_mem; 623 goto no_mem;
624 } 624 }
625 625
626 set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
627
626 ret = platform_device_add_data(pdev, &data, sizeof(data)); 628 ret = platform_device_add_data(pdev, &data, sizeof(data));
627 if (ret) 629 if (ret)
628 goto err; 630 goto err;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index b8fab149690f..e95795b3c841 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -288,9 +288,15 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
288 288
289 /* Force 1000M Link, Default is 0x0200 */ 289 /* Force 1000M Link, Default is 0x0200 */
290 phy_write(phy_dev, 7, 0x20C); 290 phy_write(phy_dev, 7, 0x20C);
291 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
292 291
293 /* Enable PHY loop-back */ 292 /* Powerup Fiber */
293 phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
294 val = phy_read(phy_dev, COPPER_CONTROL_REG);
295 val &= ~PHY_POWER_DOWN;
296 phy_write(phy_dev, COPPER_CONTROL_REG, val);
297
298 /* Enable Phy Loopback */
299 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
294 val = phy_read(phy_dev, COPPER_CONTROL_REG); 300 val = phy_read(phy_dev, COPPER_CONTROL_REG);
295 val |= PHY_LOOP_BACK; 301 val |= PHY_LOOP_BACK;
296 val &= ~PHY_POWER_DOWN; 302 val &= ~PHY_POWER_DOWN;
@@ -299,6 +305,12 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
299 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); 305 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
300 phy_write(phy_dev, 1, 0x400); 306 phy_write(phy_dev, 1, 0x400);
301 phy_write(phy_dev, 7, 0x200); 307 phy_write(phy_dev, 7, 0x200);
308
309 phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
310 val = phy_read(phy_dev, COPPER_CONTROL_REG);
311 val |= PHY_POWER_DOWN;
312 phy_write(phy_dev, COPPER_CONTROL_REG, val);
313
302 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); 314 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
303 phy_write(phy_dev, 9, 0xF00); 315 phy_write(phy_dev, 9, 0xF00);
304 316
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 8209affa75c3..16486dff1493 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1242,11 +1242,11 @@ static int mlx5e_get_ts_info(struct net_device *dev,
1242 SOF_TIMESTAMPING_RX_HARDWARE | 1242 SOF_TIMESTAMPING_RX_HARDWARE |
1243 SOF_TIMESTAMPING_RAW_HARDWARE; 1243 SOF_TIMESTAMPING_RAW_HARDWARE;
1244 1244
1245 info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) | 1245 info->tx_types = BIT(HWTSTAMP_TX_OFF) |
1246 (BIT(1) << HWTSTAMP_TX_ON); 1246 BIT(HWTSTAMP_TX_ON);
1247 1247
1248 info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) | 1248 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
1249 (BIT(1) << HWTSTAMP_FILTER_ALL); 1249 BIT(HWTSTAMP_FILTER_ALL);
1250 1250
1251 return 0; 1251 return 0;
1252} 1252}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 41cd22a223dc..277f4de30375 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4241,7 +4241,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
4241 return netdev; 4241 return netdev;
4242 4242
4243err_cleanup_nic: 4243err_cleanup_nic:
4244 profile->cleanup(priv); 4244 if (profile->cleanup)
4245 profile->cleanup(priv);
4245 free_netdev(netdev); 4246 free_netdev(netdev);
4246 4247
4247 return NULL; 4248 return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 79462c0368a0..46984a52a94b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -791,6 +791,8 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
791 params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); 791 params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
792 params->num_tc = 1; 792 params->num_tc = 1;
793 params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 793 params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
794
795 mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
794} 796}
795 797
796static void mlx5e_build_rep_netdev(struct net_device *netdev) 798static void mlx5e_build_rep_netdev(struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ec63158ab643..9df9fc0d26f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -895,7 +895,6 @@ static struct mlx5_fields fields[] = {
895 {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])}, 895 {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])},
896 {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)}, 896 {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)},
897 897
898 {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
899 {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)}, 898 {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)},
900 {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)}, 899 {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)},
901 {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)}, 900 {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)},
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f991f669047e..a53e982a6863 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -906,21 +906,34 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
906 return 0; 906 return 0;
907} 907}
908 908
909int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) 909static int mlx5_devlink_eswitch_check(struct devlink *devlink)
910{ 910{
911 struct mlx5_core_dev *dev; 911 struct mlx5_core_dev *dev = devlink_priv(devlink);
912 u16 cur_mlx5_mode, mlx5_mode = 0;
913 912
914 dev = devlink_priv(devlink); 913 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
914 return -EOPNOTSUPP;
915 915
916 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 916 if (!MLX5_CAP_GEN(dev, vport_group_manager))
917 return -EOPNOTSUPP; 917 return -EOPNOTSUPP;
918 918
919 cur_mlx5_mode = dev->priv.eswitch->mode; 919 if (dev->priv.eswitch->mode == SRIOV_NONE)
920
921 if (cur_mlx5_mode == SRIOV_NONE)
922 return -EOPNOTSUPP; 920 return -EOPNOTSUPP;
923 921
922 return 0;
923}
924
925int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
926{
927 struct mlx5_core_dev *dev = devlink_priv(devlink);
928 u16 cur_mlx5_mode, mlx5_mode = 0;
929 int err;
930
931 err = mlx5_devlink_eswitch_check(devlink);
932 if (err)
933 return err;
934
935 cur_mlx5_mode = dev->priv.eswitch->mode;
936
924 if (esw_mode_from_devlink(mode, &mlx5_mode)) 937 if (esw_mode_from_devlink(mode, &mlx5_mode))
925 return -EINVAL; 938 return -EINVAL;
926 939
@@ -937,15 +950,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
937 950
938int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) 951int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
939{ 952{
940 struct mlx5_core_dev *dev; 953 struct mlx5_core_dev *dev = devlink_priv(devlink);
941 954 int err;
942 dev = devlink_priv(devlink);
943
944 if (!MLX5_CAP_GEN(dev, vport_group_manager))
945 return -EOPNOTSUPP;
946 955
947 if (dev->priv.eswitch->mode == SRIOV_NONE) 956 err = mlx5_devlink_eswitch_check(devlink);
948 return -EOPNOTSUPP; 957 if (err)
958 return err;
949 959
950 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); 960 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
951} 961}
@@ -954,15 +964,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
954{ 964{
955 struct mlx5_core_dev *dev = devlink_priv(devlink); 965 struct mlx5_core_dev *dev = devlink_priv(devlink);
956 struct mlx5_eswitch *esw = dev->priv.eswitch; 966 struct mlx5_eswitch *esw = dev->priv.eswitch;
957 int num_vports = esw->enabled_vports;
958 int err, vport; 967 int err, vport;
959 u8 mlx5_mode; 968 u8 mlx5_mode;
960 969
961 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 970 err = mlx5_devlink_eswitch_check(devlink);
962 return -EOPNOTSUPP; 971 if (err)
963 972 return err;
964 if (esw->mode == SRIOV_NONE)
965 return -EOPNOTSUPP;
966 973
967 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 974 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
968 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 975 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
@@ -985,7 +992,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
985 if (err) 992 if (err)
986 goto out; 993 goto out;
987 994
988 for (vport = 1; vport < num_vports; vport++) { 995 for (vport = 1; vport < esw->enabled_vports; vport++) {
989 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); 996 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
990 if (err) { 997 if (err) {
991 esw_warn(dev, "Failed to set min inline on vport %d\n", 998 esw_warn(dev, "Failed to set min inline on vport %d\n",
@@ -1010,12 +1017,11 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
1010{ 1017{
1011 struct mlx5_core_dev *dev = devlink_priv(devlink); 1018 struct mlx5_core_dev *dev = devlink_priv(devlink);
1012 struct mlx5_eswitch *esw = dev->priv.eswitch; 1019 struct mlx5_eswitch *esw = dev->priv.eswitch;
1020 int err;
1013 1021
1014 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1022 err = mlx5_devlink_eswitch_check(devlink);
1015 return -EOPNOTSUPP; 1023 if (err)
1016 1024 return err;
1017 if (esw->mode == SRIOV_NONE)
1018 return -EOPNOTSUPP;
1019 1025
1020 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); 1026 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
1021} 1027}
@@ -1062,11 +1068,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
1062 struct mlx5_eswitch *esw = dev->priv.eswitch; 1068 struct mlx5_eswitch *esw = dev->priv.eswitch;
1063 int err; 1069 int err;
1064 1070
1065 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1071 err = mlx5_devlink_eswitch_check(devlink);
1066 return -EOPNOTSUPP; 1072 if (err)
1067 1073 return err;
1068 if (esw->mode == SRIOV_NONE)
1069 return -EOPNOTSUPP;
1070 1074
1071 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && 1075 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
1072 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || 1076 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
@@ -1105,12 +1109,11 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
1105{ 1109{
1106 struct mlx5_core_dev *dev = devlink_priv(devlink); 1110 struct mlx5_core_dev *dev = devlink_priv(devlink);
1107 struct mlx5_eswitch *esw = dev->priv.eswitch; 1111 struct mlx5_eswitch *esw = dev->priv.eswitch;
1112 int err;
1108 1113
1109 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1114 err = mlx5_devlink_eswitch_check(devlink);
1110 return -EOPNOTSUPP; 1115 if (err)
1111 1116 return err;
1112 if (esw->mode == SRIOV_NONE)
1113 return -EOPNOTSUPP;
1114 1117
1115 *encap = esw->offloads.encap; 1118 *encap = esw->offloads.encap;
1116 return 0; 1119 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 4f577a5abf88..13be264587f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -175,8 +175,9 @@ static struct mlx5_profile profile[] = {
175 }, 175 },
176}; 176};
177 177
178#define FW_INIT_TIMEOUT_MILI 2000 178#define FW_INIT_TIMEOUT_MILI 2000
179#define FW_INIT_WAIT_MS 2 179#define FW_INIT_WAIT_MS 2
180#define FW_PRE_INIT_TIMEOUT_MILI 10000
180 181
181static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) 182static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
182{ 183{
@@ -1013,6 +1014,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1013 */ 1014 */
1014 dev->state = MLX5_DEVICE_STATE_UP; 1015 dev->state = MLX5_DEVICE_STATE_UP;
1015 1016
1017 /* wait for firmware to accept initialization segments configurations
1018 */
1019 err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
1020 if (err) {
1021 dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
1022 FW_PRE_INIT_TIMEOUT_MILI);
1023 goto out;
1024 }
1025
1016 err = mlx5_cmd_init(dev); 1026 err = mlx5_cmd_init(dev);
1017 if (err) { 1027 if (err) {
1018 dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); 1028 dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index b7e4345c990d..019cef1d3cf7 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -661,8 +661,6 @@ restore_filters:
661 up_write(&vf->efx->filter_sem); 661 up_write(&vf->efx->filter_sem);
662 mutex_unlock(&vf->efx->mac_lock); 662 mutex_unlock(&vf->efx->mac_lock);
663 663
664 up_write(&vf->efx->filter_sem);
665
666 rc2 = efx_net_open(vf->efx->net_dev); 664 rc2 = efx_net_open(vf->efx->net_dev);
667 if (rc2) 665 if (rc2)
668 goto reset_nic; 666 goto reset_nic;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d16d11bfc046..6e4cbc6ce0ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2831,7 +2831,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2831 2831
2832 tx_q->tx_skbuff_dma[first_entry].buf = des; 2832 tx_q->tx_skbuff_dma[first_entry].buf = des;
2833 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 2833 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2834 tx_q->tx_skbuff[first_entry] = skb;
2835 2834
2836 first->des0 = cpu_to_le32(des); 2835 first->des0 = cpu_to_le32(des);
2837 2836
@@ -2865,6 +2864,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2865 2864
2866 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; 2865 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2867 2866
2867 /* Only the last descriptor gets to point to the skb. */
2868 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2869
2870 /* We've used all descriptors we need for this skb, however,
2871 * advance cur_tx so that it references a fresh descriptor.
2872 * ndo_start_xmit will fill this descriptor the next time it's
2873 * called and stmmac_tx_clean may clean up to this descriptor.
2874 */
2868 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); 2875 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2869 2876
2870 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 2877 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
@@ -2998,8 +3005,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2998 3005
2999 first = desc; 3006 first = desc;
3000 3007
3001 tx_q->tx_skbuff[first_entry] = skb;
3002
3003 enh_desc = priv->plat->enh_desc; 3008 enh_desc = priv->plat->enh_desc;
3004 /* To program the descriptors according to the size of the frame */ 3009 /* To program the descriptors according to the size of the frame */
3005 if (enh_desc) 3010 if (enh_desc)
@@ -3047,8 +3052,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3047 skb->len); 3052 skb->len);
3048 } 3053 }
3049 3054
3050 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 3055 /* Only the last descriptor gets to point to the skb. */
3056 tx_q->tx_skbuff[entry] = skb;
3051 3057
3058 /* We've used all descriptors we need for this skb, however,
3059 * advance cur_tx so that it references a fresh descriptor.
3060 * ndo_start_xmit will fill this descriptor the next time it's
3061 * called and stmmac_tx_clean may clean up to this descriptor.
3062 */
3063 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3052 tx_q->cur_tx = entry; 3064 tx_q->cur_tx = entry;
3053 3065
3054 if (netif_msg_pktdata(priv)) { 3066 if (netif_msg_pktdata(priv)) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index c7c1e9906500..d231042f19d6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -442,7 +442,7 @@ struct brcmf_fw {
442 const char *nvram_name; 442 const char *nvram_name;
443 u16 domain_nr; 443 u16 domain_nr;
444 u16 bus_nr; 444 u16 bus_nr;
445 void (*done)(struct device *dev, const struct firmware *fw, 445 void (*done)(struct device *dev, int err, const struct firmware *fw,
446 void *nvram_image, u32 nvram_len); 446 void *nvram_image, u32 nvram_len);
447}; 447};
448 448
@@ -477,52 +477,51 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
477 if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) 477 if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
478 goto fail; 478 goto fail;
479 479
480 fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length); 480 fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length);
481 kfree(fwctx); 481 kfree(fwctx);
482 return; 482 return;
483 483
484fail: 484fail:
485 brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); 485 brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
486 release_firmware(fwctx->code); 486 release_firmware(fwctx->code);
487 device_release_driver(fwctx->dev); 487 fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0);
488 kfree(fwctx); 488 kfree(fwctx);
489} 489}
490 490
491static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) 491static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
492{ 492{
493 struct brcmf_fw *fwctx = ctx; 493 struct brcmf_fw *fwctx = ctx;
494 int ret; 494 int ret = 0;
495 495
496 brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev)); 496 brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
497 if (!fw) 497 if (!fw) {
498 ret = -ENOENT;
498 goto fail; 499 goto fail;
499
500 /* only requested code so done here */
501 if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
502 fwctx->done(fwctx->dev, fw, NULL, 0);
503 kfree(fwctx);
504 return;
505 } 500 }
501 /* only requested code so done here */
502 if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM))
503 goto done;
504
506 fwctx->code = fw; 505 fwctx->code = fw;
507 ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name, 506 ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
508 fwctx->dev, GFP_KERNEL, fwctx, 507 fwctx->dev, GFP_KERNEL, fwctx,
509 brcmf_fw_request_nvram_done); 508 brcmf_fw_request_nvram_done);
510 509
511 if (!ret) 510 /* pass NULL to nvram callback for bcm47xx fallback */
512 return; 511 if (ret)
513 512 brcmf_fw_request_nvram_done(NULL, fwctx);
514 brcmf_fw_request_nvram_done(NULL, fwctx);
515 return; 513 return;
516 514
517fail: 515fail:
518 brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); 516 brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
519 device_release_driver(fwctx->dev); 517done:
518 fwctx->done(fwctx->dev, ret, fw, NULL, 0);
520 kfree(fwctx); 519 kfree(fwctx);
521} 520}
522 521
523int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, 522int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
524 const char *code, const char *nvram, 523 const char *code, const char *nvram,
525 void (*fw_cb)(struct device *dev, 524 void (*fw_cb)(struct device *dev, int err,
526 const struct firmware *fw, 525 const struct firmware *fw,
527 void *nvram_image, u32 nvram_len), 526 void *nvram_image, u32 nvram_len),
528 u16 domain_nr, u16 bus_nr) 527 u16 domain_nr, u16 bus_nr)
@@ -555,7 +554,7 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
555 554
556int brcmf_fw_get_firmwares(struct device *dev, u16 flags, 555int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
557 const char *code, const char *nvram, 556 const char *code, const char *nvram,
558 void (*fw_cb)(struct device *dev, 557 void (*fw_cb)(struct device *dev, int err,
559 const struct firmware *fw, 558 const struct firmware *fw,
560 void *nvram_image, u32 nvram_len)) 559 void *nvram_image, u32 nvram_len))
561{ 560{
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index d3c9f0d52ae3..8fa4b7e1ab3d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -73,13 +73,13 @@ void brcmf_fw_nvram_free(void *nvram);
73 */ 73 */
74int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, 74int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
75 const char *code, const char *nvram, 75 const char *code, const char *nvram,
76 void (*fw_cb)(struct device *dev, 76 void (*fw_cb)(struct device *dev, int err,
77 const struct firmware *fw, 77 const struct firmware *fw,
78 void *nvram_image, u32 nvram_len), 78 void *nvram_image, u32 nvram_len),
79 u16 domain_nr, u16 bus_nr); 79 u16 domain_nr, u16 bus_nr);
80int brcmf_fw_get_firmwares(struct device *dev, u16 flags, 80int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
81 const char *code, const char *nvram, 81 const char *code, const char *nvram,
82 void (*fw_cb)(struct device *dev, 82 void (*fw_cb)(struct device *dev, int err,
83 const struct firmware *fw, 83 const struct firmware *fw,
84 void *nvram_image, u32 nvram_len)); 84 void *nvram_image, u32 nvram_len));
85 85
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 72373e59308e..f59642b2c935 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -2145,7 +2145,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
2145 struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); 2145 struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
2146 struct brcmf_fws_mac_descriptor *entry; 2146 struct brcmf_fws_mac_descriptor *entry;
2147 2147
2148 if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE) 2148 if (!ifp->ndev || !brcmf_fws_queue_skbs(fws))
2149 return; 2149 return;
2150 2150
2151 entry = &fws->desc.iface[ifp->ifidx]; 2151 entry = &fws->desc.iface[ifp->ifidx];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index f36b96dc6acd..f878706613e6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1650,16 +1650,23 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
1650 .write32 = brcmf_pcie_buscore_write32, 1650 .write32 = brcmf_pcie_buscore_write32,
1651}; 1651};
1652 1652
1653static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw, 1653static void brcmf_pcie_setup(struct device *dev, int ret,
1654 const struct firmware *fw,
1654 void *nvram, u32 nvram_len) 1655 void *nvram, u32 nvram_len)
1655{ 1656{
1656 struct brcmf_bus *bus = dev_get_drvdata(dev); 1657 struct brcmf_bus *bus;
1657 struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie; 1658 struct brcmf_pciedev *pcie_bus_dev;
1658 struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo; 1659 struct brcmf_pciedev_info *devinfo;
1659 struct brcmf_commonring **flowrings; 1660 struct brcmf_commonring **flowrings;
1660 int ret;
1661 u32 i; 1661 u32 i;
1662 1662
1663 /* check firmware loading result */
1664 if (ret)
1665 goto fail;
1666
1667 bus = dev_get_drvdata(dev);
1668 pcie_bus_dev = bus->bus_priv.pcie;
1669 devinfo = pcie_bus_dev->devinfo;
1663 brcmf_pcie_attach(devinfo); 1670 brcmf_pcie_attach(devinfo);
1664 1671
1665 /* Some of the firmwares have the size of the memory of the device 1672 /* Some of the firmwares have the size of the memory of the device
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index e03450059b06..5653d6dd38f6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3982,21 +3982,26 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
3982 .get_memdump = brcmf_sdio_bus_get_memdump, 3982 .get_memdump = brcmf_sdio_bus_get_memdump,
3983}; 3983};
3984 3984
3985static void brcmf_sdio_firmware_callback(struct device *dev, 3985static void brcmf_sdio_firmware_callback(struct device *dev, int err,
3986 const struct firmware *code, 3986 const struct firmware *code,
3987 void *nvram, u32 nvram_len) 3987 void *nvram, u32 nvram_len)
3988{ 3988{
3989 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 3989 struct brcmf_bus *bus_if;
3990 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 3990 struct brcmf_sdio_dev *sdiodev;
3991 struct brcmf_sdio *bus = sdiodev->bus; 3991 struct brcmf_sdio *bus;
3992 int err = 0;
3993 u8 saveclk; 3992 u8 saveclk;
3994 3993
3995 brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev)); 3994 brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
3995 bus_if = dev_get_drvdata(dev);
3996 sdiodev = bus_if->bus_priv.sdio;
3997 if (err)
3998 goto fail;
3996 3999
3997 if (!bus_if->drvr) 4000 if (!bus_if->drvr)
3998 return; 4001 return;
3999 4002
4003 bus = sdiodev->bus;
4004
4000 /* try to download image and nvram to the dongle */ 4005 /* try to download image and nvram to the dongle */
4001 bus->alp_only = true; 4006 bus->alp_only = true;
4002 err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len); 4007 err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@@ -4083,6 +4088,7 @@ release:
4083fail: 4088fail:
4084 brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); 4089 brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
4085 device_release_driver(dev); 4090 device_release_driver(dev);
4091 device_release_driver(&sdiodev->func[2]->dev);
4086} 4092}
4087 4093
4088struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) 4094struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index e4d545f9edee..0eea48e73331 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1159,17 +1159,18 @@ fail:
1159 return ret; 1159 return ret;
1160} 1160}
1161 1161
1162static void brcmf_usb_probe_phase2(struct device *dev, 1162static void brcmf_usb_probe_phase2(struct device *dev, int ret,
1163 const struct firmware *fw, 1163 const struct firmware *fw,
1164 void *nvram, u32 nvlen) 1164 void *nvram, u32 nvlen)
1165{ 1165{
1166 struct brcmf_bus *bus = dev_get_drvdata(dev); 1166 struct brcmf_bus *bus = dev_get_drvdata(dev);
1167 struct brcmf_usbdev_info *devinfo; 1167 struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo;
1168 int ret; 1168
1169 if (ret)
1170 goto error;
1169 1171
1170 brcmf_dbg(USB, "Start fw downloading\n"); 1172 brcmf_dbg(USB, "Start fw downloading\n");
1171 1173
1172 devinfo = bus->bus_priv.usb->devinfo;
1173 ret = check_file(fw->data); 1174 ret = check_file(fw->data);
1174 if (ret < 0) { 1175 if (ret < 0) {
1175 brcmf_err("invalid firmware\n"); 1176 brcmf_err("invalid firmware\n");
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index c00238491673..7b3b6fd63d7d 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -2878,7 +2878,7 @@ static const struct intel_ntb_reg skx_reg = {
2878 .link_is_up = xeon_link_is_up, 2878 .link_is_up = xeon_link_is_up,
2879 .db_ioread = skx_db_ioread, 2879 .db_ioread = skx_db_ioread,
2880 .db_iowrite = skx_db_iowrite, 2880 .db_iowrite = skx_db_iowrite,
2881 .db_size = sizeof(u64), 2881 .db_size = sizeof(u32),
2882 .ntb_ctl = SKX_NTBCNTL_OFFSET, 2882 .ntb_ctl = SKX_NTBCNTL_OFFSET,
2883 .mw_bar = {2, 4}, 2883 .mw_bar = {2, 4},
2884}; 2884};
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 02ca45fdd892..10e5bf460139 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -177,14 +177,12 @@ struct ntb_transport_qp {
177 u64 rx_err_ver; 177 u64 rx_err_ver;
178 u64 rx_memcpy; 178 u64 rx_memcpy;
179 u64 rx_async; 179 u64 rx_async;
180 u64 dma_rx_prep_err;
181 u64 tx_bytes; 180 u64 tx_bytes;
182 u64 tx_pkts; 181 u64 tx_pkts;
183 u64 tx_ring_full; 182 u64 tx_ring_full;
184 u64 tx_err_no_buf; 183 u64 tx_err_no_buf;
185 u64 tx_memcpy; 184 u64 tx_memcpy;
186 u64 tx_async; 185 u64 tx_async;
187 u64 dma_tx_prep_err;
188}; 186};
189 187
190struct ntb_transport_mw { 188struct ntb_transport_mw {
@@ -254,8 +252,6 @@ enum {
254#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) 252#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
255#define NTB_QP_DEF_NUM_ENTRIES 100 253#define NTB_QP_DEF_NUM_ENTRIES 100
256#define NTB_LINK_DOWN_TIMEOUT 10 254#define NTB_LINK_DOWN_TIMEOUT 10
257#define DMA_RETRIES 20
258#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50)
259 255
260static void ntb_transport_rxc_db(unsigned long data); 256static void ntb_transport_rxc_db(unsigned long data);
261static const struct ntb_ctx_ops ntb_transport_ops; 257static const struct ntb_ctx_ops ntb_transport_ops;
@@ -516,12 +512,6 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
516 out_offset += snprintf(buf + out_offset, out_count - out_offset, 512 out_offset += snprintf(buf + out_offset, out_count - out_offset,
517 "free tx - \t%u\n", 513 "free tx - \t%u\n",
518 ntb_transport_tx_free_entry(qp)); 514 ntb_transport_tx_free_entry(qp));
519 out_offset += snprintf(buf + out_offset, out_count - out_offset,
520 "DMA tx prep err - \t%llu\n",
521 qp->dma_tx_prep_err);
522 out_offset += snprintf(buf + out_offset, out_count - out_offset,
523 "DMA rx prep err - \t%llu\n",
524 qp->dma_rx_prep_err);
525 515
526 out_offset += snprintf(buf + out_offset, out_count - out_offset, 516 out_offset += snprintf(buf + out_offset, out_count - out_offset,
527 "\n"); 517 "\n");
@@ -623,7 +613,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
623 if (!mw->virt_addr) 613 if (!mw->virt_addr)
624 return -ENOMEM; 614 return -ENOMEM;
625 615
626 if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) 616 if (mw_num < qp_count % mw_count)
627 num_qps_mw = qp_count / mw_count + 1; 617 num_qps_mw = qp_count / mw_count + 1;
628 else 618 else
629 num_qps_mw = qp_count / mw_count; 619 num_qps_mw = qp_count / mw_count;
@@ -768,8 +758,6 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
768 qp->tx_err_no_buf = 0; 758 qp->tx_err_no_buf = 0;
769 qp->tx_memcpy = 0; 759 qp->tx_memcpy = 0;
770 qp->tx_async = 0; 760 qp->tx_async = 0;
771 qp->dma_tx_prep_err = 0;
772 qp->dma_rx_prep_err = 0;
773} 761}
774 762
775static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) 763static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
@@ -1000,7 +988,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
1000 qp->event_handler = NULL; 988 qp->event_handler = NULL;
1001 ntb_qp_link_down_reset(qp); 989 ntb_qp_link_down_reset(qp);
1002 990
1003 if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) 991 if (mw_num < qp_count % mw_count)
1004 num_qps_mw = qp_count / mw_count + 1; 992 num_qps_mw = qp_count / mw_count + 1;
1005 else 993 else
1006 num_qps_mw = qp_count / mw_count; 994 num_qps_mw = qp_count / mw_count;
@@ -1128,8 +1116,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
1128 qp_count = ilog2(qp_bitmap); 1116 qp_count = ilog2(qp_bitmap);
1129 if (max_num_clients && max_num_clients < qp_count) 1117 if (max_num_clients && max_num_clients < qp_count)
1130 qp_count = max_num_clients; 1118 qp_count = max_num_clients;
1131 else if (mw_count < qp_count) 1119 else if (nt->mw_count < qp_count)
1132 qp_count = mw_count; 1120 qp_count = nt->mw_count;
1133 1121
1134 qp_bitmap &= BIT_ULL(qp_count) - 1; 1122 qp_bitmap &= BIT_ULL(qp_count) - 1;
1135 1123
@@ -1317,7 +1305,6 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
1317 struct dmaengine_unmap_data *unmap; 1305 struct dmaengine_unmap_data *unmap;
1318 dma_cookie_t cookie; 1306 dma_cookie_t cookie;
1319 void *buf = entry->buf; 1307 void *buf = entry->buf;
1320 int retries = 0;
1321 1308
1322 len = entry->len; 1309 len = entry->len;
1323 device = chan->device; 1310 device = chan->device;
@@ -1346,22 +1333,11 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
1346 1333
1347 unmap->from_cnt = 1; 1334 unmap->from_cnt = 1;
1348 1335
1349 for (retries = 0; retries < DMA_RETRIES; retries++) { 1336 txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
1350 txd = device->device_prep_dma_memcpy(chan, 1337 unmap->addr[0], len,
1351 unmap->addr[1], 1338 DMA_PREP_INTERRUPT);
1352 unmap->addr[0], len, 1339 if (!txd)
1353 DMA_PREP_INTERRUPT);
1354 if (txd)
1355 break;
1356
1357 set_current_state(TASK_INTERRUPTIBLE);
1358 schedule_timeout(DMA_OUT_RESOURCE_TO);
1359 }
1360
1361 if (!txd) {
1362 qp->dma_rx_prep_err++;
1363 goto err_get_unmap; 1340 goto err_get_unmap;
1364 }
1365 1341
1366 txd->callback_result = ntb_rx_copy_callback; 1342 txd->callback_result = ntb_rx_copy_callback;
1367 txd->callback_param = entry; 1343 txd->callback_param = entry;
@@ -1606,7 +1582,6 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
1606 struct dmaengine_unmap_data *unmap; 1582 struct dmaengine_unmap_data *unmap;
1607 dma_addr_t dest; 1583 dma_addr_t dest;
1608 dma_cookie_t cookie; 1584 dma_cookie_t cookie;
1609 int retries = 0;
1610 1585
1611 device = chan->device; 1586 device = chan->device;
1612 dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index; 1587 dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
@@ -1628,21 +1603,10 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
1628 1603
1629 unmap->to_cnt = 1; 1604 unmap->to_cnt = 1;
1630 1605
1631 for (retries = 0; retries < DMA_RETRIES; retries++) { 1606 txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
1632 txd = device->device_prep_dma_memcpy(chan, dest, 1607 DMA_PREP_INTERRUPT);
1633 unmap->addr[0], len, 1608 if (!txd)
1634 DMA_PREP_INTERRUPT);
1635 if (txd)
1636 break;
1637
1638 set_current_state(TASK_INTERRUPTIBLE);
1639 schedule_timeout(DMA_OUT_RESOURCE_TO);
1640 }
1641
1642 if (!txd) {
1643 qp->dma_tx_prep_err++;
1644 goto err_get_unmap; 1609 goto err_get_unmap;
1645 }
1646 1610
1647 txd->callback_result = ntb_tx_copy_callback; 1611 txd->callback_result = ntb_tx_copy_callback;
1648 txd->callback_param = entry; 1612 txd->callback_param = entry;
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 434e1d474f33..5cab2831ce99 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -90,11 +90,11 @@ MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
90 90
91static unsigned int seg_order = 19; /* 512K */ 91static unsigned int seg_order = 19; /* 512K */
92module_param(seg_order, uint, 0644); 92module_param(seg_order, uint, 0644);
93MODULE_PARM_DESC(seg_order, "size order [n^2] of buffer segment for testing"); 93MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing");
94 94
95static unsigned int run_order = 32; /* 4G */ 95static unsigned int run_order = 32; /* 4G */
96module_param(run_order, uint, 0644); 96module_param(run_order, uint, 0644);
97MODULE_PARM_DESC(run_order, "size order [n^2] of total data to transfer"); 97MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer");
98 98
99static bool use_dma; /* default to 0 */ 99static bool use_dma; /* default to 0 */
100module_param(use_dma, bool, 0644); 100module_param(use_dma, bool, 0644);
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 1482d132fbb8..e432ec887479 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -495,64 +495,54 @@ static struct irq_chip amd_gpio_irqchip = {
495 .flags = IRQCHIP_SKIP_SET_WAKE, 495 .flags = IRQCHIP_SKIP_SET_WAKE,
496}; 496};
497 497
498static void amd_gpio_irq_handler(struct irq_desc *desc) 498#define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
499
500static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
499{ 501{
500 u32 i; 502 struct amd_gpio *gpio_dev = dev_id;
501 u32 off; 503 struct gpio_chip *gc = &gpio_dev->gc;
502 u32 reg; 504 irqreturn_t ret = IRQ_NONE;
503 u32 pin_reg; 505 unsigned int i, irqnr;
504 u64 reg64;
505 int handled = 0;
506 unsigned int irq;
507 unsigned long flags; 506 unsigned long flags;
508 struct irq_chip *chip = irq_desc_get_chip(desc); 507 u32 *regs, regval;
509 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 508 u64 status, mask;
510 struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
511 509
512 chained_irq_enter(chip, desc); 510 /* Read the wake status */
513 /*enable GPIO interrupt again*/
514 raw_spin_lock_irqsave(&gpio_dev->lock, flags); 511 raw_spin_lock_irqsave(&gpio_dev->lock, flags);
515 reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1); 512 status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
516 reg64 = reg; 513 status <<= 32;
517 reg64 = reg64 << 32; 514 status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
518
519 reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
520 reg64 |= reg;
521 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); 515 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
522 516
523 /* 517 /* Bit 0-45 contain the relevant status bits */
524 * first 46 bits indicates interrupt status. 518 status &= (1ULL << 46) - 1;
525 * one bit represents four interrupt sources. 519 regs = gpio_dev->base;
526 */ 520 for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) {
527 for (off = 0; off < 46 ; off++) { 521 if (!(status & mask))
528 if (reg64 & BIT(off)) { 522 continue;
529 for (i = 0; i < 4; i++) { 523 status &= ~mask;
530 pin_reg = readl(gpio_dev->base + 524
531 (off * 4 + i) * 4); 525 /* Each status bit covers four pins */
532 if ((pin_reg & BIT(INTERRUPT_STS_OFF)) || 526 for (i = 0; i < 4; i++) {
533 (pin_reg & BIT(WAKE_STS_OFF))) { 527 regval = readl(regs + i);
534 irq = irq_find_mapping(gc->irqdomain, 528 if (!(regval & PIN_IRQ_PENDING))
535 off * 4 + i); 529 continue;
536 generic_handle_irq(irq); 530 irq = irq_find_mapping(gc->irqdomain, irqnr + i);
537 writel(pin_reg, 531 generic_handle_irq(irq);
538 gpio_dev->base 532 /* Clear interrupt */
539 + (off * 4 + i) * 4); 533 writel(regval, regs + i);
540 handled++; 534 ret = IRQ_HANDLED;
541 }
542 }
543 } 535 }
544 } 536 }
545 537
546 if (handled == 0) 538 /* Signal EOI to the GPIO unit */
547 handle_bad_irq(desc);
548
549 raw_spin_lock_irqsave(&gpio_dev->lock, flags); 539 raw_spin_lock_irqsave(&gpio_dev->lock, flags);
550 reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG); 540 regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
551 reg |= EOI_MASK; 541 regval |= EOI_MASK;
552 writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG); 542 writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG);
553 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); 543 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
554 544
555 chained_irq_exit(chip, desc); 545 return ret;
556} 546}
557 547
558static int amd_get_groups_count(struct pinctrl_dev *pctldev) 548static int amd_get_groups_count(struct pinctrl_dev *pctldev)
@@ -821,10 +811,11 @@ static int amd_gpio_probe(struct platform_device *pdev)
821 goto out2; 811 goto out2;
822 } 812 }
823 813
824 gpiochip_set_chained_irqchip(&gpio_dev->gc, 814 ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0,
825 &amd_gpio_irqchip, 815 KBUILD_MODNAME, gpio_dev);
826 irq_base, 816 if (ret)
827 amd_gpio_irq_handler); 817 goto out2;
818
828 platform_set_drvdata(pdev, gpio_dev); 819 platform_set_drvdata(pdev, gpio_dev);
829 820
830 dev_dbg(&pdev->dev, "amd gpio driver loaded\n"); 821 dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index d3c5f5dfbbd7..222b6685b09f 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -798,7 +798,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
798 break; 798 break;
799 case PIN_CONFIG_OUTPUT: 799 case PIN_CONFIG_OUTPUT:
800 __stm32_gpio_set(bank, offset, arg); 800 __stm32_gpio_set(bank, offset, arg);
801 ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false); 801 ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false);
802 break; 802 break;
803 default: 803 default:
804 ret = -EINVAL; 804 ret = -EINVAL;
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 8bc7ee1a8ca8..507512cc478b 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -870,7 +870,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
870 QEDI_ERR(&qedi->dbg_ctx, 870 QEDI_ERR(&qedi->dbg_ctx,
871 "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n", 871 "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
872 protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task); 872 protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
873 WARN_ON(1);
874 } 873 }
875} 874}
876 875
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 09a294634bc7..879d3b7462f9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1499,11 +1499,9 @@ err_idx:
1499 1499
1500void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx) 1500void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
1501{ 1501{
1502 if (!test_and_clear_bit(idx, qedi->task_idx_map)) { 1502 if (!test_and_clear_bit(idx, qedi->task_idx_map))
1503 QEDI_ERR(&qedi->dbg_ctx, 1503 QEDI_ERR(&qedi->dbg_ctx,
1504 "FW task context, already cleared, tid=0x%x\n", idx); 1504 "FW task context, already cleared, tid=0x%x\n", idx);
1505 WARN_ON(1);
1506 }
1507} 1505}
1508 1506
1509void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, 1507void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 0d8f81591bed..3fdca2cdd8da 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1279,6 +1279,18 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
1279 */ 1279 */
1280 if (dump_payload) 1280 if (dump_payload)
1281 goto after_immediate_data; 1281 goto after_immediate_data;
1282 /*
1283 * Check for underflow case where both EDTL and immediate data payload
1284 * exceeds what is presented by CDB's TRANSFER LENGTH, and what has
1285 * already been set in target_cmd_size_check() as se_cmd->data_length.
1286 *
1287 * For this special case, fail the command and dump the immediate data
1288 * payload.
1289 */
1290 if (cmd->first_burst_len > cmd->se_cmd.data_length) {
1291 cmd->sense_reason = TCM_INVALID_CDB_FIELD;
1292 goto after_immediate_data;
1293 }
1282 1294
1283 immed_ret = iscsit_handle_immediate_data(cmd, hdr, 1295 immed_ret = iscsit_handle_immediate_data(cmd, hdr,
1284 cmd->first_burst_len); 1296 cmd->first_burst_len);
@@ -4423,8 +4435,11 @@ static void iscsit_logout_post_handler_closesession(
4423 * always sleep waiting for RX/TX thread shutdown to complete 4435 * always sleep waiting for RX/TX thread shutdown to complete
4424 * within iscsit_close_connection(). 4436 * within iscsit_close_connection().
4425 */ 4437 */
4426 if (!conn->conn_transport->rdma_shutdown) 4438 if (!conn->conn_transport->rdma_shutdown) {
4427 sleep = cmpxchg(&conn->tx_thread_active, true, false); 4439 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4440 if (!sleep)
4441 return;
4442 }
4428 4443
4429 atomic_set(&conn->conn_logout_remove, 0); 4444 atomic_set(&conn->conn_logout_remove, 0);
4430 complete(&conn->conn_logout_comp); 4445 complete(&conn->conn_logout_comp);
@@ -4440,8 +4455,11 @@ static void iscsit_logout_post_handler_samecid(
4440{ 4455{
4441 int sleep = 1; 4456 int sleep = 1;
4442 4457
4443 if (!conn->conn_transport->rdma_shutdown) 4458 if (!conn->conn_transport->rdma_shutdown) {
4444 sleep = cmpxchg(&conn->tx_thread_active, true, false); 4459 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4460 if (!sleep)
4461 return;
4462 }
4445 4463
4446 atomic_set(&conn->conn_logout_remove, 0); 4464 atomic_set(&conn->conn_logout_remove, 0);
4447 complete(&conn->conn_logout_comp); 4465 complete(&conn->conn_logout_comp);
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 9ab7090f7c83..0912de7c0cf8 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -136,7 +136,7 @@ int init_se_kmem_caches(void);
136void release_se_kmem_caches(void); 136void release_se_kmem_caches(void);
137u32 scsi_get_new_index(scsi_index_t); 137u32 scsi_get_new_index(scsi_index_t);
138void transport_subsystem_check_init(void); 138void transport_subsystem_check_init(void);
139void transport_cmd_finish_abort(struct se_cmd *, int); 139int transport_cmd_finish_abort(struct se_cmd *, int);
140unsigned char *transport_dump_cmd_direction(struct se_cmd *); 140unsigned char *transport_dump_cmd_direction(struct se_cmd *);
141void transport_dump_dev_state(struct se_device *, char *, int *); 141void transport_dump_dev_state(struct se_device *, char *, int *);
142void transport_dump_dev_info(struct se_device *, struct se_lun *, 142void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index dce1e1b47316..13f47bf4d16b 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,7 +75,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
75 kfree(tmr); 75 kfree(tmr);
76} 76}
77 77
78static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) 78static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
79{ 79{
80 unsigned long flags; 80 unsigned long flags;
81 bool remove = true, send_tas; 81 bool remove = true, send_tas;
@@ -91,7 +91,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
91 transport_send_task_abort(cmd); 91 transport_send_task_abort(cmd);
92 } 92 }
93 93
94 transport_cmd_finish_abort(cmd, remove); 94 return transport_cmd_finish_abort(cmd, remove);
95} 95}
96 96
97static int target_check_cdb_and_preempt(struct list_head *list, 97static int target_check_cdb_and_preempt(struct list_head *list,
@@ -184,8 +184,8 @@ void core_tmr_abort_task(
184 cancel_work_sync(&se_cmd->work); 184 cancel_work_sync(&se_cmd->work);
185 transport_wait_for_tasks(se_cmd); 185 transport_wait_for_tasks(se_cmd);
186 186
187 transport_cmd_finish_abort(se_cmd, true); 187 if (!transport_cmd_finish_abort(se_cmd, true))
188 target_put_sess_cmd(se_cmd); 188 target_put_sess_cmd(se_cmd);
189 189
190 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" 190 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
191 " ref_tag: %llu\n", ref_tag); 191 " ref_tag: %llu\n", ref_tag);
@@ -281,8 +281,8 @@ static void core_tmr_drain_tmr_list(
281 cancel_work_sync(&cmd->work); 281 cancel_work_sync(&cmd->work);
282 transport_wait_for_tasks(cmd); 282 transport_wait_for_tasks(cmd);
283 283
284 transport_cmd_finish_abort(cmd, 1); 284 if (!transport_cmd_finish_abort(cmd, 1))
285 target_put_sess_cmd(cmd); 285 target_put_sess_cmd(cmd);
286 } 286 }
287} 287}
288 288
@@ -380,8 +380,8 @@ static void core_tmr_drain_state_list(
380 cancel_work_sync(&cmd->work); 380 cancel_work_sync(&cmd->work);
381 transport_wait_for_tasks(cmd); 381 transport_wait_for_tasks(cmd);
382 382
383 core_tmr_handle_tas_abort(cmd, tas); 383 if (!core_tmr_handle_tas_abort(cmd, tas))
384 target_put_sess_cmd(cmd); 384 target_put_sess_cmd(cmd);
385 } 385 }
386} 386}
387 387
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 6025935036c9..f1b3a46bdcaf 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -651,9 +651,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
651 percpu_ref_put(&lun->lun_ref); 651 percpu_ref_put(&lun->lun_ref);
652} 652}
653 653
654void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 654int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
655{ 655{
656 bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); 656 bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
657 int ret = 0;
657 658
658 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) 659 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
659 transport_lun_remove_cmd(cmd); 660 transport_lun_remove_cmd(cmd);
@@ -665,9 +666,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
665 cmd->se_tfo->aborted_task(cmd); 666 cmd->se_tfo->aborted_task(cmd);
666 667
667 if (transport_cmd_check_stop_to_fabric(cmd)) 668 if (transport_cmd_check_stop_to_fabric(cmd))
668 return; 669 return 1;
669 if (remove && ack_kref) 670 if (remove && ack_kref)
670 transport_put_cmd(cmd); 671 ret = transport_put_cmd(cmd);
672
673 return ret;
671} 674}
672 675
673static void target_complete_failure_work(struct work_struct *work) 676static void target_complete_failure_work(struct work_struct *work)
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 734cbf8d9676..dd9f1bebb5a3 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -344,7 +344,7 @@ static int autofs_dev_ioctl_fail(struct file *fp,
344 int status; 344 int status;
345 345
346 token = (autofs_wqt_t) param->fail.token; 346 token = (autofs_wqt_t) param->fail.token;
347 status = param->fail.status ? param->fail.status : -ENOENT; 347 status = param->fail.status < 0 ? param->fail.status : -ENOENT;
348 return autofs4_wait_release(sbi, token, status); 348 return autofs4_wait_release(sbi, token, status);
349} 349}
350 350
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 0fd081bd2a2f..fcef70602b27 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3271,7 +3271,7 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3271 if (!is_sync_kiocb(iocb)) 3271 if (!is_sync_kiocb(iocb))
3272 ctx->iocb = iocb; 3272 ctx->iocb = iocb;
3273 3273
3274 if (to->type & ITER_IOVEC) 3274 if (to->type == ITER_IOVEC)
3275 ctx->should_dirty = true; 3275 ctx->should_dirty = true;
3276 3276
3277 rc = setup_aio_ctx_iter(ctx, to, READ); 3277 rc = setup_aio_ctx_iter(ctx, to, READ);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index b08531977daa..3b147dc6af63 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -810,7 +810,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
810 810
811 if (!pages) { 811 if (!pages) {
812 pages = vmalloc(max_pages * sizeof(struct page *)); 812 pages = vmalloc(max_pages * sizeof(struct page *));
813 if (!bv) { 813 if (!pages) {
814 kvfree(bv); 814 kvfree(bv);
815 return -ENOMEM; 815 return -ENOMEM;
816 } 816 }
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 27bc360c7ffd..a723df3e0197 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
849 struct cifs_fid *fid, __u16 search_flags, 849 struct cifs_fid *fid, __u16 search_flags,
850 struct cifs_search_info *srch_inf) 850 struct cifs_search_info *srch_inf)
851{ 851{
852 return CIFSFindFirst(xid, tcon, path, cifs_sb, 852 int rc;
853 &fid->netfid, search_flags, srch_inf, true); 853
854 rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
855 &fid->netfid, search_flags, srch_inf, true);
856 if (rc)
857 cifs_dbg(FYI, "find first failed=%d\n", rc);
858 return rc;
854} 859}
855 860
856static int 861static int
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index c58691834eb2..7e48561abd29 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -982,7 +982,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
982 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL); 982 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
983 kfree(utf16_path); 983 kfree(utf16_path);
984 if (rc) { 984 if (rc) {
985 cifs_dbg(VFS, "open dir failed\n"); 985 cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
986 return rc; 986 return rc;
987 } 987 }
988 988
@@ -992,7 +992,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
992 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid, 992 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
993 fid->volatile_fid, 0, srch_inf); 993 fid->volatile_fid, 0, srch_inf);
994 if (rc) { 994 if (rc) {
995 cifs_dbg(VFS, "query directory failed\n"); 995 cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
996 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); 996 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
997 } 997 }
998 return rc; 998 return rc;
@@ -1809,7 +1809,8 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
1809 1809
1810 sg = init_sg(rqst, sign); 1810 sg = init_sg(rqst, sign);
1811 if (!sg) { 1811 if (!sg) {
1812 cifs_dbg(VFS, "%s: Failed to init sg %d", __func__, rc); 1812 cifs_dbg(VFS, "%s: Failed to init sg", __func__);
1813 rc = -ENOMEM;
1813 goto free_req; 1814 goto free_req;
1814 } 1815 }
1815 1816
@@ -1817,6 +1818,7 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
1817 iv = kzalloc(iv_len, GFP_KERNEL); 1818 iv = kzalloc(iv_len, GFP_KERNEL);
1818 if (!iv) { 1819 if (!iv) {
1819 cifs_dbg(VFS, "%s: Failed to alloc IV", __func__); 1820 cifs_dbg(VFS, "%s: Failed to alloc IV", __func__);
1821 rc = -ENOMEM;
1820 goto free_sg; 1822 goto free_sg;
1821 } 1823 }
1822 iv[0] = 3; 1824 iv[0] = 3;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 3cb5c9e2d4e7..de50e749ff05 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -188,8 +188,6 @@ static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode,
188 pcreatetime = (__u64 *)value; 188 pcreatetime = (__u64 *)value;
189 *pcreatetime = CIFS_I(inode)->createtime; 189 *pcreatetime = CIFS_I(inode)->createtime;
190 return sizeof(__u64); 190 return sizeof(__u64);
191
192 return rc;
193} 191}
194 192
195 193
diff --git a/fs/dax.c b/fs/dax.c
index 2a6889b3585f..9187f3b07f3e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -859,6 +859,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
859 if (ret < 0) 859 if (ret < 0)
860 goto out; 860 goto out;
861 } 861 }
862 start_index = indices[pvec.nr - 1] + 1;
862 } 863 }
863out: 864out:
864 put_dax(dax_dev); 865 put_dax(dax_dev);
diff --git a/fs/exec.c b/fs/exec.c
index 72934df68471..904199086490 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -220,8 +220,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
220 220
221 if (write) { 221 if (write) {
222 unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; 222 unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
223 unsigned long ptr_size;
223 struct rlimit *rlim; 224 struct rlimit *rlim;
224 225
226 /*
227 * Since the stack will hold pointers to the strings, we
228 * must account for them as well.
229 *
230 * The size calculation is the entire vma while each arg page is
231 * built, so each time we get here it's calculating how far it
232 * is currently (rather than each call being just the newly
233 * added size from the arg page). As a result, we need to
234 * always add the entire size of the pointers, so that on the
235 * last call to get_arg_page() we'll actually have the entire
236 * correct size.
237 */
238 ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
239 if (ptr_size > ULONG_MAX - size)
240 goto fail;
241 size += ptr_size;
242
225 acct_arg_size(bprm, size / PAGE_SIZE); 243 acct_arg_size(bprm, size / PAGE_SIZE);
226 244
227 /* 245 /*
@@ -239,13 +257,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
239 * to work from. 257 * to work from.
240 */ 258 */
241 rlim = current->signal->rlim; 259 rlim = current->signal->rlim;
242 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) { 260 if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
243 put_page(page); 261 goto fail;
244 return NULL;
245 }
246 } 262 }
247 263
248 return page; 264 return page;
265
266fail:
267 put_page(page);
268 return NULL;
249} 269}
250 270
251static void put_arg_page(struct page *page) 271static void put_arg_page(struct page *page)
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 3b7c937a36b5..4689940a953c 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2591,6 +2591,10 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
2591 struct ocfs2_lock_res *lockres; 2591 struct ocfs2_lock_res *lockres;
2592 2592
2593 lockres = &OCFS2_I(inode)->ip_inode_lockres; 2593 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2594 /* had_lock means that the currect process already takes the cluster
2595 * lock previously. If had_lock is 1, we have nothing to do here, and
2596 * it will get unlocked where we got the lock.
2597 */
2594 if (!had_lock) { 2598 if (!had_lock) {
2595 ocfs2_remove_holder(lockres, oh); 2599 ocfs2_remove_holder(lockres, oh);
2596 ocfs2_inode_unlock(inode, ex); 2600 ocfs2_inode_unlock(inode, ex);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 3c5384d9b3a5..f70c3778d600 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1328,20 +1328,21 @@ static int ocfs2_xattr_get(struct inode *inode,
1328 void *buffer, 1328 void *buffer,
1329 size_t buffer_size) 1329 size_t buffer_size)
1330{ 1330{
1331 int ret; 1331 int ret, had_lock;
1332 struct buffer_head *di_bh = NULL; 1332 struct buffer_head *di_bh = NULL;
1333 struct ocfs2_lock_holder oh;
1333 1334
1334 ret = ocfs2_inode_lock(inode, &di_bh, 0); 1335 had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh);
1335 if (ret < 0) { 1336 if (had_lock < 0) {
1336 mlog_errno(ret); 1337 mlog_errno(had_lock);
1337 return ret; 1338 return had_lock;
1338 } 1339 }
1339 down_read(&OCFS2_I(inode)->ip_xattr_sem); 1340 down_read(&OCFS2_I(inode)->ip_xattr_sem);
1340 ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, 1341 ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
1341 name, buffer, buffer_size); 1342 name, buffer, buffer_size);
1342 up_read(&OCFS2_I(inode)->ip_xattr_sem); 1343 up_read(&OCFS2_I(inode)->ip_xattr_sem);
1343 1344
1344 ocfs2_inode_unlock(inode, 0); 1345 ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
1345 1346
1346 brelse(di_bh); 1347 brelse(di_bh);
1347 1348
@@ -3537,11 +3538,12 @@ int ocfs2_xattr_set(struct inode *inode,
3537{ 3538{
3538 struct buffer_head *di_bh = NULL; 3539 struct buffer_head *di_bh = NULL;
3539 struct ocfs2_dinode *di; 3540 struct ocfs2_dinode *di;
3540 int ret, credits, ref_meta = 0, ref_credits = 0; 3541 int ret, credits, had_lock, ref_meta = 0, ref_credits = 0;
3541 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3542 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3542 struct inode *tl_inode = osb->osb_tl_inode; 3543 struct inode *tl_inode = osb->osb_tl_inode;
3543 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, }; 3544 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
3544 struct ocfs2_refcount_tree *ref_tree = NULL; 3545 struct ocfs2_refcount_tree *ref_tree = NULL;
3546 struct ocfs2_lock_holder oh;
3545 3547
3546 struct ocfs2_xattr_info xi = { 3548 struct ocfs2_xattr_info xi = {
3547 .xi_name_index = name_index, 3549 .xi_name_index = name_index,
@@ -3572,8 +3574,9 @@ int ocfs2_xattr_set(struct inode *inode,
3572 return -ENOMEM; 3574 return -ENOMEM;
3573 } 3575 }
3574 3576
3575 ret = ocfs2_inode_lock(inode, &di_bh, 1); 3577 had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
3576 if (ret < 0) { 3578 if (had_lock < 0) {
3579 ret = had_lock;
3577 mlog_errno(ret); 3580 mlog_errno(ret);
3578 goto cleanup_nolock; 3581 goto cleanup_nolock;
3579 } 3582 }
@@ -3670,7 +3673,7 @@ cleanup:
3670 if (ret) 3673 if (ret)
3671 mlog_errno(ret); 3674 mlog_errno(ret);
3672 } 3675 }
3673 ocfs2_inode_unlock(inode, 1); 3676 ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
3674cleanup_nolock: 3677cleanup_nolock:
3675 brelse(di_bh); 3678 brelse(di_bh);
3676 brelse(xbs.xattr_bh); 3679 brelse(xbs.xattr_bh);
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 0315fea1d589..f80be4c5df9d 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -455,24 +455,14 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
455 /* 455 /*
456 * allocate new block and move data 456 * allocate new block and move data
457 */ 457 */
458 switch (fs32_to_cpu(sb, usb1->fs_optim)) { 458 if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
459 case UFS_OPTSPACE:
460 request = newcount; 459 request = newcount;
461 if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree 460 if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
462 > uspi->s_dsize * uspi->s_minfree / (2 * 100)) 461 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
463 break; 462 } else {
464 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
465 break;
466 default:
467 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
468
469 case UFS_OPTTIME:
470 request = uspi->s_fpb; 463 request = uspi->s_fpb;
471 if (uspi->cs_total.cs_nffree < uspi->s_dsize * 464 if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
472 (uspi->s_minfree - 2) / 100) 465 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
473 break;
474 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
475 break;
476 } 466 }
477 result = ufs_alloc_fragments (inode, cgno, goal, request, err); 467 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
478 if (result) { 468 if (result) {
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 9f4590261134..f36d6a53687d 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -566,10 +566,8 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
566 */ 566 */
567 inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); 567 inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
568 set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink)); 568 set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
569 if (inode->i_nlink == 0) { 569 if (inode->i_nlink == 0)
570 ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); 570 return -ESTALE;
571 return -1;
572 }
573 571
574 /* 572 /*
575 * Linux now has 32-bit uid and gid, so we can support EFT. 573 * Linux now has 32-bit uid and gid, so we can support EFT.
@@ -578,9 +576,9 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
578 i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode)); 576 i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
579 577
580 inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); 578 inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
581 inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); 579 inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
582 inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); 580 inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
583 inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); 581 inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
584 inode->i_mtime.tv_nsec = 0; 582 inode->i_mtime.tv_nsec = 0;
585 inode->i_atime.tv_nsec = 0; 583 inode->i_atime.tv_nsec = 0;
586 inode->i_ctime.tv_nsec = 0; 584 inode->i_ctime.tv_nsec = 0;
@@ -614,10 +612,8 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
614 */ 612 */
615 inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); 613 inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
616 set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink)); 614 set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
617 if (inode->i_nlink == 0) { 615 if (inode->i_nlink == 0)
618 ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); 616 return -ESTALE;
619 return -1;
620 }
621 617
622 /* 618 /*
623 * Linux now has 32-bit uid and gid, so we can support EFT. 619 * Linux now has 32-bit uid and gid, so we can support EFT.
@@ -657,7 +653,7 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
657 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 653 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
658 struct buffer_head * bh; 654 struct buffer_head * bh;
659 struct inode *inode; 655 struct inode *inode;
660 int err; 656 int err = -EIO;
661 657
662 UFSD("ENTER, ino %lu\n", ino); 658 UFSD("ENTER, ino %lu\n", ino);
663 659
@@ -692,9 +688,10 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
692 err = ufs1_read_inode(inode, 688 err = ufs1_read_inode(inode,
693 ufs_inode + ufs_inotofsbo(inode->i_ino)); 689 ufs_inode + ufs_inotofsbo(inode->i_ino));
694 } 690 }
695 691 brelse(bh);
696 if (err) 692 if (err)
697 goto bad_inode; 693 goto bad_inode;
694
698 inode->i_version++; 695 inode->i_version++;
699 ufsi->i_lastfrag = 696 ufsi->i_lastfrag =
700 (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; 697 (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
@@ -703,15 +700,13 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
703 700
704 ufs_set_inode_ops(inode); 701 ufs_set_inode_ops(inode);
705 702
706 brelse(bh);
707
708 UFSD("EXIT\n"); 703 UFSD("EXIT\n");
709 unlock_new_inode(inode); 704 unlock_new_inode(inode);
710 return inode; 705 return inode;
711 706
712bad_inode: 707bad_inode:
713 iget_failed(inode); 708 iget_failed(inode);
714 return ERR_PTR(-EIO); 709 return ERR_PTR(err);
715} 710}
716 711
717static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) 712static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index d5300adbfd79..0a4f58a5073c 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -1210,6 +1210,15 @@ magic_found:
1210 1210
1211 uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize, 1211 uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize,
1212 uspi->s_minfree, 100); 1212 uspi->s_minfree, 100);
1213 if (uspi->s_minfree <= 5) {
1214 uspi->s_time_to_space = ~0ULL;
1215 uspi->s_space_to_time = 0;
1216 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
1217 } else {
1218 uspi->s_time_to_space = (uspi->s_root_blocks / 2) + 1;
1219 uspi->s_space_to_time = mul_u64_u32_div(uspi->s_dsize,
1220 uspi->s_minfree - 2, 100) - 1;
1221 }
1213 1222
1214 /* 1223 /*
1215 * Compute another frequently used values 1224 * Compute another frequently used values
diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h
index 823d55a37586..150eef6f1233 100644
--- a/fs/ufs/ufs_fs.h
+++ b/fs/ufs/ufs_fs.h
@@ -792,6 +792,8 @@ struct ufs_sb_private_info {
792 __s32 fs_magic; /* filesystem magic */ 792 __s32 fs_magic; /* filesystem magic */
793 unsigned int s_dirblksize; 793 unsigned int s_dirblksize;
794 __u64 s_root_blocks; 794 __u64 s_root_blocks;
795 __u64 s_time_to_space;
796 __u64 s_space_to_time;
795}; 797};
796 798
797/* 799/*
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 09af0f7cd55e..3b91faacc1ba 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1316,9 +1316,12 @@ xfs_vm_bmap(
1316 * The swap code (ab-)uses ->bmap to get a block mapping and then 1316 * The swap code (ab-)uses ->bmap to get a block mapping and then
1317 * bypasseѕ the file system for actual I/O. We really can't allow 1317 * bypasseѕ the file system for actual I/O. We really can't allow
1318 * that on reflinks inodes, so we have to skip out here. And yes, 1318 * that on reflinks inodes, so we have to skip out here. And yes,
1319 * 0 is the magic code for a bmap error.. 1319 * 0 is the magic code for a bmap error.
1320 *
1321 * Since we don't pass back blockdev info, we can't return bmap
1322 * information for rt files either.
1320 */ 1323 */
1321 if (xfs_is_reflink_inode(ip)) 1324 if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
1322 return 0; 1325 return 0;
1323 1326
1324 filemap_write_and_wait(mapping); 1327 filemap_write_and_wait(mapping);
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 197f3fffc9a7..408c7820e200 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -210,7 +210,8 @@ struct acpi_device_flags {
210 u32 of_compatible_ok:1; 210 u32 of_compatible_ok:1;
211 u32 coherent_dma:1; 211 u32 coherent_dma:1;
212 u32 cca_seen:1; 212 u32 cca_seen:1;
213 u32 reserved:20; 213 u32 spi_i2c_slave:1;
214 u32 reserved:19;
214}; 215};
215 216
216/* File System */ 217/* File System */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 27e0dbaa6c0e..34c8f5600ce0 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -334,4 +334,44 @@
334 INTEL_KBL_GT3_IDS(info), \ 334 INTEL_KBL_GT3_IDS(info), \
335 INTEL_KBL_GT4_IDS(info) 335 INTEL_KBL_GT4_IDS(info)
336 336
337/* CFL S */
338#define INTEL_CFL_S_IDS(info) \
339 INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \
340 INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \
341 INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
342 INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
343 INTEL_VGA_DEVICE(0x3E96, info) /* SRV GT2 */
344
345/* CFL H */
346#define INTEL_CFL_H_IDS(info) \
347 INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
348 INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
349
350/* CFL U */
351#define INTEL_CFL_U_IDS(info) \
352 INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
353 INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
354 INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \
355 INTEL_VGA_DEVICE(0x3EA5, info) /* ULT GT3 */
356
357/* CNL U 2+2 */
358#define INTEL_CNL_U_GT2_IDS(info) \
359 INTEL_VGA_DEVICE(0x5A52, info), \
360 INTEL_VGA_DEVICE(0x5A5A, info), \
361 INTEL_VGA_DEVICE(0x5A42, info), \
362 INTEL_VGA_DEVICE(0x5A4A, info)
363
364/* CNL Y 2+2 */
365#define INTEL_CNL_Y_GT2_IDS(info) \
366 INTEL_VGA_DEVICE(0x5A51, info), \
367 INTEL_VGA_DEVICE(0x5A59, info), \
368 INTEL_VGA_DEVICE(0x5A41, info), \
369 INTEL_VGA_DEVICE(0x5A49, info), \
370 INTEL_VGA_DEVICE(0x5A71, info), \
371 INTEL_VGA_DEVICE(0x5A79, info)
372
373#define INTEL_CNL_IDS(info) \
374 INTEL_CNL_U_GT2_IDS(info), \
375 INTEL_CNL_Y_GT2_IDS(info)
376
337#endif /* _I915_PCIIDS_H */ 377#endif /* _I915_PCIIDS_H */
diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h
index 370c0a0473fc..d66432c6e675 100644
--- a/include/dt-bindings/clock/sun50i-a64-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a64-ccu.h
@@ -43,6 +43,8 @@
43#ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_ 43#ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_
44#define _DT_BINDINGS_CLK_SUN50I_A64_H_ 44#define _DT_BINDINGS_CLK_SUN50I_A64_H_
45 45
46#define CLK_PLL_PERIPH0 11
47
46#define CLK_BUS_MIPI_DSI 28 48#define CLK_BUS_MIPI_DSI 28
47#define CLK_BUS_CE 29 49#define CLK_BUS_CE 29
48#define CLK_BUS_DMA 30 50#define CLK_BUS_DMA 30
diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h
index c2afc41d6964..e139fe5c62ec 100644
--- a/include/dt-bindings/clock/sun8i-h3-ccu.h
+++ b/include/dt-bindings/clock/sun8i-h3-ccu.h
@@ -43,6 +43,8 @@
43#ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_ 43#ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_
44#define _DT_BINDINGS_CLK_SUN8I_H3_H_ 44#define _DT_BINDINGS_CLK_SUN8I_H3_H_
45 45
46#define CLK_PLL_PERIPH0 9
47
46#define CLK_CPUX 14 48#define CLK_CPUX 14
47 49
48#define CLK_BUS_CE 20 50#define CLK_BUS_CE 20
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b74a3edcb3da..1ddd36bd2173 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -391,6 +391,8 @@ struct request_queue {
391 int nr_rqs[2]; /* # allocated [a]sync rqs */ 391 int nr_rqs[2]; /* # allocated [a]sync rqs */
392 int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ 392 int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
393 393
394 atomic_t shared_hctx_restart;
395
394 struct blk_queue_stats *stats; 396 struct blk_queue_stats *stats;
395 struct rq_wb *rq_wb; 397 struct rq_wb *rq_wb;
396 398
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 07ef550c6627..93315d6b21a8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -84,6 +84,7 @@ struct kmem_cache {
84 int red_left_pad; /* Left redzone padding size */ 84 int red_left_pad; /* Left redzone padding size */
85#ifdef CONFIG_SYSFS 85#ifdef CONFIG_SYSFS
86 struct kobject kobj; /* For sysfs */ 86 struct kobject kobj; /* For sysfs */
87 struct work_struct kobj_remove_work;
87#endif 88#endif
88#ifdef CONFIG_MEMCG 89#ifdef CONFIG_MEMCG
89 struct memcg_cache_params memcg_params; 90 struct memcg_cache_params memcg_params;
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 110f4532188c..f7043ccca81c 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -29,7 +29,6 @@
29 */ 29 */
30struct tk_read_base { 30struct tk_read_base {
31 struct clocksource *clock; 31 struct clocksource *clock;
32 u64 (*read)(struct clocksource *cs);
33 u64 mask; 32 u64 mask;
34 u64 cycle_last; 33 u64 cycle_last;
35 u32 mult; 34 u32 mult;
@@ -58,7 +57,7 @@ struct tk_read_base {
58 * interval. 57 * interval.
59 * @xtime_remainder: Shifted nano seconds left over when rounding 58 * @xtime_remainder: Shifted nano seconds left over when rounding
60 * @cycle_interval 59 * @cycle_interval
61 * @raw_interval: Raw nano seconds accumulated per NTP interval. 60 * @raw_interval: Shifted raw nano seconds accumulated per NTP interval.
62 * @ntp_error: Difference between accumulated time and NTP time in ntp 61 * @ntp_error: Difference between accumulated time and NTP time in ntp
63 * shifted nano seconds. 62 * shifted nano seconds.
64 * @ntp_error_shift: Shift conversion between clock shifted nano seconds and 63 * @ntp_error_shift: Shift conversion between clock shifted nano seconds and
@@ -100,7 +99,7 @@ struct timekeeper {
100 u64 cycle_interval; 99 u64 cycle_interval;
101 u64 xtime_interval; 100 u64 xtime_interval;
102 s64 xtime_remainder; 101 s64 xtime_remainder;
103 u32 raw_interval; 102 u64 raw_interval;
104 /* The ntp_tick_length() value currently being used. 103 /* The ntp_tick_length() value currently being used.
105 * This cached copy ensures we consistently apply the tick 104 * This cached copy ensures we consistently apply the tick
106 * length for an entire tick, as ntp_tick_length may change 105 * length for an entire tick, as ntp_tick_length may change
diff --git a/include/net/wext.h b/include/net/wext.h
index 345911965dbb..454ff763eeba 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -6,7 +6,7 @@
6struct net; 6struct net;
7 7
8#ifdef CONFIG_WEXT_CORE 8#ifdef CONFIG_WEXT_CORE
9int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, 9int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
10 void __user *arg); 10 void __user *arg);
11int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, 11int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
12 unsigned long arg); 12 unsigned long arg);
@@ -14,7 +14,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
14struct iw_statistics *get_wireless_stats(struct net_device *dev); 14struct iw_statistics *get_wireless_stats(struct net_device *dev);
15int call_commit_handler(struct net_device *dev); 15int call_commit_handler(struct net_device *dev);
16#else 16#else
17static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, 17static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
18 void __user *arg) 18 void __user *arg)
19{ 19{
20 return -EINVAL; 20 return -EINVAL;
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index f24a80d2d42e..7ccbd6a2bbe0 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -418,6 +418,19 @@ typedef struct drm_i915_irq_wait {
418 */ 418 */
419#define I915_PARAM_HAS_EXEC_CAPTURE 45 419#define I915_PARAM_HAS_EXEC_CAPTURE 45
420 420
421#define I915_PARAM_SLICE_MASK 46
422
423/* Assuming it's uniform for each slice, this queries the mask of subslices
424 * per-slice for this system.
425 */
426#define I915_PARAM_SUBSLICE_MASK 47
427
428/*
429 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
430 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
431 */
432#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48
433
421typedef struct drm_i915_getparam { 434typedef struct drm_i915_getparam {
422 __s32 param; 435 __s32 param;
423 /* 436 /*
@@ -904,7 +917,17 @@ struct drm_i915_gem_execbuffer2 {
904 */ 917 */
905#define I915_EXEC_FENCE_OUT (1<<17) 918#define I915_EXEC_FENCE_OUT (1<<17)
906 919
907#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_OUT<<1)) 920/*
921 * Traditionally the execbuf ioctl has only considered the final element in
922 * the execobject[] to be the executable batch. Often though, the client
923 * will known the batch object prior to construction and being able to place
924 * it into the execobject[] array first can simplify the relocation tracking.
925 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
926 * execobject[] as the * batch instead (the default is to use the last
927 * element).
928 */
929#define I915_EXEC_BATCH_FIRST (1<<18)
930#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_BATCH_FIRST<<1))
908 931
909#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 932#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
910#define i915_execbuffer2_set_context_id(eb2, context) \ 933#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1308,13 +1331,18 @@ struct drm_i915_gem_context_param {
1308}; 1331};
1309 1332
1310enum drm_i915_oa_format { 1333enum drm_i915_oa_format {
1311 I915_OA_FORMAT_A13 = 1, 1334 I915_OA_FORMAT_A13 = 1, /* HSW only */
1312 I915_OA_FORMAT_A29, 1335 I915_OA_FORMAT_A29, /* HSW only */
1313 I915_OA_FORMAT_A13_B8_C8, 1336 I915_OA_FORMAT_A13_B8_C8, /* HSW only */
1314 I915_OA_FORMAT_B4_C8, 1337 I915_OA_FORMAT_B4_C8, /* HSW only */
1315 I915_OA_FORMAT_A45_B8_C8, 1338 I915_OA_FORMAT_A45_B8_C8, /* HSW only */
1316 I915_OA_FORMAT_B4_C8_A16, 1339 I915_OA_FORMAT_B4_C8_A16, /* HSW only */
1317 I915_OA_FORMAT_C4_B8, 1340 I915_OA_FORMAT_C4_B8, /* HSW+ */
1341
1342 /* Gen8+ */
1343 I915_OA_FORMAT_A12,
1344 I915_OA_FORMAT_A12_B8_C8,
1345 I915_OA_FORMAT_A32u40_A4u32_B8_C8,
1318 1346
1319 I915_OA_FORMAT_MAX /* non-ABI */ 1347 I915_OA_FORMAT_MAX /* non-ABI */
1320}; 1348};
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index a4a189a240d7..26c54f6d595d 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -104,10 +104,14 @@ struct drm_msm_gem_new {
104 __u32 handle; /* out */ 104 __u32 handle; /* out */
105}; 105};
106 106
107#define MSM_INFO_IOVA 0x01
108
109#define MSM_INFO_FLAGS (MSM_INFO_IOVA)
110
107struct drm_msm_gem_info { 111struct drm_msm_gem_info {
108 __u32 handle; /* in */ 112 __u32 handle; /* in */
109 __u32 pad; 113 __u32 flags; /* in - combination of MSM_INFO_* flags */
110 __u64 offset; /* out, offset to pass to mmap() */ 114 __u64 offset; /* out, mmap() offset or iova */
111}; 115};
112 116
113#define MSM_PREP_READ 0x01 117#define MSM_PREP_READ 0x01
@@ -261,7 +265,6 @@ struct drm_msm_gem_madvise {
261#define DRM_MSM_GEM_SUBMIT 0x06 265#define DRM_MSM_GEM_SUBMIT 0x06
262#define DRM_MSM_WAIT_FENCE 0x07 266#define DRM_MSM_WAIT_FENCE 0x07
263#define DRM_MSM_GEM_MADVISE 0x08 267#define DRM_MSM_GEM_MADVISE 0x08
264#define DRM_MSM_NUM_IOCTLS 0x09
265 268
266#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param) 269#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
267#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new) 270#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 2831480c63a2..ee97196bb151 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -580,7 +580,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
580 int ret = -ENOMEM, max_order = 0; 580 int ret = -ENOMEM, max_order = 0;
581 581
582 if (!has_aux(event)) 582 if (!has_aux(event))
583 return -ENOTSUPP; 583 return -EOPNOTSUPP;
584 584
585 if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) { 585 if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
586 /* 586 /*
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index f8269036bf0b..52c4e907c14b 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -59,7 +59,11 @@ static void notrace klp_ftrace_handler(unsigned long ip,
59 59
60 ops = container_of(fops, struct klp_ops, fops); 60 ops = container_of(fops, struct klp_ops, fops);
61 61
62 rcu_read_lock(); 62 /*
63 * A variant of synchronize_sched() is used to allow patching functions
64 * where RCU is not watching, see klp_synchronize_transition().
65 */
66 preempt_disable_notrace();
63 67
64 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, 68 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
65 stack_node); 69 stack_node);
@@ -115,7 +119,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
115 119
116 klp_arch_set_pc(regs, (unsigned long)func->new_func); 120 klp_arch_set_pc(regs, (unsigned long)func->new_func);
117unlock: 121unlock:
118 rcu_read_unlock(); 122 preempt_enable_notrace();
119} 123}
120 124
121/* 125/*
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index adc0cc64aa4b..b004a1fb6032 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -49,6 +49,28 @@ static void klp_transition_work_fn(struct work_struct *work)
49static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn); 49static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
50 50
51/* 51/*
52 * This function is just a stub to implement a hard force
53 * of synchronize_sched(). This requires synchronizing
54 * tasks even in userspace and idle.
55 */
56static void klp_sync(struct work_struct *work)
57{
58}
59
60/*
61 * We allow to patch also functions where RCU is not watching,
62 * e.g. before user_exit(). We can not rely on the RCU infrastructure
63 * to do the synchronization. Instead hard force the sched synchronization.
64 *
65 * This approach allows to use RCU functions for manipulating func_stack
66 * safely.
67 */
68static void klp_synchronize_transition(void)
69{
70 schedule_on_each_cpu(klp_sync);
71}
72
73/*
52 * The transition to the target patch state is complete. Clean up the data 74 * The transition to the target patch state is complete. Clean up the data
53 * structures. 75 * structures.
54 */ 76 */
@@ -73,7 +95,7 @@ static void klp_complete_transition(void)
73 * func->transition gets cleared, the handler may choose a 95 * func->transition gets cleared, the handler may choose a
74 * removed function. 96 * removed function.
75 */ 97 */
76 synchronize_rcu(); 98 klp_synchronize_transition();
77 } 99 }
78 100
79 if (klp_transition_patch->immediate) 101 if (klp_transition_patch->immediate)
@@ -92,7 +114,7 @@ static void klp_complete_transition(void)
92 114
93 /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ 115 /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
94 if (klp_target_state == KLP_PATCHED) 116 if (klp_target_state == KLP_PATCHED)
95 synchronize_rcu(); 117 klp_synchronize_transition();
96 118
97 read_lock(&tasklist_lock); 119 read_lock(&tasklist_lock);
98 for_each_process_thread(g, task) { 120 for_each_process_thread(g, task) {
@@ -136,7 +158,11 @@ void klp_cancel_transition(void)
136 */ 158 */
137void klp_update_patch_state(struct task_struct *task) 159void klp_update_patch_state(struct task_struct *task)
138{ 160{
139 rcu_read_lock(); 161 /*
162 * A variant of synchronize_sched() is used to allow patching functions
163 * where RCU is not watching, see klp_synchronize_transition().
164 */
165 preempt_disable_notrace();
140 166
141 /* 167 /*
142 * This test_and_clear_tsk_thread_flag() call also serves as a read 168 * This test_and_clear_tsk_thread_flag() call also serves as a read
@@ -153,7 +179,7 @@ void klp_update_patch_state(struct task_struct *task)
153 if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING)) 179 if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
154 task->patch_state = READ_ONCE(klp_target_state); 180 task->patch_state = READ_ONCE(klp_target_state);
155 181
156 rcu_read_unlock(); 182 preempt_enable_notrace();
157} 183}
158 184
159/* 185/*
@@ -539,7 +565,7 @@ void klp_reverse_transition(void)
539 clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING); 565 clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
540 566
541 /* Let any remaining calls to klp_update_patch_state() complete */ 567 /* Let any remaining calls to klp_update_patch_state() complete */
542 synchronize_rcu(); 568 klp_synchronize_transition();
543 569
544 klp_start_transition(); 570 klp_start_transition();
545} 571}
diff --git a/kernel/signal.c b/kernel/signal.c
index ca92bcfeb322..45b4c1ffe14e 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -510,7 +510,8 @@ int unhandled_signal(struct task_struct *tsk, int sig)
510 return !tsk->ptrace; 510 return !tsk->ptrace;
511} 511}
512 512
513static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) 513static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
514 bool *resched_timer)
514{ 515{
515 struct sigqueue *q, *first = NULL; 516 struct sigqueue *q, *first = NULL;
516 517
@@ -532,6 +533,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
532still_pending: 533still_pending:
533 list_del_init(&first->list); 534 list_del_init(&first->list);
534 copy_siginfo(info, &first->info); 535 copy_siginfo(info, &first->info);
536
537 *resched_timer =
538 (first->flags & SIGQUEUE_PREALLOC) &&
539 (info->si_code == SI_TIMER) &&
540 (info->si_sys_private);
541
535 __sigqueue_free(first); 542 __sigqueue_free(first);
536 } else { 543 } else {
537 /* 544 /*
@@ -548,12 +555,12 @@ still_pending:
548} 555}
549 556
550static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, 557static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
551 siginfo_t *info) 558 siginfo_t *info, bool *resched_timer)
552{ 559{
553 int sig = next_signal(pending, mask); 560 int sig = next_signal(pending, mask);
554 561
555 if (sig) 562 if (sig)
556 collect_signal(sig, pending, info); 563 collect_signal(sig, pending, info, resched_timer);
557 return sig; 564 return sig;
558} 565}
559 566
@@ -565,15 +572,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
565 */ 572 */
566int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) 573int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
567{ 574{
575 bool resched_timer = false;
568 int signr; 576 int signr;
569 577
570 /* We only dequeue private signals from ourselves, we don't let 578 /* We only dequeue private signals from ourselves, we don't let
571 * signalfd steal them 579 * signalfd steal them
572 */ 580 */
573 signr = __dequeue_signal(&tsk->pending, mask, info); 581 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
574 if (!signr) { 582 if (!signr) {
575 signr = __dequeue_signal(&tsk->signal->shared_pending, 583 signr = __dequeue_signal(&tsk->signal->shared_pending,
576 mask, info); 584 mask, info, &resched_timer);
577#ifdef CONFIG_POSIX_TIMERS 585#ifdef CONFIG_POSIX_TIMERS
578 /* 586 /*
579 * itimer signal ? 587 * itimer signal ?
@@ -621,7 +629,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
621 current->jobctl |= JOBCTL_STOP_DEQUEUED; 629 current->jobctl |= JOBCTL_STOP_DEQUEUED;
622 } 630 }
623#ifdef CONFIG_POSIX_TIMERS 631#ifdef CONFIG_POSIX_TIMERS
624 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { 632 if (resched_timer) {
625 /* 633 /*
626 * Release the siglock to ensure proper locking order 634 * Release the siglock to ensure proper locking order
627 * of timer locks outside of siglocks. Note, we leave 635 * of timer locks outside of siglocks. Note, we leave
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 9652bc57fd09..b602c48cb841 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -118,6 +118,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
118 tk->offs_boot = ktime_add(tk->offs_boot, delta); 118 tk->offs_boot = ktime_add(tk->offs_boot, delta);
119} 119}
120 120
121/*
122 * tk_clock_read - atomic clocksource read() helper
123 *
124 * This helper is necessary to use in the read paths because, while the
125 * seqlock ensures we don't return a bad value while structures are updated,
126 * it doesn't protect from potential crashes. There is the possibility that
127 * the tkr's clocksource may change between the read reference, and the
128 * clock reference passed to the read function. This can cause crashes if
129 * the wrong clocksource is passed to the wrong read function.
130 * This isn't necessary to use when holding the timekeeper_lock or doing
131 * a read of the fast-timekeeper tkrs (which is protected by its own locking
132 * and update logic).
133 */
134static inline u64 tk_clock_read(struct tk_read_base *tkr)
135{
136 struct clocksource *clock = READ_ONCE(tkr->clock);
137
138 return clock->read(clock);
139}
140
121#ifdef CONFIG_DEBUG_TIMEKEEPING 141#ifdef CONFIG_DEBUG_TIMEKEEPING
122#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */ 142#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
123 143
@@ -175,7 +195,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
175 */ 195 */
176 do { 196 do {
177 seq = read_seqcount_begin(&tk_core.seq); 197 seq = read_seqcount_begin(&tk_core.seq);
178 now = tkr->read(tkr->clock); 198 now = tk_clock_read(tkr);
179 last = tkr->cycle_last; 199 last = tkr->cycle_last;
180 mask = tkr->mask; 200 mask = tkr->mask;
181 max = tkr->clock->max_cycles; 201 max = tkr->clock->max_cycles;
@@ -209,7 +229,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
209 u64 cycle_now, delta; 229 u64 cycle_now, delta;
210 230
211 /* read clocksource */ 231 /* read clocksource */
212 cycle_now = tkr->read(tkr->clock); 232 cycle_now = tk_clock_read(tkr);
213 233
214 /* calculate the delta since the last update_wall_time */ 234 /* calculate the delta since the last update_wall_time */
215 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask); 235 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -238,12 +258,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
238 ++tk->cs_was_changed_seq; 258 ++tk->cs_was_changed_seq;
239 old_clock = tk->tkr_mono.clock; 259 old_clock = tk->tkr_mono.clock;
240 tk->tkr_mono.clock = clock; 260 tk->tkr_mono.clock = clock;
241 tk->tkr_mono.read = clock->read;
242 tk->tkr_mono.mask = clock->mask; 261 tk->tkr_mono.mask = clock->mask;
243 tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock); 262 tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
244 263
245 tk->tkr_raw.clock = clock; 264 tk->tkr_raw.clock = clock;
246 tk->tkr_raw.read = clock->read;
247 tk->tkr_raw.mask = clock->mask; 265 tk->tkr_raw.mask = clock->mask;
248 tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last; 266 tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
249 267
@@ -262,7 +280,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
262 /* Go back from cycles -> shifted ns */ 280 /* Go back from cycles -> shifted ns */
263 tk->xtime_interval = interval * clock->mult; 281 tk->xtime_interval = interval * clock->mult;
264 tk->xtime_remainder = ntpinterval - tk->xtime_interval; 282 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
265 tk->raw_interval = (interval * clock->mult) >> clock->shift; 283 tk->raw_interval = interval * clock->mult;
266 284
267 /* if changing clocks, convert xtime_nsec shift units */ 285 /* if changing clocks, convert xtime_nsec shift units */
268 if (old_clock) { 286 if (old_clock) {
@@ -404,7 +422,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
404 422
405 now += timekeeping_delta_to_ns(tkr, 423 now += timekeeping_delta_to_ns(tkr,
406 clocksource_delta( 424 clocksource_delta(
407 tkr->read(tkr->clock), 425 tk_clock_read(tkr),
408 tkr->cycle_last, 426 tkr->cycle_last,
409 tkr->mask)); 427 tkr->mask));
410 } while (read_seqcount_retry(&tkf->seq, seq)); 428 } while (read_seqcount_retry(&tkf->seq, seq));
@@ -461,6 +479,10 @@ static u64 dummy_clock_read(struct clocksource *cs)
461 return cycles_at_suspend; 479 return cycles_at_suspend;
462} 480}
463 481
482static struct clocksource dummy_clock = {
483 .read = dummy_clock_read,
484};
485
464/** 486/**
465 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource. 487 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
466 * @tk: Timekeeper to snapshot. 488 * @tk: Timekeeper to snapshot.
@@ -477,13 +499,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
477 struct tk_read_base *tkr = &tk->tkr_mono; 499 struct tk_read_base *tkr = &tk->tkr_mono;
478 500
479 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); 501 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
480 cycles_at_suspend = tkr->read(tkr->clock); 502 cycles_at_suspend = tk_clock_read(tkr);
481 tkr_dummy.read = dummy_clock_read; 503 tkr_dummy.clock = &dummy_clock;
482 update_fast_timekeeper(&tkr_dummy, &tk_fast_mono); 504 update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
483 505
484 tkr = &tk->tkr_raw; 506 tkr = &tk->tkr_raw;
485 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); 507 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
486 tkr_dummy.read = dummy_clock_read; 508 tkr_dummy.clock = &dummy_clock;
487 update_fast_timekeeper(&tkr_dummy, &tk_fast_raw); 509 update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
488} 510}
489 511
@@ -649,11 +671,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
649 */ 671 */
650static void timekeeping_forward_now(struct timekeeper *tk) 672static void timekeeping_forward_now(struct timekeeper *tk)
651{ 673{
652 struct clocksource *clock = tk->tkr_mono.clock;
653 u64 cycle_now, delta; 674 u64 cycle_now, delta;
654 u64 nsec; 675 u64 nsec;
655 676
656 cycle_now = tk->tkr_mono.read(clock); 677 cycle_now = tk_clock_read(&tk->tkr_mono);
657 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); 678 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
658 tk->tkr_mono.cycle_last = cycle_now; 679 tk->tkr_mono.cycle_last = cycle_now;
659 tk->tkr_raw.cycle_last = cycle_now; 680 tk->tkr_raw.cycle_last = cycle_now;
@@ -929,8 +950,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
929 950
930 do { 951 do {
931 seq = read_seqcount_begin(&tk_core.seq); 952 seq = read_seqcount_begin(&tk_core.seq);
932 953 now = tk_clock_read(&tk->tkr_mono);
933 now = tk->tkr_mono.read(tk->tkr_mono.clock);
934 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq; 954 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
935 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq; 955 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
936 base_real = ktime_add(tk->tkr_mono.base, 956 base_real = ktime_add(tk->tkr_mono.base,
@@ -1108,7 +1128,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
1108 * Check whether the system counter value provided by the 1128 * Check whether the system counter value provided by the
1109 * device driver is on the current timekeeping interval. 1129 * device driver is on the current timekeeping interval.
1110 */ 1130 */
1111 now = tk->tkr_mono.read(tk->tkr_mono.clock); 1131 now = tk_clock_read(&tk->tkr_mono);
1112 interval_start = tk->tkr_mono.cycle_last; 1132 interval_start = tk->tkr_mono.cycle_last;
1113 if (!cycle_between(interval_start, cycles, now)) { 1133 if (!cycle_between(interval_start, cycles, now)) {
1114 clock_was_set_seq = tk->clock_was_set_seq; 1134 clock_was_set_seq = tk->clock_was_set_seq;
@@ -1629,7 +1649,7 @@ void timekeeping_resume(void)
1629 * The less preferred source will only be tried if there is no better 1649 * The less preferred source will only be tried if there is no better
1630 * usable source. The rtc part is handled separately in rtc core code. 1650 * usable source. The rtc part is handled separately in rtc core code.
1631 */ 1651 */
1632 cycle_now = tk->tkr_mono.read(clock); 1652 cycle_now = tk_clock_read(&tk->tkr_mono);
1633 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && 1653 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
1634 cycle_now > tk->tkr_mono.cycle_last) { 1654 cycle_now > tk->tkr_mono.cycle_last) {
1635 u64 nsec, cyc_delta; 1655 u64 nsec, cyc_delta;
@@ -1976,7 +1996,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
1976 u32 shift, unsigned int *clock_set) 1996 u32 shift, unsigned int *clock_set)
1977{ 1997{
1978 u64 interval = tk->cycle_interval << shift; 1998 u64 interval = tk->cycle_interval << shift;
1979 u64 raw_nsecs; 1999 u64 snsec_per_sec;
1980 2000
1981 /* If the offset is smaller than a shifted interval, do nothing */ 2001 /* If the offset is smaller than a shifted interval, do nothing */
1982 if (offset < interval) 2002 if (offset < interval)
@@ -1991,14 +2011,15 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
1991 *clock_set |= accumulate_nsecs_to_secs(tk); 2011 *clock_set |= accumulate_nsecs_to_secs(tk);
1992 2012
1993 /* Accumulate raw time */ 2013 /* Accumulate raw time */
1994 raw_nsecs = (u64)tk->raw_interval << shift; 2014 tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
1995 raw_nsecs += tk->raw_time.tv_nsec; 2015 tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
1996 if (raw_nsecs >= NSEC_PER_SEC) { 2016 snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
1997 u64 raw_secs = raw_nsecs; 2017 while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
1998 raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); 2018 tk->tkr_raw.xtime_nsec -= snsec_per_sec;
1999 tk->raw_time.tv_sec += raw_secs; 2019 tk->raw_time.tv_sec++;
2000 } 2020 }
2001 tk->raw_time.tv_nsec = raw_nsecs; 2021 tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
2022 tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
2002 2023
2003 /* Accumulate error between NTP and clock interval */ 2024 /* Accumulate error between NTP and clock interval */
2004 tk->ntp_error += tk->ntp_tick << shift; 2025 tk->ntp_error += tk->ntp_tick << shift;
@@ -2030,7 +2051,7 @@ void update_wall_time(void)
2030#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET 2051#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
2031 offset = real_tk->cycle_interval; 2052 offset = real_tk->cycle_interval;
2032#else 2053#else
2033 offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock), 2054 offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
2034 tk->tkr_mono.cycle_last, tk->tkr_mono.mask); 2055 tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
2035#endif 2056#endif
2036 2057
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 3c6432df7e63..4c0888c4a68d 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -23,14 +23,14 @@
23 * the values[M, M+1, ..., N] into the ints array in get_options. 23 * the values[M, M+1, ..., N] into the ints array in get_options.
24 */ 24 */
25 25
26static int get_range(char **str, int *pint) 26static int get_range(char **str, int *pint, int n)
27{ 27{
28 int x, inc_counter, upper_range; 28 int x, inc_counter, upper_range;
29 29
30 (*str)++; 30 (*str)++;
31 upper_range = simple_strtol((*str), NULL, 0); 31 upper_range = simple_strtol((*str), NULL, 0);
32 inc_counter = upper_range - *pint; 32 inc_counter = upper_range - *pint;
33 for (x = *pint; x < upper_range; x++) 33 for (x = *pint; n && x < upper_range; x++, n--)
34 *pint++ = x; 34 *pint++ = x;
35 return inc_counter; 35 return inc_counter;
36} 36}
@@ -97,7 +97,7 @@ char *get_options(const char *str, int nints, int *ints)
97 break; 97 break;
98 if (res == 3) { 98 if (res == 3) {
99 int range_nums; 99 int range_nums;
100 range_nums = get_range((char **)&str, ints + i); 100 range_nums = get_range((char **)&str, ints + i, nints - i);
101 if (range_nums < 0) 101 if (range_nums < 0)
102 break; 102 break;
103 /* 103 /*
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 945fd1ca49b5..df4ebdb2b10a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -652,7 +652,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
652 spin_unlock(ptl); 652 spin_unlock(ptl);
653 free_page_and_swap_cache(src_page); 653 free_page_and_swap_cache(src_page);
654 } 654 }
655 cond_resched();
656 } 655 }
657} 656}
658 657
diff --git a/mm/mmap.c b/mm/mmap.c
index 8e07976d5e47..a5e3dcd75e79 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1817,7 +1817,8 @@ check_current:
1817 /* Check if current node has a suitable gap */ 1817 /* Check if current node has a suitable gap */
1818 if (gap_start > high_limit) 1818 if (gap_start > high_limit)
1819 return -ENOMEM; 1819 return -ENOMEM;
1820 if (gap_end >= low_limit && gap_end - gap_start >= length) 1820 if (gap_end >= low_limit &&
1821 gap_end > gap_start && gap_end - gap_start >= length)
1821 goto found; 1822 goto found;
1822 1823
1823 /* Visit right subtree if it looks promising */ 1824 /* Visit right subtree if it looks promising */
@@ -1920,7 +1921,8 @@ check_current:
1920 gap_end = vm_start_gap(vma); 1921 gap_end = vm_start_gap(vma);
1921 if (gap_end < low_limit) 1922 if (gap_end < low_limit)
1922 return -ENOMEM; 1923 return -ENOMEM;
1923 if (gap_start <= high_limit && gap_end - gap_start >= length) 1924 if (gap_start <= high_limit &&
1925 gap_end > gap_start && gap_end - gap_start >= length)
1924 goto found; 1926 goto found;
1925 1927
1926 /* Visit left subtree if it looks promising */ 1928 /* Visit left subtree if it looks promising */
@@ -2228,16 +2230,19 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2228 if (!(vma->vm_flags & VM_GROWSUP)) 2230 if (!(vma->vm_flags & VM_GROWSUP))
2229 return -EFAULT; 2231 return -EFAULT;
2230 2232
2231 /* Guard against wrapping around to address 0. */ 2233 /* Guard against exceeding limits of the address space. */
2232 address &= PAGE_MASK; 2234 address &= PAGE_MASK;
2233 address += PAGE_SIZE; 2235 if (address >= TASK_SIZE)
2234 if (!address)
2235 return -ENOMEM; 2236 return -ENOMEM;
2237 address += PAGE_SIZE;
2236 2238
2237 /* Enforce stack_guard_gap */ 2239 /* Enforce stack_guard_gap */
2238 gap_addr = address + stack_guard_gap; 2240 gap_addr = address + stack_guard_gap;
2239 if (gap_addr < address) 2241
2240 return -ENOMEM; 2242 /* Guard against overflow */
2243 if (gap_addr < address || gap_addr > TASK_SIZE)
2244 gap_addr = TASK_SIZE;
2245
2241 next = vma->vm_next; 2246 next = vma->vm_next;
2242 if (next && next->vm_start < gap_addr) { 2247 if (next && next->vm_start < gap_addr) {
2243 if (!(next->vm_flags & VM_GROWSUP)) 2248 if (!(next->vm_flags & VM_GROWSUP))
diff --git a/mm/slub.c b/mm/slub.c
index 7449593fca72..8addc535bcdc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5625,6 +5625,28 @@ static char *create_unique_id(struct kmem_cache *s)
5625 return name; 5625 return name;
5626} 5626}
5627 5627
5628static void sysfs_slab_remove_workfn(struct work_struct *work)
5629{
5630 struct kmem_cache *s =
5631 container_of(work, struct kmem_cache, kobj_remove_work);
5632
5633 if (!s->kobj.state_in_sysfs)
5634 /*
5635 * For a memcg cache, this may be called during
5636 * deactivation and again on shutdown. Remove only once.
5637 * A cache is never shut down before deactivation is
5638 * complete, so no need to worry about synchronization.
5639 */
5640 return;
5641
5642#ifdef CONFIG_MEMCG
5643 kset_unregister(s->memcg_kset);
5644#endif
5645 kobject_uevent(&s->kobj, KOBJ_REMOVE);
5646 kobject_del(&s->kobj);
5647 kobject_put(&s->kobj);
5648}
5649
5628static int sysfs_slab_add(struct kmem_cache *s) 5650static int sysfs_slab_add(struct kmem_cache *s)
5629{ 5651{
5630 int err; 5652 int err;
@@ -5632,6 +5654,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
5632 struct kset *kset = cache_kset(s); 5654 struct kset *kset = cache_kset(s);
5633 int unmergeable = slab_unmergeable(s); 5655 int unmergeable = slab_unmergeable(s);
5634 5656
5657 INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
5658
5635 if (!kset) { 5659 if (!kset) {
5636 kobject_init(&s->kobj, &slab_ktype); 5660 kobject_init(&s->kobj, &slab_ktype);
5637 return 0; 5661 return 0;
@@ -5695,20 +5719,8 @@ static void sysfs_slab_remove(struct kmem_cache *s)
5695 */ 5719 */
5696 return; 5720 return;
5697 5721
5698 if (!s->kobj.state_in_sysfs) 5722 kobject_get(&s->kobj);
5699 /* 5723 schedule_work(&s->kobj_remove_work);
5700 * For a memcg cache, this may be called during
5701 * deactivation and again on shutdown. Remove only once.
5702 * A cache is never shut down before deactivation is
5703 * complete, so no need to worry about synchronization.
5704 */
5705 return;
5706
5707#ifdef CONFIG_MEMCG
5708 kset_unregister(s->memcg_kset);
5709#endif
5710 kobject_uevent(&s->kobj, KOBJ_REMOVE);
5711 kobject_del(&s->kobj);
5712} 5724}
5713 5725
5714void sysfs_slab_release(struct kmem_cache *s) 5726void sysfs_slab_release(struct kmem_cache *s)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 34a1c3e46ed7..ecc97f74ab18 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -287,10 +287,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
287 if (p4d_none(*p4d)) 287 if (p4d_none(*p4d))
288 return NULL; 288 return NULL;
289 pud = pud_offset(p4d, addr); 289 pud = pud_offset(p4d, addr);
290 if (pud_none(*pud)) 290
291 /*
292 * Don't dereference bad PUD or PMD (below) entries. This will also
293 * identify huge mappings, which we may encounter on architectures
294 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
295 * identified as vmalloc addresses by is_vmalloc_addr(), but are
296 * not [unambiguously] associated with a struct page, so there is
297 * no correct value to return for them.
298 */
299 WARN_ON_ONCE(pud_bad(*pud));
300 if (pud_none(*pud) || pud_bad(*pud))
291 return NULL; 301 return NULL;
292 pmd = pmd_offset(pud, addr); 302 pmd = pmd_offset(pud, addr);
293 if (pmd_none(*pmd)) 303 WARN_ON_ONCE(pmd_bad(*pmd));
304 if (pmd_none(*pmd) || pmd_bad(*pmd))
294 return NULL; 305 return NULL;
295 306
296 ptep = pte_offset_map(pmd, addr); 307 ptep = pte_offset_map(pmd, addr);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 467069b73ce1..9649579b5b9f 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
277 return 0; 277 return 0;
278 278
279out_free_newdev: 279out_free_newdev:
280 free_netdev(new_dev); 280 if (new_dev->reg_state == NETREG_UNINITIALIZED)
281 free_netdev(new_dev);
281 return err; 282 return err;
282} 283}
283 284
diff --git a/net/core/dev.c b/net/core/dev.c
index 6d60149287a1..7243421c9783 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5206,8 +5206,6 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
5206 if (rc == BUSY_POLL_BUDGET) 5206 if (rc == BUSY_POLL_BUDGET)
5207 __napi_schedule(napi); 5207 __napi_schedule(napi);
5208 local_bh_enable(); 5208 local_bh_enable();
5209 if (local_softirq_pending())
5210 do_softirq();
5211} 5209}
5212 5210
5213void napi_busy_loop(unsigned int napi_id, 5211void napi_busy_loop(unsigned int napi_id,
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index b94b1d293506..27fad31784a8 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -410,6 +410,22 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
410 if (cmd == SIOCGIFNAME) 410 if (cmd == SIOCGIFNAME)
411 return dev_ifname(net, (struct ifreq __user *)arg); 411 return dev_ifname(net, (struct ifreq __user *)arg);
412 412
413 /*
414 * Take care of Wireless Extensions. Unfortunately struct iwreq
415 * isn't a proper subset of struct ifreq (it's 8 byte shorter)
416 * so we need to treat it specially, otherwise applications may
417 * fault if the struct they're passing happens to land at the
418 * end of a mapped page.
419 */
420 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
421 struct iwreq iwr;
422
423 if (copy_from_user(&iwr, arg, sizeof(iwr)))
424 return -EFAULT;
425
426 return wext_handle_ioctl(net, &iwr, cmd, arg);
427 }
428
413 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 429 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
414 return -EFAULT; 430 return -EFAULT;
415 431
@@ -559,9 +575,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
559 ret = -EFAULT; 575 ret = -EFAULT;
560 return ret; 576 return ret;
561 } 577 }
562 /* Take care of Wireless Extensions */
563 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
564 return wext_handle_ioctl(net, &ifr, cmd, arg);
565 return -ENOTTY; 578 return -ENOTTY;
566 } 579 }
567} 580}
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index f21c4d3aeae0..3bba291c6c32 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -568,7 +568,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
568 struct net *net = sock_net(skb->sk); 568 struct net *net = sock_net(skb->sk);
569 struct fib_rule_hdr *frh = nlmsg_data(nlh); 569 struct fib_rule_hdr *frh = nlmsg_data(nlh);
570 struct fib_rules_ops *ops = NULL; 570 struct fib_rules_ops *ops = NULL;
571 struct fib_rule *rule, *tmp; 571 struct fib_rule *rule, *r;
572 struct nlattr *tb[FRA_MAX+1]; 572 struct nlattr *tb[FRA_MAX+1];
573 struct fib_kuid_range range; 573 struct fib_kuid_range range;
574 int err = -EINVAL; 574 int err = -EINVAL;
@@ -668,16 +668,23 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
668 668
669 /* 669 /*
670 * Check if this rule is a target to any of them. If so, 670 * Check if this rule is a target to any of them. If so,
671 * adjust to the next one with the same preference or
671 * disable them. As this operation is eventually very 672 * disable them. As this operation is eventually very
672 * expensive, it is only performed if goto rules have 673 * expensive, it is only performed if goto rules, except
673 * actually been added. 674 * current if it is goto rule, have actually been added.
674 */ 675 */
675 if (ops->nr_goto_rules > 0) { 676 if (ops->nr_goto_rules > 0) {
676 list_for_each_entry(tmp, &ops->rules_list, list) { 677 struct fib_rule *n;
677 if (rtnl_dereference(tmp->ctarget) == rule) { 678
678 RCU_INIT_POINTER(tmp->ctarget, NULL); 679 n = list_next_entry(rule, list);
680 if (&n->list == &ops->rules_list || n->pref != rule->pref)
681 n = NULL;
682 list_for_each_entry(r, &ops->rules_list, list) {
683 if (rtnl_dereference(r->ctarget) != rule)
684 continue;
685 rcu_assign_pointer(r->ctarget, n);
686 if (!n)
679 ops->unresolved_rules++; 687 ops->unresolved_rules++;
680 }
681 } 688 }
682 } 689 }
683 690
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5e61456f6bc7..467a2f4510a7 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -931,6 +931,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
931 + nla_total_size(1) /* IFLA_LINKMODE */ 931 + nla_total_size(1) /* IFLA_LINKMODE */
932 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ 932 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
933 + nla_total_size(4) /* IFLA_LINK_NETNSID */ 933 + nla_total_size(4) /* IFLA_LINK_NETNSID */
934 + nla_total_size(4) /* IFLA_GROUP */
934 + nla_total_size(ext_filter_mask 935 + nla_total_size(ext_filter_mask
935 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ 936 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
936 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ 937 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -1468,6 +1469,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1468 [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, 1469 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
1469 [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, 1470 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
1470 [IFLA_XDP] = { .type = NLA_NESTED }, 1471 [IFLA_XDP] = { .type = NLA_NESTED },
1472 [IFLA_GROUP] = { .type = NLA_U32 },
1471}; 1473};
1472 1474
1473static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { 1475static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 4b9518a0d248..6f95612b4d32 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
188 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); 188 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
189} 189}
190 190
191static inline void dnrt_drop(struct dn_route *rt)
192{
193 dst_release(&rt->dst);
194 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
195}
196
197static void dn_dst_check_expire(unsigned long dummy) 191static void dn_dst_check_expire(unsigned long dummy)
198{ 192{
199 int i; 193 int i;
@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
248 } 242 }
249 *rtp = rt->dst.dn_next; 243 *rtp = rt->dst.dn_next;
250 rt->dst.dn_next = NULL; 244 rt->dst.dn_next = NULL;
251 dnrt_drop(rt); 245 dnrt_free(rt);
252 break; 246 break;
253 } 247 }
254 spin_unlock_bh(&dn_rt_hash_table[i].lock); 248 spin_unlock_bh(&dn_rt_hash_table[i].lock);
@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
350 dst_use(&rth->dst, now); 344 dst_use(&rth->dst, now);
351 spin_unlock_bh(&dn_rt_hash_table[hash].lock); 345 spin_unlock_bh(&dn_rt_hash_table[hash].lock);
352 346
353 dnrt_drop(rt); 347 dst_free(&rt->dst);
354 *rp = rth; 348 *rp = rth;
355 return 0; 349 return 0;
356 } 350 }
@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
380 for(; rt; rt = next) { 374 for(; rt; rt = next) {
381 next = rcu_dereference_raw(rt->dst.dn_next); 375 next = rcu_dereference_raw(rt->dst.dn_next);
382 RCU_INIT_POINTER(rt->dst.dn_next, NULL); 376 RCU_INIT_POINTER(rt->dst.dn_next, NULL);
383 dst_free((struct dst_entry *)rt); 377 dnrt_free(rt);
384 } 378 }
385 379
386nothing_to_declare: 380nothing_to_declare:
@@ -1187,7 +1181,7 @@ make_route:
1187 if (dev_out->flags & IFF_LOOPBACK) 1181 if (dev_out->flags & IFF_LOOPBACK)
1188 flags |= RTCF_LOCAL; 1182 flags |= RTCF_LOCAL;
1189 1183
1190 rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST); 1184 rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
1191 if (rt == NULL) 1185 if (rt == NULL)
1192 goto e_nobufs; 1186 goto e_nobufs;
1193 1187
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 8f6b5bbcbf69..ec9a396fa466 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1112,6 +1112,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1112 pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); 1112 pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
1113 if (!pmc) 1113 if (!pmc)
1114 return; 1114 return;
1115 spin_lock_init(&pmc->lock);
1115 spin_lock_bh(&im->lock); 1116 spin_lock_bh(&im->lock);
1116 pmc->interface = im->interface; 1117 pmc->interface = im->interface;
1117 in_dev_hold(in_dev); 1118 in_dev_hold(in_dev);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index b436d0775631..129d1a3616f8 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -446,6 +446,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
446 return 0; 446 return 0;
447 447
448drop: 448drop:
449 if (tun_dst)
450 dst_release((struct dst_entry *)tun_dst);
449 kfree_skb(skb); 451 kfree_skb(skb);
450 return 0; 452 return 0;
451} 453}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6a4fb1e629fb..686c92375e81 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -332,9 +332,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
332static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, 332static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
333 unsigned long delay) 333 unsigned long delay)
334{ 334{
335 if (!delayed_work_pending(&ifp->dad_work)) 335 in6_ifa_hold(ifp);
336 in6_ifa_hold(ifp); 336 if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
337 mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); 337 in6_ifa_put(ifp);
338} 338}
339 339
340static int snmp6_alloc_dev(struct inet6_dev *idev) 340static int snmp6_alloc_dev(struct inet6_dev *idev)
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index eea23b57c6a5..ec849d88a662 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -32,7 +32,6 @@ struct fib6_rule {
32struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, 32struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
33 int flags, pol_lookup_t lookup) 33 int flags, pol_lookup_t lookup)
34{ 34{
35 struct rt6_info *rt;
36 struct fib_lookup_arg arg = { 35 struct fib_lookup_arg arg = {
37 .lookup_ptr = lookup, 36 .lookup_ptr = lookup,
38 .flags = FIB_LOOKUP_NOREF, 37 .flags = FIB_LOOKUP_NOREF,
@@ -44,21 +43,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
44 fib_rules_lookup(net->ipv6.fib6_rules_ops, 43 fib_rules_lookup(net->ipv6.fib6_rules_ops,
45 flowi6_to_flowi(fl6), flags, &arg); 44 flowi6_to_flowi(fl6), flags, &arg);
46 45
47 rt = arg.result; 46 if (arg.result)
47 return arg.result;
48 48
49 if (!rt) { 49 dst_hold(&net->ipv6.ip6_null_entry->dst);
50 dst_hold(&net->ipv6.ip6_null_entry->dst); 50 return &net->ipv6.ip6_null_entry->dst;
51 return &net->ipv6.ip6_null_entry->dst;
52 }
53
54 if (rt->rt6i_flags & RTF_REJECT &&
55 rt->dst.error == -EAGAIN) {
56 ip6_rt_put(rt);
57 rt = net->ipv6.ip6_null_entry;
58 dst_hold(&rt->dst);
59 }
60
61 return &rt->dst;
62} 51}
63 52
64static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, 53static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -121,7 +110,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
121 flp6->saddr = saddr; 110 flp6->saddr = saddr;
122 } 111 }
123 err = rt->dst.error; 112 err = rt->dst.error;
124 goto out; 113 if (err != -EAGAIN)
114 goto out;
125 } 115 }
126again: 116again:
127 ip6_rt_put(rt); 117 ip6_rt_put(rt);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index d4bf2c68a545..e6b78ba0e636 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -289,8 +289,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
289 struct rt6_info *rt; 289 struct rt6_info *rt;
290 290
291 rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags); 291 rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
292 if (rt->rt6i_flags & RTF_REJECT && 292 if (rt->dst.error == -EAGAIN) {
293 rt->dst.error == -EAGAIN) {
294 ip6_rt_put(rt); 293 ip6_rt_put(rt);
295 rt = net->ipv6.ip6_null_entry; 294 rt = net->ipv6.ip6_null_entry;
296 dst_hold(&rt->dst); 295 dst_hold(&rt->dst);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c3581973f5d7..8c6c3c8e7eef 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -858,6 +858,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
858 return 0; 858 return 0;
859 859
860drop: 860drop:
861 if (tun_dst)
862 dst_release((struct dst_entry *)tun_dst);
861 kfree_skb(skb); 863 kfree_skb(skb);
862 return 0; 864 return 0;
863} 865}
@@ -1246,7 +1248,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1246 fl6.flowi6_proto = IPPROTO_IPIP; 1248 fl6.flowi6_proto = IPPROTO_IPIP;
1247 fl6.daddr = key->u.ipv6.dst; 1249 fl6.daddr = key->u.ipv6.dst;
1248 fl6.flowlabel = key->label; 1250 fl6.flowlabel = key->label;
1249 dsfield = ip6_tclass(key->label); 1251 dsfield = key->tos;
1250 } else { 1252 } else {
1251 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1253 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1252 encap_limit = t->parms.encap_limit; 1254 encap_limit = t->parms.encap_limit;
@@ -1317,7 +1319,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1317 fl6.flowi6_proto = IPPROTO_IPV6; 1319 fl6.flowi6_proto = IPPROTO_IPV6;
1318 fl6.daddr = key->u.ipv6.dst; 1320 fl6.daddr = key->u.ipv6.dst;
1319 fl6.flowlabel = key->label; 1321 fl6.flowlabel = key->label;
1320 dsfield = ip6_tclass(key->label); 1322 dsfield = key->tos;
1321 } else { 1323 } else {
1322 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 1324 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1323 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ 1325 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index 0a4e28477ad9..54369225766e 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -217,7 +217,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
217 unsigned int *_toklen) 217 unsigned int *_toklen)
218{ 218{
219 const __be32 *xdr = *_xdr; 219 const __be32 *xdr = *_xdr;
220 unsigned int toklen = *_toklen, n_parts, loop, tmp; 220 unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
221 221
222 /* there must be at least one name, and at least #names+1 length 222 /* there must be at least one name, and at least #names+1 length
223 * words */ 223 * words */
@@ -247,16 +247,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
247 toklen -= 4; 247 toklen -= 4;
248 if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX) 248 if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
249 return -EINVAL; 249 return -EINVAL;
250 if (tmp > toklen) 250 paddedlen = (tmp + 3) & ~3;
251 if (paddedlen > toklen)
251 return -EINVAL; 252 return -EINVAL;
252 princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL); 253 princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
253 if (!princ->name_parts[loop]) 254 if (!princ->name_parts[loop])
254 return -ENOMEM; 255 return -ENOMEM;
255 memcpy(princ->name_parts[loop], xdr, tmp); 256 memcpy(princ->name_parts[loop], xdr, tmp);
256 princ->name_parts[loop][tmp] = 0; 257 princ->name_parts[loop][tmp] = 0;
257 tmp = (tmp + 3) & ~3; 258 toklen -= paddedlen;
258 toklen -= tmp; 259 xdr += paddedlen >> 2;
259 xdr += tmp >> 2;
260 } 260 }
261 261
262 if (toklen < 4) 262 if (toklen < 4)
@@ -265,16 +265,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
265 toklen -= 4; 265 toklen -= 4;
266 if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX) 266 if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
267 return -EINVAL; 267 return -EINVAL;
268 if (tmp > toklen) 268 paddedlen = (tmp + 3) & ~3;
269 if (paddedlen > toklen)
269 return -EINVAL; 270 return -EINVAL;
270 princ->realm = kmalloc(tmp + 1, GFP_KERNEL); 271 princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
271 if (!princ->realm) 272 if (!princ->realm)
272 return -ENOMEM; 273 return -ENOMEM;
273 memcpy(princ->realm, xdr, tmp); 274 memcpy(princ->realm, xdr, tmp);
274 princ->realm[tmp] = 0; 275 princ->realm[tmp] = 0;
275 tmp = (tmp + 3) & ~3; 276 toklen -= paddedlen;
276 toklen -= tmp; 277 xdr += paddedlen >> 2;
277 xdr += tmp >> 2;
278 278
279 _debug("%s/...@%s", princ->name_parts[0], princ->realm); 279 _debug("%s/...@%s", princ->name_parts[0], princ->realm);
280 280
@@ -293,7 +293,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
293 unsigned int *_toklen) 293 unsigned int *_toklen)
294{ 294{
295 const __be32 *xdr = *_xdr; 295 const __be32 *xdr = *_xdr;
296 unsigned int toklen = *_toklen, len; 296 unsigned int toklen = *_toklen, len, paddedlen;
297 297
298 /* there must be at least one tag and one length word */ 298 /* there must be at least one tag and one length word */
299 if (toklen <= 8) 299 if (toklen <= 8)
@@ -307,15 +307,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
307 toklen -= 8; 307 toklen -= 8;
308 if (len > max_data_size) 308 if (len > max_data_size)
309 return -EINVAL; 309 return -EINVAL;
310 paddedlen = (len + 3) & ~3;
311 if (paddedlen > toklen)
312 return -EINVAL;
310 td->data_len = len; 313 td->data_len = len;
311 314
312 if (len > 0) { 315 if (len > 0) {
313 td->data = kmemdup(xdr, len, GFP_KERNEL); 316 td->data = kmemdup(xdr, len, GFP_KERNEL);
314 if (!td->data) 317 if (!td->data)
315 return -ENOMEM; 318 return -ENOMEM;
316 len = (len + 3) & ~3; 319 toklen -= paddedlen;
317 toklen -= len; 320 xdr += paddedlen >> 2;
318 xdr += len >> 2;
319 } 321 }
320 322
321 _debug("tag %x len %x", td->tag, td->data_len); 323 _debug("tag %x len %x", td->tag, td->data_len);
@@ -387,7 +389,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
387 const __be32 **_xdr, unsigned int *_toklen) 389 const __be32 **_xdr, unsigned int *_toklen)
388{ 390{
389 const __be32 *xdr = *_xdr; 391 const __be32 *xdr = *_xdr;
390 unsigned int toklen = *_toklen, len; 392 unsigned int toklen = *_toklen, len, paddedlen;
391 393
392 /* there must be at least one length word */ 394 /* there must be at least one length word */
393 if (toklen <= 4) 395 if (toklen <= 4)
@@ -399,6 +401,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
399 toklen -= 4; 401 toklen -= 4;
400 if (len > AFSTOKEN_K5_TIX_MAX) 402 if (len > AFSTOKEN_K5_TIX_MAX)
401 return -EINVAL; 403 return -EINVAL;
404 paddedlen = (len + 3) & ~3;
405 if (paddedlen > toklen)
406 return -EINVAL;
402 *_tktlen = len; 407 *_tktlen = len;
403 408
404 _debug("ticket len %u", len); 409 _debug("ticket len %u", len);
@@ -407,9 +412,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
407 *_ticket = kmemdup(xdr, len, GFP_KERNEL); 412 *_ticket = kmemdup(xdr, len, GFP_KERNEL);
408 if (!*_ticket) 413 if (!*_ticket)
409 return -ENOMEM; 414 return -ENOMEM;
410 len = (len + 3) & ~3; 415 toklen -= paddedlen;
411 toklen -= len; 416 xdr += paddedlen >> 2;
412 xdr += len >> 2;
413 } 417 }
414 418
415 *_xdr = xdr; 419 *_xdr = xdr;
@@ -552,7 +556,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
552{ 556{
553 const __be32 *xdr = prep->data, *token; 557 const __be32 *xdr = prep->data, *token;
554 const char *cp; 558 const char *cp;
555 unsigned int len, tmp, loop, ntoken, toklen, sec_ix; 559 unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
556 size_t datalen = prep->datalen; 560 size_t datalen = prep->datalen;
557 int ret; 561 int ret;
558 562
@@ -578,22 +582,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
578 if (len < 1 || len > AFSTOKEN_CELL_MAX) 582 if (len < 1 || len > AFSTOKEN_CELL_MAX)
579 goto not_xdr; 583 goto not_xdr;
580 datalen -= 4; 584 datalen -= 4;
581 tmp = (len + 3) & ~3; 585 paddedlen = (len + 3) & ~3;
582 if (tmp > datalen) 586 if (paddedlen > datalen)
583 goto not_xdr; 587 goto not_xdr;
584 588
585 cp = (const char *) xdr; 589 cp = (const char *) xdr;
586 for (loop = 0; loop < len; loop++) 590 for (loop = 0; loop < len; loop++)
587 if (!isprint(cp[loop])) 591 if (!isprint(cp[loop]))
588 goto not_xdr; 592 goto not_xdr;
589 if (len < tmp) 593 for (; loop < paddedlen; loop++)
590 for (; loop < tmp; loop++) 594 if (cp[loop])
591 if (cp[loop]) 595 goto not_xdr;
592 goto not_xdr;
593 _debug("cellname: [%u/%u] '%*.*s'", 596 _debug("cellname: [%u/%u] '%*.*s'",
594 len, tmp, len, len, (const char *) xdr); 597 len, paddedlen, len, len, (const char *) xdr);
595 datalen -= tmp; 598 datalen -= paddedlen;
596 xdr += tmp >> 2; 599 xdr += paddedlen >> 2;
597 600
598 /* get the token count */ 601 /* get the token count */
599 if (datalen < 12) 602 if (datalen < 12)
@@ -614,10 +617,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
614 sec_ix = ntohl(*xdr); 617 sec_ix = ntohl(*xdr);
615 datalen -= 4; 618 datalen -= 4;
616 _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix); 619 _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
617 if (toklen < 20 || toklen > datalen) 620 paddedlen = (toklen + 3) & ~3;
621 if (toklen < 20 || toklen > datalen || paddedlen > datalen)
618 goto not_xdr; 622 goto not_xdr;
619 datalen -= (toklen + 3) & ~3; 623 datalen -= paddedlen;
620 xdr += (toklen + 3) >> 2; 624 xdr += paddedlen >> 2;
621 625
622 } while (--loop > 0); 626 } while (--loop > 0);
623 627
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 8c589230794f..3dcd0ecf3d99 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -275,6 +275,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
275 if (sctp_sk(sk)->bind_hash) 275 if (sctp_sk(sk)->bind_hash)
276 sctp_put_port(sk); 276 sctp_put_port(sk);
277 277
278 sctp_sk(sk)->ep = NULL;
278 sock_put(sk); 279 sock_put(sk);
279 } 280 }
280 281
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 048954eee984..9a647214a91e 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -278,7 +278,6 @@ out:
278 278
279static int sctp_sock_dump(struct sock *sk, void *p) 279static int sctp_sock_dump(struct sock *sk, void *p)
280{ 280{
281 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
282 struct sctp_comm_param *commp = p; 281 struct sctp_comm_param *commp = p;
283 struct sk_buff *skb = commp->skb; 282 struct sk_buff *skb = commp->skb;
284 struct netlink_callback *cb = commp->cb; 283 struct netlink_callback *cb = commp->cb;
@@ -287,7 +286,9 @@ static int sctp_sock_dump(struct sock *sk, void *p)
287 int err = 0; 286 int err = 0;
288 287
289 lock_sock(sk); 288 lock_sock(sk);
290 list_for_each_entry(assoc, &ep->asocs, asocs) { 289 if (!sctp_sk(sk)->ep)
290 goto release;
291 list_for_each_entry(assoc, &sctp_sk(sk)->ep->asocs, asocs) {
291 if (cb->args[4] < cb->args[1]) 292 if (cb->args[4] < cb->args[1])
292 goto next; 293 goto next;
293 294
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 30aa0a529215..3a8318e518f1 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4666,9 +4666,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
4666 if (err) 4666 if (err)
4667 return err; 4667 return err;
4668 4668
4669 sctp_transport_get_idx(net, &hti, pos); 4669 obj = sctp_transport_get_idx(net, &hti, pos + 1);
4670 obj = sctp_transport_get_next(net, &hti); 4670 for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
4671 for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) {
4672 struct sctp_transport *transport = obj; 4671 struct sctp_transport *transport = obj;
4673 4672
4674 if (!sctp_transport_hold(transport)) 4673 if (!sctp_transport_hold(transport))
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 1a4db6790e20..6cdb054484d6 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -914,13 +914,12 @@ int call_commit_handler(struct net_device *dev)
914 * Main IOCTl dispatcher. 914 * Main IOCTl dispatcher.
915 * Check the type of IOCTL and call the appropriate wrapper... 915 * Check the type of IOCTL and call the appropriate wrapper...
916 */ 916 */
917static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, 917static int wireless_process_ioctl(struct net *net, struct iwreq *iwr,
918 unsigned int cmd, 918 unsigned int cmd,
919 struct iw_request_info *info, 919 struct iw_request_info *info,
920 wext_ioctl_func standard, 920 wext_ioctl_func standard,
921 wext_ioctl_func private) 921 wext_ioctl_func private)
922{ 922{
923 struct iwreq *iwr = (struct iwreq *) ifr;
924 struct net_device *dev; 923 struct net_device *dev;
925 iw_handler handler; 924 iw_handler handler;
926 925
@@ -928,7 +927,7 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
928 * The copy_to/from_user() of ifr is also dealt with in there */ 927 * The copy_to/from_user() of ifr is also dealt with in there */
929 928
930 /* Make sure the device exist */ 929 /* Make sure the device exist */
931 if ((dev = __dev_get_by_name(net, ifr->ifr_name)) == NULL) 930 if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL)
932 return -ENODEV; 931 return -ENODEV;
933 932
934 /* A bunch of special cases, then the generic case... 933 /* A bunch of special cases, then the generic case...
@@ -957,9 +956,6 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
957 else if (private) 956 else if (private)
958 return private(dev, iwr, cmd, info, handler); 957 return private(dev, iwr, cmd, info, handler);
959 } 958 }
960 /* Old driver API : call driver ioctl handler */
961 if (dev->netdev_ops->ndo_do_ioctl)
962 return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
963 return -EOPNOTSUPP; 959 return -EOPNOTSUPP;
964} 960}
965 961
@@ -977,7 +973,7 @@ static int wext_permission_check(unsigned int cmd)
977} 973}
978 974
979/* entry point from dev ioctl */ 975/* entry point from dev ioctl */
980static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr, 976static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr,
981 unsigned int cmd, struct iw_request_info *info, 977 unsigned int cmd, struct iw_request_info *info,
982 wext_ioctl_func standard, 978 wext_ioctl_func standard,
983 wext_ioctl_func private) 979 wext_ioctl_func private)
@@ -987,9 +983,9 @@ static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
987 if (ret) 983 if (ret)
988 return ret; 984 return ret;
989 985
990 dev_load(net, ifr->ifr_name); 986 dev_load(net, iwr->ifr_name);
991 rtnl_lock(); 987 rtnl_lock();
992 ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private); 988 ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private);
993 rtnl_unlock(); 989 rtnl_unlock();
994 990
995 return ret; 991 return ret;
@@ -1039,18 +1035,18 @@ static int ioctl_standard_call(struct net_device * dev,
1039} 1035}
1040 1036
1041 1037
1042int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, 1038int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
1043 void __user *arg) 1039 void __user *arg)
1044{ 1040{
1045 struct iw_request_info info = { .cmd = cmd, .flags = 0 }; 1041 struct iw_request_info info = { .cmd = cmd, .flags = 0 };
1046 int ret; 1042 int ret;
1047 1043
1048 ret = wext_ioctl_dispatch(net, ifr, cmd, &info, 1044 ret = wext_ioctl_dispatch(net, iwr, cmd, &info,
1049 ioctl_standard_call, 1045 ioctl_standard_call,
1050 ioctl_private_call); 1046 ioctl_private_call);
1051 if (ret >= 0 && 1047 if (ret >= 0 &&
1052 IW_IS_GET(cmd) && 1048 IW_IS_GET(cmd) &&
1053 copy_to_user(arg, ifr, sizeof(struct iwreq))) 1049 copy_to_user(arg, iwr, sizeof(struct iwreq)))
1054 return -EFAULT; 1050 return -EFAULT;
1055 1051
1056 return ret; 1052 return ret;
@@ -1107,7 +1103,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
1107 info.cmd = cmd; 1103 info.cmd = cmd;
1108 info.flags = IW_REQUEST_FLAG_COMPAT; 1104 info.flags = IW_REQUEST_FLAG_COMPAT;
1109 1105
1110 ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info, 1106 ret = wext_ioctl_dispatch(net, &iwr, cmd, &info,
1111 compat_standard_call, 1107 compat_standard_call,
1112 compat_private_call); 1108 compat_private_call);
1113 1109
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
index ce753a408c56..c583a1e1bd3c 100644
--- a/scripts/Makefile.headersinst
+++ b/scripts/Makefile.headersinst
@@ -14,7 +14,15 @@ __headers:
14include scripts/Kbuild.include 14include scripts/Kbuild.include
15 15
16srcdir := $(srctree)/$(obj) 16srcdir := $(srctree)/$(obj)
17subdirs := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.)) 17
18# When make is run under a fakechroot environment, the function
19# $(wildcard $(srcdir)/*/.) doesn't only return directories, but also regular
20# files. So, we are using a combination of sort/dir/wildcard which works
21# with fakechroot.
22subdirs := $(patsubst $(srcdir)/%/,%,\
23 $(filter-out $(srcdir)/,\
24 $(sort $(dir $(wildcard $(srcdir)/*/)))))
25
18# caller may set destination dir (when installing to asm/) 26# caller may set destination dir (when installing to asm/)
19_dst := $(if $(dst),$(dst),$(obj)) 27_dst := $(if $(dst),$(dst),$(obj))
20 28
diff --git a/scripts/genksyms/genksyms.h b/scripts/genksyms/genksyms.h
index 3bffdcaaa274..b724a0290c75 100644
--- a/scripts/genksyms/genksyms.h
+++ b/scripts/genksyms/genksyms.h
@@ -75,7 +75,7 @@ struct string_list *copy_list_range(struct string_list *start,
75int yylex(void); 75int yylex(void);
76int yyparse(void); 76int yyparse(void);
77 77
78void error_with_pos(const char *, ...); 78void error_with_pos(const char *, ...) __attribute__ ((format(printf, 1, 2)));
79 79
80/*----------------------------------------------------------------------*/ 80/*----------------------------------------------------------------------*/
81#define xmalloc(size) ({ void *__ptr = malloc(size); \ 81#define xmalloc(size) ({ void *__ptr = malloc(size); \
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 90a091b6ae4d..eb8144643b78 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -196,7 +196,7 @@ clean-files += config.pot linux.pot
196 196
197# Check that we have the required ncurses stuff installed for lxdialog (menuconfig) 197# Check that we have the required ncurses stuff installed for lxdialog (menuconfig)
198PHONY += $(obj)/dochecklxdialog 198PHONY += $(obj)/dochecklxdialog
199$(addprefix $(obj)/,$(lxdialog)): $(obj)/dochecklxdialog 199$(addprefix $(obj)/, mconf.o $(lxdialog)): $(obj)/dochecklxdialog
200$(obj)/dochecklxdialog: 200$(obj)/dochecklxdialog:
201 $(Q)$(CONFIG_SHELL) $(check-lxdialog) -check $(HOSTCC) $(HOST_EXTRACFLAGS) $(HOSTLOADLIBES_mconf) 201 $(Q)$(CONFIG_SHELL) $(check-lxdialog) -check $(HOSTCC) $(HOST_EXTRACFLAGS) $(HOSTLOADLIBES_mconf)
202 202
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index a9bc5334a478..003114779815 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -271,7 +271,7 @@ static struct mitem k_menu_items[MAX_MENU_ITEMS];
271static int items_num; 271static int items_num;
272static int global_exit; 272static int global_exit;
273/* the currently selected button */ 273/* the currently selected button */
274const char *current_instructions = menu_instructions; 274static const char *current_instructions = menu_instructions;
275 275
276static char *dialog_input_result; 276static char *dialog_input_result;
277static int dialog_input_result_len; 277static int dialog_input_result_len;
@@ -305,7 +305,7 @@ struct function_keys {
305}; 305};
306 306
307static const int function_keys_num = 9; 307static const int function_keys_num = 9;
308struct function_keys function_keys[] = { 308static struct function_keys function_keys[] = {
309 { 309 {
310 .key_str = "F1", 310 .key_str = "F1",
311 .func = "Help", 311 .func = "Help",
@@ -508,7 +508,7 @@ static int get_mext_match(const char *match_str, match_f flag)
508 index = (index + items_num) % items_num; 508 index = (index + items_num) % items_num;
509 while (true) { 509 while (true) {
510 char *str = k_menu_items[index].str; 510 char *str = k_menu_items[index].str;
511 if (strcasestr(str, match_str) != 0) 511 if (strcasestr(str, match_str) != NULL)
512 return index; 512 return index;
513 if (flag == FIND_NEXT_MATCH_UP || 513 if (flag == FIND_NEXT_MATCH_UP ||
514 flag == MATCH_TINKER_PATTERN_UP) 514 flag == MATCH_TINKER_PATTERN_UP)
@@ -1067,7 +1067,7 @@ static int do_match(int key, struct match_state *state, int *ans)
1067 1067
1068static void conf(struct menu *menu) 1068static void conf(struct menu *menu)
1069{ 1069{
1070 struct menu *submenu = 0; 1070 struct menu *submenu = NULL;
1071 const char *prompt = menu_get_prompt(menu); 1071 const char *prompt = menu_get_prompt(menu);
1072 struct symbol *sym; 1072 struct symbol *sym;
1073 int res; 1073 int res;
@@ -1234,7 +1234,7 @@ static void show_help(struct menu *menu)
1234static void conf_choice(struct menu *menu) 1234static void conf_choice(struct menu *menu)
1235{ 1235{
1236 const char *prompt = _(menu_get_prompt(menu)); 1236 const char *prompt = _(menu_get_prompt(menu));
1237 struct menu *child = 0; 1237 struct menu *child = NULL;
1238 struct symbol *active; 1238 struct symbol *active;
1239 int selected_index = 0; 1239 int selected_index = 0;
1240 int last_top_row = 0; 1240 int last_top_row = 0;
@@ -1456,7 +1456,7 @@ static void conf_save(void)
1456 } 1456 }
1457} 1457}
1458 1458
1459void setup_windows(void) 1459static void setup_windows(void)
1460{ 1460{
1461 int lines, columns; 1461 int lines, columns;
1462 1462
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 4b2f44c20caf..a64b1c31253e 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -129,7 +129,7 @@ static void no_colors_theme(void)
129 mkattrn(FUNCTION_TEXT, A_REVERSE); 129 mkattrn(FUNCTION_TEXT, A_REVERSE);
130} 130}
131 131
132void set_colors() 132void set_colors(void)
133{ 133{
134 start_color(); 134 start_color();
135 use_default_colors(); 135 use_default_colors();
@@ -192,7 +192,7 @@ const char *get_line(const char *text, int line_no)
192 int lines = 0; 192 int lines = 0;
193 193
194 if (!text) 194 if (!text)
195 return 0; 195 return NULL;
196 196
197 for (i = 0; text[i] != '\0' && lines < line_no; i++) 197 for (i = 0; text[i] != '\0' && lines < line_no; i++)
198 if (text[i] == '\n') 198 if (text[i] == '\n')
diff --git a/scripts/tags.sh b/scripts/tags.sh
index d661f2f3ef61..d23dcbf17457 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -106,6 +106,7 @@ all_compiled_sources()
106 case "$i" in 106 case "$i" in
107 *.[cS]) 107 *.[cS])
108 j=${i/\.[cS]/\.o} 108 j=${i/\.[cS]/\.o}
109 j="${j#$tree}"
109 if [ -e $j ]; then 110 if [ -e $j ]; then
110 echo $i 111 echo $i
111 fi 112 fi
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 5088d4b8db22..009e6c98754e 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2492,7 +2492,7 @@ static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2492 struct snd_pcm_substream *substream; 2492 struct snd_pcm_substream *substream;
2493 const struct snd_pcm_chmap_elem *map; 2493 const struct snd_pcm_chmap_elem *map;
2494 2494
2495 if (snd_BUG_ON(!info->chmap)) 2495 if (!info->chmap)
2496 return -EINVAL; 2496 return -EINVAL;
2497 substream = snd_pcm_chmap_substream(info, idx); 2497 substream = snd_pcm_chmap_substream(info, idx);
2498 if (!substream) 2498 if (!substream)
@@ -2524,7 +2524,7 @@ static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2524 unsigned int __user *dst; 2524 unsigned int __user *dst;
2525 int c, count = 0; 2525 int c, count = 0;
2526 2526
2527 if (snd_BUG_ON(!info->chmap)) 2527 if (!info->chmap)
2528 return -EINVAL; 2528 return -EINVAL;
2529 if (size < 8) 2529 if (size < 8)
2530 return -ENOMEM; 2530 return -ENOMEM;
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 9e6f54f8c45d..1e26854b3425 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -682,7 +682,9 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
682 cycle = increment_cycle_count(cycle, 1); 682 cycle = increment_cycle_count(cycle, 1);
683 if (s->handle_packet(s, 0, cycle, i) < 0) { 683 if (s->handle_packet(s, 0, cycle, i) < 0) {
684 s->packet_index = -1; 684 s->packet_index = -1;
685 amdtp_stream_pcm_abort(s); 685 if (in_interrupt())
686 amdtp_stream_pcm_abort(s);
687 WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
686 return; 688 return;
687 } 689 }
688 } 690 }
@@ -734,7 +736,9 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
734 /* Queueing error or detecting invalid payload. */ 736 /* Queueing error or detecting invalid payload. */
735 if (i < packets) { 737 if (i < packets) {
736 s->packet_index = -1; 738 s->packet_index = -1;
737 amdtp_stream_pcm_abort(s); 739 if (in_interrupt())
740 amdtp_stream_pcm_abort(s);
741 WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
738 return; 742 return;
739 } 743 }
740 744
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index 7e8831722821..ea1a91e99875 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -135,7 +135,7 @@ struct amdtp_stream {
135 /* For a PCM substream processing. */ 135 /* For a PCM substream processing. */
136 struct snd_pcm_substream *pcm; 136 struct snd_pcm_substream *pcm;
137 struct tasklet_struct period_tasklet; 137 struct tasklet_struct period_tasklet;
138 unsigned int pcm_buffer_pointer; 138 snd_pcm_uframes_t pcm_buffer_pointer;
139 unsigned int pcm_period_pointer; 139 unsigned int pcm_period_pointer;
140 140
141 /* To wait for first packet. */ 141 /* To wait for first packet. */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1770f085c2a6..01eb1dc7b5b3 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -370,10 +370,12 @@ enum {
370#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71) 370#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
371#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0) 371#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
372#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) 372#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
373#define IS_BXT_T(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x1a98)
373#define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198) 374#define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198)
374#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \ 375#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
375 IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci) || \ 376#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
376 IS_GLK(pci) 377 IS_BXT_T(pci) || IS_KBL(pci) || IS_KBL_LP(pci) || \
378 IS_KBL_H(pci) || IS_GLK(pci) || IS_CFL(pci))
377 379
378static char *driver_short_names[] = { 380static char *driver_short_names[] = {
379 [AZX_DRIVER_ICH] = "HDA Intel", 381 [AZX_DRIVER_ICH] = "HDA Intel",
@@ -2378,6 +2380,9 @@ static const struct pci_device_id azx_ids[] = {
2378 /* Kabylake-H */ 2380 /* Kabylake-H */
2379 { PCI_DEVICE(0x8086, 0xa2f0), 2381 { PCI_DEVICE(0x8086, 0xa2f0),
2380 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, 2382 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
2383 /* Coffelake */
2384 { PCI_DEVICE(0x8086, 0xa348),
2385 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE},
2381 /* Broxton-P(Apollolake) */ 2386 /* Broxton-P(Apollolake) */
2382 { PCI_DEVICE(0x8086, 0x5a98), 2387 { PCI_DEVICE(0x8086, 0x5a98),
2383 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, 2388 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 84e7e698411e..a2670e9d652d 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -619,7 +619,7 @@ static int post_process_probe_trace_point(struct probe_trace_point *tp,
619 struct map *map, unsigned long offs) 619 struct map *map, unsigned long offs)
620{ 620{
621 struct symbol *sym; 621 struct symbol *sym;
622 u64 addr = tp->address + tp->offset - offs; 622 u64 addr = tp->address - offs;
623 623
624 sym = map__find_symbol(map, addr); 624 sym = map__find_symbol(map, addr);
625 if (!sym) 625 if (!sym)
diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
index a676d3eefefb..13f5198ba0ee 100755
--- a/tools/testing/selftests/ntb/ntb_test.sh
+++ b/tools/testing/selftests/ntb/ntb_test.sh
@@ -305,7 +305,7 @@ function perf_test()
305 echo "Running remote perf test $WITH DMA" 305 echo "Running remote perf test $WITH DMA"
306 write_file "" $REMOTE_PERF/run 306 write_file "" $REMOTE_PERF/run
307 echo -n " " 307 echo -n " "
308 read_file $LOCAL_PERF/run 308 read_file $REMOTE_PERF/run
309 echo " Passed" 309 echo " Passed"
310 310
311 _modprobe -r ntb_perf 311 _modprobe -r ntb_perf