author	Chris Wilson <chris@chris-wilson.co.uk>	2016-07-05 05:40:20 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2016-07-05 06:53:27 -0400
commit	94b4f3ba483ace6dd4a3f881e19cc18bdbafa6ef (patch)
tree	5bcd45d4151f6ea09f4b617a79887a23cf961163
parent	9777cca0c4d3d35a97cb5711be575967712bd59b (diff)
drm/i915: Split out runtime configuration of device info to its own file
Let's reclaim a few hundred lines from i915_drv.c by splitting out the
runtime configuration of the "constant" dev_priv->info.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1467711623-2905-1-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
-rw-r--r--	drivers/gpu/drm/i915/Makefile	1
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	413
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	10
-rw-r--r--	drivers/gpu/drm/i915/intel_device_info.c	388
4 files changed, 421 insertions, 391 deletions
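
The point of the split is that dev_priv->info stays a read-only "constant" for the rest of the driver, while the init paths in i915_drv.c and the new intel_device_info.c obtain their writable view through the mkwrite_device_info() helper added to i915_drv.h below. A minimal standalone sketch of that write-once pattern (the struct contents here are illustrative stand-ins, not the real i915 definitions):

/*
 * Sketch of the write-once "constant" device info pattern used by this
 * patch; field types and names below are abbreviated placeholders.
 */
#include <stdint.h>
#include <string.h>

struct intel_device_info {
	uint16_t device_id;
	uint8_t gen;
	uint8_t num_pipes;
	/* ...plus the runtime-probed sseu fields (slice_total, eu_total, ...) */
};

struct drm_i915_private {
	const struct intel_device_info info;	/* read-only after early init */
};

/* The only sanctioned way to write it, used by the init paths alone. */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)&dev_priv->info;
}

static void early_init_sketch(struct drm_i915_private *dev_priv,
			      const struct intel_device_info *match_info)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);

	memcpy(device_info, match_info, sizeof(*device_info));	/* one-time copy */
}
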
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 618293c8c9d9..684fc1cd08fa 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -14,6 +14,7 @@ i915-y := i915_drv.o \
 	  i915_suspend.o \
 	  i915_sysfs.o \
 	  intel_csr.o \
+	  intel_device_info.o \
 	  intel_pm.o \
 	  intel_runtime_pm.o
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 695001ffd547..87295f6e5520 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -748,394 +748,6 @@ static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
 }
 #endif
 
-static void i915_dump_device_info(struct drm_i915_private *dev_priv)
-{
-	const struct intel_device_info *info = &dev_priv->info;
-
-#define PRINT_S(name) "%s"
-#define SEP_EMPTY
-#define PRINT_FLAG(name) info->name ? #name "," : ""
-#define SEP_COMMA ,
-	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
-			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
-			 info->gen,
-			 dev_priv->dev->pdev->device,
-			 dev_priv->dev->pdev->revision,
-			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
-#undef PRINT_S
-#undef SEP_EMPTY
-#undef PRINT_FLAG
-#undef SEP_COMMA
-}
-
-static void cherryview_sseu_info_init(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_device_info *info;
-	u32 fuse, eu_dis;
-
-	info = (struct intel_device_info *)&dev_priv->info;
-	fuse = I915_READ(CHV_FUSE_GT);
-
-	info->slice_total = 1;
-
-	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
-		info->subslice_per_slice++;
-		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
-				 CHV_FGT_EU_DIS_SS0_R1_MASK);
-		info->eu_total += 8 - hweight32(eu_dis);
-	}
-
-	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
-		info->subslice_per_slice++;
-		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
-				 CHV_FGT_EU_DIS_SS1_R1_MASK);
-		info->eu_total += 8 - hweight32(eu_dis);
-	}
-
-	info->subslice_total = info->subslice_per_slice;
-	/*
-	 * CHV expected to always have a uniform distribution of EU
-	 * across subslices.
-	 */
-	info->eu_per_subslice = info->subslice_total ?
-				info->eu_total / info->subslice_total :
-				0;
-	/*
-	 * CHV supports subslice power gating on devices with more than
-	 * one subslice, and supports EU power gating on devices with
-	 * more than one EU pair per subslice.
-	 */
-	info->has_slice_pg = 0;
-	info->has_subslice_pg = (info->subslice_total > 1);
-	info->has_eu_pg = (info->eu_per_subslice > 2);
-}
-
-static void gen9_sseu_info_init(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_device_info *info;
-	int s_max = 3, ss_max = 4, eu_max = 8;
-	int s, ss;
-	u32 fuse2, s_enable, ss_disable, eu_disable;
-	u8 eu_mask = 0xff;
-
-	info = (struct intel_device_info *)&dev_priv->info;
-	fuse2 = I915_READ(GEN8_FUSE2);
-	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
-		   GEN8_F2_S_ENA_SHIFT;
-	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
-		     GEN9_F2_SS_DIS_SHIFT;
-
-	info->slice_total = hweight32(s_enable);
-	/*
-	 * The subslice disable field is global, i.e. it applies
-	 * to each of the enabled slices.
-	 */
-	info->subslice_per_slice = ss_max - hweight32(ss_disable);
-	info->subslice_total = info->slice_total *
-			       info->subslice_per_slice;
-
-	/*
-	 * Iterate through enabled slices and subslices to
-	 * count the total enabled EU.
-	 */
-	for (s = 0; s < s_max; s++) {
-		if (!(s_enable & (0x1 << s)))
-			/* skip disabled slice */
-			continue;
-
-		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
-		for (ss = 0; ss < ss_max; ss++) {
-			int eu_per_ss;
-
-			if (ss_disable & (0x1 << ss))
-				/* skip disabled subslice */
-				continue;
-
-			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
-						      eu_mask);
-
-			/*
-			 * Record which subslice(s) has(have) 7 EUs. we
-			 * can tune the hash used to spread work among
-			 * subslices if they are unbalanced.
-			 */
-			if (eu_per_ss == 7)
-				info->subslice_7eu[s] |= 1 << ss;
-
-			info->eu_total += eu_per_ss;
-		}
-	}
-
-	/*
-	 * SKL is expected to always have a uniform distribution
-	 * of EU across subslices with the exception that any one
-	 * EU in any one subslice may be fused off for die
-	 * recovery. BXT is expected to be perfectly uniform in EU
-	 * distribution.
-	 */
-	info->eu_per_subslice = info->subslice_total ?
-				DIV_ROUND_UP(info->eu_total,
-					     info->subslice_total) : 0;
-	/*
-	 * SKL supports slice power gating on devices with more than
-	 * one slice, and supports EU power gating on devices with
-	 * more than one EU pair per subslice. BXT supports subslice
-	 * power gating on devices with more than one subslice, and
-	 * supports EU power gating on devices with more than one EU
-	 * pair per subslice.
-	 */
-	info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
-			      (info->slice_total > 1));
-	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
-	info->has_eu_pg = (info->eu_per_subslice > 2);
-
-	if (IS_BROXTON(dev)) {
-#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & (0x1 << ss))
-		/*
-		 * There is a HW issue in 2x6 fused down parts that requires
-		 * Pooled EU to be enabled as a WA. The pool configuration
-		 * changes depending upon which subslice is fused down. This
-		 * doesn't affect if the device has all 3 subslices enabled.
-		 */
-		/* WaEnablePooledEuFor2x6:bxt */
-		info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
-				       (info->subslice_per_slice == 2 &&
-					INTEL_REVID(dev) < BXT_REVID_C0));
-
-		info->min_eu_in_pool = 0;
-		if (info->has_pooled_eu) {
-			if (IS_SS_DISABLED(ss_disable, 0) ||
-			    IS_SS_DISABLED(ss_disable, 2))
-				info->min_eu_in_pool = 3;
-			else if (IS_SS_DISABLED(ss_disable, 1))
-				info->min_eu_in_pool = 6;
-			else
-				info->min_eu_in_pool = 9;
-		}
-#undef IS_SS_DISABLED
-	}
-}
-
-static void broadwell_sseu_info_init(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_device_info *info;
-	const int s_max = 3, ss_max = 3, eu_max = 8;
-	int s, ss;
-	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
-
-	fuse2 = I915_READ(GEN8_FUSE2);
-	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
-
-	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
-	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
-			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
-			 (32 - GEN8_EU_DIS0_S1_SHIFT));
-	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
-			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
-			 (32 - GEN8_EU_DIS1_S2_SHIFT));
-
-
-	info = (struct intel_device_info *)&dev_priv->info;
-	info->slice_total = hweight32(s_enable);
-
-	/*
-	 * The subslice disable field is global, i.e. it applies
-	 * to each of the enabled slices.
-	 */
-	info->subslice_per_slice = ss_max - hweight32(ss_disable);
-	info->subslice_total = info->slice_total * info->subslice_per_slice;
-
-	/*
-	 * Iterate through enabled slices and subslices to
-	 * count the total enabled EU.
-	 */
-	for (s = 0; s < s_max; s++) {
-		if (!(s_enable & (0x1 << s)))
-			/* skip disabled slice */
-			continue;
-
-		for (ss = 0; ss < ss_max; ss++) {
-			u32 n_disabled;
-
-			if (ss_disable & (0x1 << ss))
-				/* skip disabled subslice */
-				continue;
-
-			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
-
-			/*
-			 * Record which subslices have 7 EUs.
-			 */
-			if (eu_max - n_disabled == 7)
-				info->subslice_7eu[s] |= 1 << ss;
-
-			info->eu_total += eu_max - n_disabled;
-		}
-	}
-
-	/*
-	 * BDW is expected to always have a uniform distribution of EU across
-	 * subslices with the exception that any one EU in any one subslice may
-	 * be fused off for die recovery.
-	 */
-	info->eu_per_subslice = info->subslice_total ?
-				DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
-
-	/*
-	 * BDW supports slice power gating on devices with more than
-	 * one slice.
-	 */
-	info->has_slice_pg = (info->slice_total > 1);
-	info->has_subslice_pg = 0;
-	info->has_eu_pg = 0;
-}
-
-/*
- * Determine various intel_device_info fields at runtime.
- *
- * Use it when either:
- *   - it's judged too laborious to fill n static structures with the limit
- *     when a simple if statement does the job,
- *   - run-time checks (eg read fuse/strap registers) are needed.
- *
- * This function needs to be called:
- *   - after the MMIO has been setup as we are reading registers,
- *   - after the PCH has been detected,
- *   - before the first usage of the fields it can tweak.
- */
-static void intel_device_info_runtime_init(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_device_info *info;
-	enum pipe pipe;
-
-	info = (struct intel_device_info *)&dev_priv->info;
-
-	/*
-	 * Skylake and Broxton currently don't expose the topmost plane as its
-	 * use is exclusive with the legacy cursor and we only want to expose
-	 * one of those, not both. Until we can safely expose the topmost plane
-	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
-	 * we don't expose the topmost plane at all to prevent ABI breakage
-	 * down the line.
-	 */
-	if (IS_BROXTON(dev)) {
-		info->num_sprites[PIPE_A] = 2;
-		info->num_sprites[PIPE_B] = 2;
-		info->num_sprites[PIPE_C] = 1;
-	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-		for_each_pipe(dev_priv, pipe)
-			info->num_sprites[pipe] = 2;
-	else
-		for_each_pipe(dev_priv, pipe)
-			info->num_sprites[pipe] = 1;
-
-	if (i915.disable_display) {
-		DRM_INFO("Display disabled (module parameter)\n");
-		info->num_pipes = 0;
-	} else if (info->num_pipes > 0 &&
-		   (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
-		   HAS_PCH_SPLIT(dev)) {
-		u32 fuse_strap = I915_READ(FUSE_STRAP);
-		u32 sfuse_strap = I915_READ(SFUSE_STRAP);
-
-		/*
-		 * SFUSE_STRAP is supposed to have a bit signalling the display
-		 * is fused off. Unfortunately it seems that, at least in
-		 * certain cases, fused off display means that PCH display
-		 * reads don't land anywhere. In that case, we read 0s.
-		 *
-		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
-		 * should be set when taking over after the firmware.
-		 */
-		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
-		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
-		    (dev_priv->pch_type == PCH_CPT &&
-		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
-			DRM_INFO("Display fused off, disabling\n");
-			info->num_pipes = 0;
-		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
-			DRM_INFO("PipeC fused off\n");
-			info->num_pipes -= 1;
-		}
-	} else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
-		u32 dfsm = I915_READ(SKL_DFSM);
-		u8 disabled_mask = 0;
-		bool invalid;
-		int num_bits;
-
-		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
-			disabled_mask |= BIT(PIPE_A);
-		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
-			disabled_mask |= BIT(PIPE_B);
-		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
-			disabled_mask |= BIT(PIPE_C);
-
-		num_bits = hweight8(disabled_mask);
-
-		switch (disabled_mask) {
-		case BIT(PIPE_A):
-		case BIT(PIPE_B):
-		case BIT(PIPE_A) | BIT(PIPE_B):
-		case BIT(PIPE_A) | BIT(PIPE_C):
-			invalid = true;
-			break;
-		default:
-			invalid = false;
-		}
-
-		if (num_bits > info->num_pipes || invalid)
-			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
-				  disabled_mask);
-		else
-			info->num_pipes -= num_bits;
-	}
-
-	/* Initialize slice/subslice/EU info */
-	if (IS_CHERRYVIEW(dev))
-		cherryview_sseu_info_init(dev);
-	else if (IS_BROADWELL(dev))
-		broadwell_sseu_info_init(dev);
-	else if (INTEL_INFO(dev)->gen >= 9)
-		gen9_sseu_info_init(dev);
-
-	info->has_snoop = !info->has_llc;
-
-	/* Snooping is broken on BXT A stepping. */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
-		info->has_snoop = false;
-
-	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
-	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
-	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
-	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
-	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
-	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
-			 info->has_slice_pg ? "y" : "n");
-	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
-			 info->has_subslice_pg ? "y" : "n");
-	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
-			 info->has_eu_pg ? "y" : "n");
-
-	i915.enable_execlists =
-		intel_sanitize_enable_execlists(dev_priv,
-						i915.enable_execlists);
-
-	/*
-	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
-	 * user's requested state against the hardware/driver capabilities. We
-	 * do this now so that we can print out any log messages once rather
-	 * than every time we check intel_enable_ppgtt().
-	 */
-	i915.enable_ppgtt =
-		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
-	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
-}
-
 static void intel_init_dpio(struct drm_i915_private *dev_priv)
 {
 	/*
@@ -1213,7 +825,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 		return -ENODEV;
 
 	/* Setup the write-once "constant" device info */
-	device_info = (struct intel_device_info *)&dev_priv->info;
+	device_info = mkwrite_device_info(dev_priv);
 	memcpy(device_info, match_info, sizeof(*device_info));
 	device_info->device_id = dev_priv->drm.pdev->device;
 
@@ -1254,7 +866,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 
 	intel_display_crc_init(&dev_priv->drm);
 
-	i915_dump_device_info(dev_priv);
+	intel_device_info_dump(dev_priv);
 
 	/* Not all pre-production machines fall into this category, only the
 	 * very first ones. Almost everything should work, except for maybe
@@ -1368,6 +980,23 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 	pci_dev_put(dev_priv->bridge_dev);
 }
 
+static void intel_sanitize_options(struct drm_i915_private *dev_priv)
+{
+	i915.enable_execlists =
+		intel_sanitize_enable_execlists(dev_priv,
+						i915.enable_execlists);
+
+	/*
+	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
+	 * user's requested state against the hardware/driver capabilities. We
+	 * do this now so that we can print out any log messages once rather
+	 * than every time we check intel_enable_ppgtt().
+	 */
+	i915.enable_ppgtt =
+		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
+	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+}
+
 /**
  * i915_driver_init_hw - setup state requiring device access
  * @dev_priv: device private
@@ -1385,7 +1014,9 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	if (i915_inject_load_failure())
 		return -ENODEV;
 
-	intel_device_info_runtime_init(dev);
+	intel_device_info_runtime_init(dev_priv);
+
+	intel_sanitize_options(dev_priv);
 
 	ret = i915_ggtt_init_hw(dev);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 594ddbba3a64..d33b370a057d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3750,6 +3750,16 @@ static inline void intel_register_dsm_handler(void) { return; }
 static inline void intel_unregister_dsm_handler(void) { return; }
 #endif /* CONFIG_ACPI */
 
+/* intel_device_info.c */
+static inline struct intel_device_info *
+mkwrite_device_info(struct drm_i915_private *dev_priv)
+{
+	return (struct intel_device_info *)&dev_priv->info;
+}
+
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
+void intel_device_info_dump(struct drm_i915_private *dev_priv);
+
 /* modesetting */
 extern void intel_modeset_init_hw(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
new file mode 100644
index 000000000000..cba137f9ad3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+void intel_device_info_dump(struct drm_i915_private *dev_priv)
+{
+	const struct intel_device_info *info = &dev_priv->info;
+
+#define PRINT_S(name) "%s"
+#define SEP_EMPTY
+#define PRINT_FLAG(name) info->name ? #name "," : ""
+#define SEP_COMMA ,
+	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
+			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
+			 info->gen,
+			 dev_priv->drm.pdev->device,
+			 dev_priv->drm.pdev->revision,
+			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
+#undef PRINT_S
+#undef SEP_EMPTY
+#undef PRINT_FLAG
+#undef SEP_COMMA
+}
+
+static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+	struct intel_device_info *info = mkwrite_device_info(dev_priv);
+	u32 fuse, eu_dis;
+
+	fuse = I915_READ(CHV_FUSE_GT);
+
+	info->slice_total = 1;
+
+	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+		info->subslice_per_slice++;
+		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+				 CHV_FGT_EU_DIS_SS0_R1_MASK);
+		info->eu_total += 8 - hweight32(eu_dis);
+	}
+
+	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+		info->subslice_per_slice++;
+		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+				 CHV_FGT_EU_DIS_SS1_R1_MASK);
+		info->eu_total += 8 - hweight32(eu_dis);
+	}
+
+	info->subslice_total = info->subslice_per_slice;
+	/*
+	 * CHV expected to always have a uniform distribution of EU
+	 * across subslices.
+	 */
+	info->eu_per_subslice = info->subslice_total ?
+				info->eu_total / info->subslice_total :
+				0;
+	/*
+	 * CHV supports subslice power gating on devices with more than
+	 * one subslice, and supports EU power gating on devices with
+	 * more than one EU pair per subslice.
+	 */
+	info->has_slice_pg = 0;
+	info->has_subslice_pg = (info->subslice_total > 1);
+	info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+
+static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+	struct intel_device_info *info = mkwrite_device_info(dev_priv);
+	int s_max = 3, ss_max = 4, eu_max = 8;
+	int s, ss;
+	u32 fuse2, s_enable, ss_disable, eu_disable;
+	u8 eu_mask = 0xff;
+
+	fuse2 = I915_READ(GEN8_FUSE2);
+	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT;
+
+	info->slice_total = hweight32(s_enable);
+	/*
+	 * The subslice disable field is global, i.e. it applies
+	 * to each of the enabled slices.
+	 */
+	info->subslice_per_slice = ss_max - hweight32(ss_disable);
+	info->subslice_total = info->slice_total * info->subslice_per_slice;
+
+	/*
+	 * Iterate through enabled slices and subslices to
+	 * count the total enabled EU.
+	 */
+	for (s = 0; s < s_max; s++) {
+		if (!(s_enable & BIT(s)))
+			/* skip disabled slice */
+			continue;
+
+		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
+		for (ss = 0; ss < ss_max; ss++) {
+			int eu_per_ss;
+
+			if (ss_disable & BIT(ss))
+				/* skip disabled subslice */
+				continue;
+
+			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
+						      eu_mask);
+
+			/*
+			 * Record which subslice(s) has(have) 7 EUs. we
+			 * can tune the hash used to spread work among
+			 * subslices if they are unbalanced.
+			 */
+			if (eu_per_ss == 7)
+				info->subslice_7eu[s] |= BIT(ss);
+
+			info->eu_total += eu_per_ss;
+		}
+	}
+
+	/*
+	 * SKL is expected to always have a uniform distribution
+	 * of EU across subslices with the exception that any one
+	 * EU in any one subslice may be fused off for die
+	 * recovery. BXT is expected to be perfectly uniform in EU
+	 * distribution.
+	 */
+	info->eu_per_subslice = info->subslice_total ?
+				DIV_ROUND_UP(info->eu_total,
+					     info->subslice_total) : 0;
+	/*
+	 * SKL supports slice power gating on devices with more than
+	 * one slice, and supports EU power gating on devices with
+	 * more than one EU pair per subslice. BXT supports subslice
+	 * power gating on devices with more than one subslice, and
+	 * supports EU power gating on devices with more than one EU
+	 * pair per subslice.
+	 */
+	info->has_slice_pg =
+		(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+		info->slice_total > 1;
+	info->has_subslice_pg =
+		IS_BROXTON(dev_priv) && info->subslice_total > 1;
+	info->has_eu_pg = info->eu_per_subslice > 2;
+
+	if (IS_BROXTON(dev_priv)) {
+#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & BIT(ss))
+		/*
+		 * There is a HW issue in 2x6 fused down parts that requires
+		 * Pooled EU to be enabled as a WA. The pool configuration
+		 * changes depending upon which subslice is fused down. This
+		 * doesn't affect if the device has all 3 subslices enabled.
+		 */
+		/* WaEnablePooledEuFor2x6:bxt */
+		info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
+				       (info->subslice_per_slice == 2 &&
+					INTEL_REVID(dev_priv) < BXT_REVID_C0));
+
+		info->min_eu_in_pool = 0;
+		if (info->has_pooled_eu) {
+			if (IS_SS_DISABLED(ss_disable, 0) ||
+			    IS_SS_DISABLED(ss_disable, 2))
+				info->min_eu_in_pool = 3;
+			else if (IS_SS_DISABLED(ss_disable, 1))
+				info->min_eu_in_pool = 6;
+			else
+				info->min_eu_in_pool = 9;
+		}
+#undef IS_SS_DISABLED
+	}
+}
+
+static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+	struct intel_device_info *info = mkwrite_device_info(dev_priv);
+	const int s_max = 3, ss_max = 3, eu_max = 8;
+	int s, ss;
+	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+	fuse2 = I915_READ(GEN8_FUSE2);
+	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+
+	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
+	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
+			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
+			 (32 - GEN8_EU_DIS0_S1_SHIFT));
+	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
+			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
+			 (32 - GEN8_EU_DIS1_S2_SHIFT));
+
+	info->slice_total = hweight32(s_enable);
+
+	/*
+	 * The subslice disable field is global, i.e. it applies
+	 * to each of the enabled slices.
+	 */
+	info->subslice_per_slice = ss_max - hweight32(ss_disable);
+	info->subslice_total = info->slice_total * info->subslice_per_slice;
+
+	/*
+	 * Iterate through enabled slices and subslices to
+	 * count the total enabled EU.
+	 */
+	for (s = 0; s < s_max; s++) {
+		if (!(s_enable & (0x1 << s)))
+			/* skip disabled slice */
+			continue;
+
+		for (ss = 0; ss < ss_max; ss++) {
+			u32 n_disabled;
+
+			if (ss_disable & (0x1 << ss))
+				/* skip disabled subslice */
+				continue;
+
+			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
+
+			/*
+			 * Record which subslices have 7 EUs.
+			 */
+			if (eu_max - n_disabled == 7)
+				info->subslice_7eu[s] |= 1 << ss;
+
+			info->eu_total += eu_max - n_disabled;
+		}
+	}
+
+	/*
+	 * BDW is expected to always have a uniform distribution of EU across
+	 * subslices with the exception that any one EU in any one subslice may
+	 * be fused off for die recovery.
+	 */
+	info->eu_per_subslice = info->subslice_total ?
+				DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+
+	/*
+	 * BDW supports slice power gating on devices with more than
+	 * one slice.
+	 */
+	info->has_slice_pg = (info->slice_total > 1);
+	info->has_subslice_pg = 0;
+	info->has_eu_pg = 0;
+}
+
+/*
+ * Determine various intel_device_info fields at runtime.
+ *
+ * Use it when either:
+ *   - it's judged too laborious to fill n static structures with the limit
+ *     when a simple if statement does the job,
+ *   - run-time checks (eg read fuse/strap registers) are needed.
+ *
+ * This function needs to be called:
+ *   - after the MMIO has been setup as we are reading registers,
+ *   - after the PCH has been detected,
+ *   - before the first usage of the fields it can tweak.
+ */
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
+{
+	struct intel_device_info *info = mkwrite_device_info(dev_priv);
+	enum pipe pipe;
+
+	/*
+	 * Skylake and Broxton currently don't expose the topmost plane as its
+	 * use is exclusive with the legacy cursor and we only want to expose
+	 * one of those, not both. Until we can safely expose the topmost plane
+	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+	 * we don't expose the topmost plane at all to prevent ABI breakage
+	 * down the line.
+	 */
+	if (IS_BROXTON(dev_priv)) {
+		info->num_sprites[PIPE_A] = 2;
+		info->num_sprites[PIPE_B] = 2;
+		info->num_sprites[PIPE_C] = 1;
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		for_each_pipe(dev_priv, pipe)
+			info->num_sprites[pipe] = 2;
+	else
+		for_each_pipe(dev_priv, pipe)
+			info->num_sprites[pipe] = 1;
+
+	if (i915.disable_display) {
+		DRM_INFO("Display disabled (module parameter)\n");
+		info->num_pipes = 0;
+	} else if (info->num_pipes > 0 &&
+		   (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
+		   HAS_PCH_SPLIT(dev_priv)) {
+		u32 fuse_strap = I915_READ(FUSE_STRAP);
+		u32 sfuse_strap = I915_READ(SFUSE_STRAP);
+
+		/*
+		 * SFUSE_STRAP is supposed to have a bit signalling the display
+		 * is fused off. Unfortunately it seems that, at least in
+		 * certain cases, fused off display means that PCH display
+		 * reads don't land anywhere. In that case, we read 0s.
+		 *
+		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
+		 * should be set when taking over after the firmware.
+		 */
+		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
+		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
+		    (dev_priv->pch_type == PCH_CPT &&
+		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
+			DRM_INFO("Display fused off, disabling\n");
+			info->num_pipes = 0;
+		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
+			DRM_INFO("PipeC fused off\n");
+			info->num_pipes -= 1;
+		}
+	} else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
+		u32 dfsm = I915_READ(SKL_DFSM);
+		u8 disabled_mask = 0;
+		bool invalid;
+		int num_bits;
+
+		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
+			disabled_mask |= BIT(PIPE_A);
+		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
+			disabled_mask |= BIT(PIPE_B);
+		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
+			disabled_mask |= BIT(PIPE_C);
+
+		num_bits = hweight8(disabled_mask);
+
+		switch (disabled_mask) {
+		case BIT(PIPE_A):
+		case BIT(PIPE_B):
+		case BIT(PIPE_A) | BIT(PIPE_B):
+		case BIT(PIPE_A) | BIT(PIPE_C):
+			invalid = true;
+			break;
+		default:
+			invalid = false;
+		}
+
+		if (num_bits > info->num_pipes || invalid)
+			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
+				  disabled_mask);
+		else
+			info->num_pipes -= num_bits;
+	}
+
+	/* Initialize slice/subslice/EU info */
+	if (IS_CHERRYVIEW(dev_priv))
+		cherryview_sseu_info_init(dev_priv);
+	else if (IS_BROADWELL(dev_priv))
+		broadwell_sseu_info_init(dev_priv);
+	else if (INTEL_INFO(dev_priv)->gen >= 9)
+		gen9_sseu_info_init(dev_priv);
+
+	info->has_snoop = !info->has_llc;
+
+	/* Snooping is broken on BXT A stepping. */
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+		info->has_snoop = false;
+
+	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
+	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
+	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
+	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
+	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
+			 info->has_slice_pg ? "y" : "n");
+	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
+			 info->has_subslice_pg ? "y" : "n");
+	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
+			 info->has_eu_pg ? "y" : "n");
+}
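
All three sseu init functions above reduce to the same popcount arithmetic on fuse registers: an enabled subslice contributes its maximum of eight EUs minus however many the fuse field disables (info->eu_total += 8 - hweight32(eu_dis)). A standalone sketch of just that arithmetic, using __builtin_popcount as a stand-in for the kernel's hweight32() and a made-up fuse value:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's hweight32(): number of set bits. */
static unsigned int popcount32(uint32_t v)
{
	return (unsigned int)__builtin_popcount(v);
}

int main(void)
{
	uint32_t eu_dis = 0x3;	/* hypothetical fuse bits: two EUs fused off */
	unsigned int eu_max = 8;
	unsigned int eu_enabled = eu_max - popcount32(eu_dis);

	printf("EUs enabled in this subslice: %u of %u\n", eu_enabled, eu_max);
	return 0;
}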