Diffstat (limited to 'drivers/gpu/drm/i915/i915_dma.c')
 drivers/gpu/drm/i915/i915_dma.c | 260 +++++++++++++++++++++++-----------------
 1 file changed, 154 insertions(+), 106 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d70d96fe553b..1c6d227aae7c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -391,20 +391,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_vga_client;
 
-	/* Initialise stolen first so that we may reserve preallocated
-	 * objects for the BIOS to KMS transition.
-	 */
-	ret = i915_gem_init_stolen(dev);
-	if (ret)
-		goto cleanup_vga_switcheroo;
-
 	intel_power_domains_init_hw(dev_priv, false);
 
 	intel_csr_ucode_init(dev_priv);
 
 	ret = intel_irq_install(dev_priv);
 	if (ret)
-		goto cleanup_gem_stolen;
+		goto cleanup_csr;
 
 	intel_setup_gmbus(dev);
 
@@ -458,9 +451,8 @@ cleanup_irq:
 	intel_guc_ucode_fini(dev);
 	drm_irq_uninstall(dev);
 	intel_teardown_gmbus(dev);
-cleanup_gem_stolen:
-	i915_gem_cleanup_stolen(dev);
-cleanup_vga_switcheroo:
+cleanup_csr:
+	intel_csr_ucode_fini(dev_priv);
 	vga_switcheroo_unregister_client(dev->pdev);
 cleanup_vga_client:
 	vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -816,7 +808,41 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
 			DRM_INFO("Display fused off, disabling\n");
 			info->num_pipes = 0;
+		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
+			DRM_INFO("PipeC fused off\n");
+			info->num_pipes -= 1;
 		}
+	} else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
+		u32 dfsm = I915_READ(SKL_DFSM);
+		u8 disabled_mask = 0;
+		bool invalid;
+		int num_bits;
+
+		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
+			disabled_mask |= BIT(PIPE_A);
+		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
+			disabled_mask |= BIT(PIPE_B);
+		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
+			disabled_mask |= BIT(PIPE_C);
+
+		num_bits = hweight8(disabled_mask);
+
+		switch (disabled_mask) {
+		case BIT(PIPE_A):
+		case BIT(PIPE_B):
+		case BIT(PIPE_A) | BIT(PIPE_B):
+		case BIT(PIPE_A) | BIT(PIPE_C):
+			invalid = true;
+			break;
+		default:
+			invalid = false;
+		}
+
+		if (num_bits > info->num_pipes || invalid)
+			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
+				  disabled_mask);
+		else
+			info->num_pipes -= num_bits;
 	}
 
 	/* Initialize slice/subslice/EU info */
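Note on the SKL_DFSM check added above: the switch flags a fuse mask as invalid whenever a lower pipe is fused off while a higher one stays enabled, so the only accepted configurations are "nothing disabled", pipe C only, pipes B+C, and all three pipes. A standalone restatement of that rule as a predicate, for illustration only (the helper name is hypothetical; BIT(), hweight8() and the PIPE_* enum are the kernel's own):

/* Illustrative only, not part of the patch: true when the DFSM
 * pipe-disable mask is one the driver accepts, i.e. pipes are only
 * fused off from the highest pipe downwards and no more pipes are
 * disabled than the platform claims to have. */
static bool skl_pipe_fuse_mask_valid(u8 disabled_mask, unsigned int num_pipes)
{
	switch (disabled_mask) {
	case 0:
	case BIT(PIPE_C):
	case BIT(PIPE_B) | BIT(PIPE_C):
	case BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C):
		return hweight8(disabled_mask) <= num_pipes;
	default:
		return false;
	}
}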
@@ -855,6 +881,94 @@ static void intel_init_dpio(struct drm_i915_private *dev_priv)
 	}
 }
 
+static int i915_workqueues_init(struct drm_i915_private *dev_priv)
+{
+	/*
+	 * The i915 workqueue is primarily used for batched retirement of
+	 * requests (and thus managing bo) once the task has been completed
+	 * by the GPU. i915_gem_retire_requests() is called directly when we
+	 * need high-priority retirement, such as waiting for an explicit
+	 * bo.
+	 *
+	 * It is also used for periodic low-priority events, such as
+	 * idle-timers and recording error state.
+	 *
+	 * All tasks on the workqueue are expected to acquire the dev mutex
+	 * so there is no point in running more than one instance of the
+	 * workqueue at any time. Use an ordered one.
+	 */
+	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+	if (dev_priv->wq == NULL)
+		goto out_err;
+
+	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+	if (dev_priv->hotplug.dp_wq == NULL)
+		goto out_free_wq;
+
+	dev_priv->gpu_error.hangcheck_wq =
+		alloc_ordered_workqueue("i915-hangcheck", 0);
+	if (dev_priv->gpu_error.hangcheck_wq == NULL)
+		goto out_free_dp_wq;
+
+	return 0;
+
+out_free_dp_wq:
+	destroy_workqueue(dev_priv->hotplug.dp_wq);
+out_free_wq:
+	destroy_workqueue(dev_priv->wq);
+out_err:
+	DRM_ERROR("Failed to allocate workqueues.\n");
+
+	return -ENOMEM;
+}
+
+static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
+{
+	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
+	destroy_workqueue(dev_priv->hotplug.dp_wq);
+	destroy_workqueue(dev_priv->wq);
+}
+
+static int i915_mmio_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int mmio_bar;
+	int mmio_size;
+
+	mmio_bar = IS_GEN2(dev) ? 1 : 0;
+	/*
+	 * Before gen4, the registers and the GTT are behind different BARs.
+	 * However, from gen4 onwards, the registers and the GTT are shared
+	 * in the same BAR, so we want to restrict this ioremap from
+	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
+	 * the register BAR remains the same size for all the earlier
+	 * generations up to Ironlake.
+	 */
+	if (INTEL_INFO(dev)->gen < 5)
+		mmio_size = 512 * 1024;
+	else
+		mmio_size = 2 * 1024 * 1024;
+	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+	if (dev_priv->regs == NULL) {
+		DRM_ERROR("failed to map registers\n");
+
+		return -EIO;
+	}
+
+	/* Try to make sure MCHBAR is enabled before poking at it */
+	intel_setup_mchbar(dev);
+
+	return 0;
+}
+
+static void i915_mmio_cleanup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	intel_teardown_mchbar(dev);
+	pci_iounmap(dev->pdev, dev_priv->regs);
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
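The four helpers above are consumed by i915_driver_load() and i915_driver_unload() in the hunks that follow: the workqueues become the first fallible step of load (and the last thing torn down), and the register mapping plus MCHBAR setup moves behind i915_mmio_setup()/i915_mmio_cleanup(). A condensed, hypothetical skeleton of that pairing, for illustration only (the real call sites and error labels are in the hunks below):

/* Hypothetical sketch, not part of the patch: the pairing and the
 * reverse-order unwind that the refactored load/unload paths follow. */
static int example_early_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_workqueues_init(dev_priv);	/* first step that can fail */
	if (ret < 0)
		return ret;

	ret = i915_mmio_setup(dev);		/* register BAR + MCHBAR */
	if (ret < 0)
		goto out_workqueues;

	return 0;

out_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

static void example_early_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_mmio_cleanup(dev);			/* pairs with i915_mmio_setup() */
	i915_workqueues_cleanup(dev_priv);	/* pairs with i915_workqueues_init() */
}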
@@ -870,7 +984,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
 	struct drm_i915_private *dev_priv;
 	struct intel_device_info *info, *device_info;
-	int ret = 0, mmio_bar, mmio_size;
+	int ret = 0;
 	uint32_t aperture_size;
 
 	info = (struct intel_device_info *) flags;
@@ -897,6 +1011,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	mutex_init(&dev_priv->modeset_restore_lock);
 	mutex_init(&dev_priv->av_mutex);
 
+	ret = i915_workqueues_init(dev_priv);
+	if (ret < 0)
+		goto out_free_priv;
+
 	intel_pm_setup(dev);
 
 	intel_runtime_pm_get(dev_priv);
@@ -915,28 +1033,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	if (i915_get_bridge_dev(dev)) {
 		ret = -EIO;
-		goto free_priv;
+		goto out_runtime_pm_put;
 	}
 
-	mmio_bar = IS_GEN2(dev) ? 1 : 0;
-	/* Before gen4, the registers and the GTT are behind different BARs.
-	 * However, from gen4 onwards, the registers and the GTT are shared
-	 * in the same BAR, so we want to restrict this ioremap from
-	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
-	 * the register BAR remains the same size for all the earlier
-	 * generations up to Ironlake.
-	 */
-	if (info->gen < 5)
-		mmio_size = 512*1024;
-	else
-		mmio_size = 2*1024*1024;
-
-	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
-	if (!dev_priv->regs) {
-		DRM_ERROR("failed to map registers\n");
-		ret = -EIO;
+	ret = i915_mmio_setup(dev);
+	if (ret < 0)
 		goto put_bridge;
-	}
 
 	/* This must be called before any calls to HAS_PCH_* */
 	intel_detect_pch(dev);
@@ -945,7 +1047,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	ret = i915_gem_gtt_init(dev);
 	if (ret)
-		goto out_freecsr;
+		goto out_uncore_fini;
 
 	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
 	 * otherwise the vga fbdev driver falls over. */
@@ -991,49 +1093,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
 					      aperture_size);
 
-	/* The i915 workqueue is primarily used for batched retirement of
-	 * requests (and thus managing bo) once the task has been completed
-	 * by the GPU. i915_gem_retire_requests() is called directly when we
-	 * need high-priority retirement, such as waiting for an explicit
-	 * bo.
-	 *
-	 * It is also used for periodic low-priority events, such as
-	 * idle-timers and recording error state.
-	 *
-	 * All tasks on the workqueue are expected to acquire the dev mutex
-	 * so there is no point in running more than one instance of the
-	 * workqueue at any time. Use an ordered one.
-	 */
-	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
-	if (dev_priv->wq == NULL) {
-		DRM_ERROR("Failed to create our workqueue.\n");
-		ret = -ENOMEM;
-		goto out_mtrrfree;
-	}
-
-	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
-	if (dev_priv->hotplug.dp_wq == NULL) {
-		DRM_ERROR("Failed to create our dp workqueue.\n");
-		ret = -ENOMEM;
-		goto out_freewq;
-	}
-
-	dev_priv->gpu_error.hangcheck_wq =
-		alloc_ordered_workqueue("i915-hangcheck", 0);
-	if (dev_priv->gpu_error.hangcheck_wq == NULL) {
-		DRM_ERROR("Failed to create our hangcheck workqueue.\n");
-		ret = -ENOMEM;
-		goto out_freedpwq;
-	}
-
 	intel_irq_init(dev_priv);
 	intel_uncore_sanitize(dev);
 
-	/* Try to make sure MCHBAR is enabled before poking at it */
-	intel_setup_mchbar(dev);
 	intel_opregion_setup(dev);
 
-	i915_gem_load(dev);
+	i915_gem_load_init(dev);
+	i915_gem_shrinker_init(dev_priv);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
 	 * integrated graphics even though the support isn't actually there
@@ -1046,8 +1112,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	 * be lost or delayed, but we use them anyways to avoid
 	 * stuck interrupts on some machines.
 	 */
-	if (!IS_I945G(dev) && !IS_I945GM(dev))
-		pci_enable_msi(dev->pdev);
+	if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+		if (pci_enable_msi(dev->pdev) < 0)
+			DRM_DEBUG_DRIVER("can't enable MSI");
+	}
 
 	intel_device_info_runtime_init(dev);
 
@@ -1097,38 +1165,29 @@ out_power_well:
 	intel_power_domains_fini(dev_priv);
 	drm_vblank_cleanup(dev);
 out_gem_unload:
-	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
-	unregister_shrinker(&dev_priv->mm.shrinker);
+	i915_gem_shrinker_cleanup(dev_priv);
 
 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);
 
 	intel_teardown_mchbar(dev);
 	pm_qos_remove_request(&dev_priv->pm_qos);
-	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
-out_freedpwq:
-	destroy_workqueue(dev_priv->hotplug.dp_wq);
-out_freewq:
-	destroy_workqueue(dev_priv->wq);
-out_mtrrfree:
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
 	io_mapping_free(dev_priv->gtt.mappable);
 out_gtt:
 	i915_global_gtt_cleanup(dev);
-out_freecsr:
-	intel_csr_ucode_fini(dev_priv);
+out_uncore_fini:
 	intel_uncore_fini(dev);
-	pci_iounmap(dev->pdev, dev_priv->regs);
+	i915_mmio_cleanup(dev);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
-free_priv:
-	kmem_cache_destroy(dev_priv->requests);
-	kmem_cache_destroy(dev_priv->vmas);
-	kmem_cache_destroy(dev_priv->objects);
-
+	i915_gem_load_cleanup(dev);
+out_runtime_pm_put:
 	intel_runtime_pm_put(dev_priv);
-
+	i915_workqueues_cleanup(dev_priv);
+out_free_priv:
 	kfree(dev_priv);
+
 	return ret;
 }
 
@@ -1153,8 +1212,7 @@ int i915_driver_unload(struct drm_device *dev)
 
 	i915_teardown_sysfs(dev);
 
-	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
-	unregister_shrinker(&dev_priv->mm.shrinker);
+	i915_gem_shrinker_cleanup(dev_priv);
 
 	io_mapping_free(dev_priv->gtt.mappable);
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1182,6 +1240,8 @@ int i915_driver_unload(struct drm_device *dev)
 	vga_switcheroo_unregister_client(dev->pdev);
 	vga_client_register(dev->pdev, NULL, NULL, NULL);
 
+	intel_csr_ucode_fini(dev_priv);
+
 	/* Free error state after interrupts are fully disabled. */
 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 	i915_destroy_error_state(dev);
@@ -1200,27 +1260,17 @@ int i915_driver_unload(struct drm_device *dev)
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
 	intel_fbc_cleanup_cfb(dev_priv);
-	i915_gem_cleanup_stolen(dev);
 
-	intel_csr_ucode_fini(dev_priv);
-
-	intel_teardown_mchbar(dev);
-
-	destroy_workqueue(dev_priv->hotplug.dp_wq);
-	destroy_workqueue(dev_priv->wq);
-	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
 	pm_qos_remove_request(&dev_priv->pm_qos);
 
 	i915_global_gtt_cleanup(dev);
 
 	intel_uncore_fini(dev);
-	if (dev_priv->regs != NULL)
-		pci_iounmap(dev->pdev, dev_priv->regs);
+	i915_mmio_cleanup(dev);
 
-	kmem_cache_destroy(dev_priv->requests);
-	kmem_cache_destroy(dev_priv->vmas);
-	kmem_cache_destroy(dev_priv->objects);
+	i915_gem_load_cleanup(dev);
 	pci_dev_put(dev_priv->bridge_dev);
+	i915_workqueues_cleanup(dev_priv);
 	kfree(dev_priv);
 
 	return 0;
@@ -1261,8 +1311,6 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
 	i915_gem_context_close(dev, file);
 	i915_gem_release(dev, file);
 	mutex_unlock(&dev->struct_mutex);
-
-	intel_modeset_preclose(dev, file);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)