49 files changed, 2382 insertions(+), 679 deletions(-)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index aef87fdbd187..44311296ec02 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -840,6 +840,14 @@ static bool i830_check_flags(unsigned int flags)
 	return false;
 }
 
+void intel_gtt_insert_page(dma_addr_t addr,
+			   unsigned int pg,
+			   unsigned int flags)
+{
+	intel_private.driver->write_entry(addr, pg, flags);
+}
+EXPORT_SYMBOL(intel_gtt_insert_page);
+
 void intel_gtt_insert_sg_entries(struct sg_table *st,
 				 unsigned int pg_start,
 				 unsigned int flags)
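
Note: intel_gtt_insert_page() exposes a single-PTE write to complement the
scatterlist-based intel_gtt_insert_sg_entries(); the i915 pread/pwrite
fallback paths later in this series reach it through ggtt->base.insert_page().
A minimal sketch of a direct caller, assuming the page is already DMA-mapped;
the helper name and the scratch-restore step are illustrative, not part of
this patch:

    #include <drm/intel-gtt.h>
    #include <linux/agp_backend.h>

    /* Hypothetical helper: temporarily point GTT entry `pg` at `dma`. */
    static void remap_one_gtt_entry(dma_addr_t dma, unsigned int pg)
    {
            /* Write a single PTE; flags are the AGP memory types,
             * e.g. AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY. */
            intel_gtt_insert_page(dma, pg, AGP_USER_MEMORY);

            /* ... access the page through the aperture window at
             * (pg << PAGE_SHIFT) ... */

            /* Point the entry back at the scratch page when done. */
            intel_gtt_clear_range(pg, 1);
    }
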
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 29a32b11953b..7769e469118f 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -57,6 +57,28 @@ config DRM_I915_USERPTR
 
 	  If in doubt, say "Y".
 
+config DRM_I915_GVT
+	bool "Enable Intel GVT-g graphics virtualization host support"
+	depends on DRM_I915
+	default n
+	help
+	  Choose this option if you want to enable Intel GVT-g graphics
+	  virtualization technology host support with integrated graphics.
+	  With GVT-g, it's possible to have one integrated graphics
+	  device shared by multiple VMs under different hypervisors.
+
+	  Note that at least one hypervisor like Xen or KVM is required for
+	  this driver to work, and it only supports newer devices from
+	  Broadwell+. For further information and a setup guide, you can
+	  visit: http://01.org/igvt-g.
+
+	  For now this is just a stub to support the modifications of i915
+	  for the GVT device model. It requires at least one MPT module for
+	  Xen/KVM and the other components of the GVT device model to work.
+	  Use it at your own risk.
+
+	  If in doubt, say "N".
+
 menu "drm/i915 Debugging"
 	depends on DRM_I915
 	depends on EXPERT
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 7e2944406b8f..276abf1cac2b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -104,6 +104,11 @@ i915-y += i915_vgpu.o
 # legacy horrors
 i915-y += i915_dma.o
 
+ifeq ($(CONFIG_DRM_I915_GVT),y)
+i915-y += intel_gvt.o
+include $(src)/gvt/Makefile
+endif
+
 obj-$(CONFIG_DRM_I915) += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
new file mode 100644
index 000000000000..d0f21a6ad60d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -0,0 +1,5 @@
+GVT_DIR := gvt
+GVT_SOURCE := gvt.o
+
+ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
+i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
new file mode 100644
index 000000000000..7ef412be665f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __GVT_DEBUG_H__
+#define __GVT_DEBUG_H__
+
+#define gvt_dbg_core(fmt, args...) \
+	DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
+
+/*
+ * Other GVT debug stuff will be introduced in the GVT device model patches.
+ */
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
new file mode 100644
index 000000000000..927f4579f5b6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <xen/xen.h>
+
+#include "i915_drv.h"
+
+struct intel_gvt_host intel_gvt_host;
+
+static const char * const supported_hypervisors[] = {
+	[INTEL_GVT_HYPERVISOR_XEN] = "XEN",
+	[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
+};
+
+/**
+ * intel_gvt_init_host - Load MPT modules and detect if we're running in host
+ *
+ * This function is called at the driver loading stage. If it fails to find
+ * a loadable MPT module, or detects that we're currently running inside a
+ * VM, GVT-g will be disabled.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_gvt_init_host(void)
+{
+	if (intel_gvt_host.initialized)
+		return 0;
+
+	/* Xen DOM U */
+	if (xen_domain() && !xen_initial_domain())
+		return -ENODEV;
+
+	/* Try to load MPT modules for hypervisors */
+	if (xen_initial_domain()) {
+		/* In Xen dom0 */
+		intel_gvt_host.mpt = try_then_request_module(
+				symbol_get(xengt_mpt), "xengt");
+		intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
+	} else {
+		/* not in Xen. Try KVMGT */
+		intel_gvt_host.mpt = try_then_request_module(
+				symbol_get(kvmgt_mpt), "kvm");
+		intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
+	}
+
+	/* Failed to load an MPT module - bail out */
+	if (!intel_gvt_host.mpt)
+		return -EINVAL;
+
+	/* Try to detect if we're running in host instead of VM. */
+	if (!intel_gvt_hypervisor_detect_host())
+		return -ENODEV;
+
+	gvt_dbg_core("Running with hypervisor %s in host mode\n",
+			supported_hypervisors[intel_gvt_host.hypervisor_type]);
+
+	intel_gvt_host.initialized = true;
+	return 0;
+}
+
+static void init_device_info(struct intel_gvt *gvt)
+{
+	if (IS_BROADWELL(gvt->dev_priv))
+		gvt->device_info.max_support_vgpus = 8;
+	/* This function will grow large in the GVT device model patches. */
+}
+
+/**
+ * intel_gvt_clean_device - clean a GVT device
+ * @dev_priv: i915 device private
+ *
+ * This function is called at the driver unloading stage, to free the
+ * resources owned by a GVT device.
+ *
+ */
+void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+{
+	struct intel_gvt *gvt = &dev_priv->gvt;
+
+	if (WARN_ON(!gvt->initialized))
+		return;
+
+	/* Other de-initialization of GVT components will be introduced here. */
+
+	gvt->initialized = false;
+}
+
+/**
+ * intel_gvt_init_device - initialize a GVT device
+ * @dev_priv: drm i915 private data
+ *
+ * This function is called at the initialization stage, to initialize the
+ * necessary GVT components.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_gvt_init_device(struct drm_i915_private *dev_priv)
+{
+	struct intel_gvt *gvt = &dev_priv->gvt;
+
+	/*
+	 * The GVT device cannot be initialized until intel_gvt_host has
+	 * been initialized first.
+	 */
+	if (WARN_ON(!intel_gvt_host.initialized))
+		return -EINVAL;
+
+	if (WARN_ON(gvt->initialized))
+		return -EEXIST;
+
+	gvt_dbg_core("init gvt device\n");
+
+	init_device_info(gvt);
+	/*
+	 * Other initialization of GVT components will be introduced here.
+	 */
+	gvt_dbg_core("gvt device creation is done\n");
+	gvt->initialized = true;
+	return 0;
+}
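
Note: the i915_dma.c hunks further down call intel_gvt_init() and
intel_gvt_cleanup(), which live in the new i915/intel_gvt.c added by this
series but not reproduced in this excerpt. A plausible minimal sketch of that
glue, assuming it simply chains the two stages above and treats an absent
hypervisor as "run without GVT-g" rather than as a fatal driver-load error;
the exact checks are assumptions:

    /* Sketch only: the real intel_gvt.c is part of this series but not
     * shown here; the error-handling policy below is an assumption. */
    int intel_gvt_init(struct drm_i915_private *dev_priv)
    {
            if (intel_gvt_init_host() < 0) {
                    DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n");
                    return 0;       /* proceed without GVT-g */
            }

            return intel_gvt_init_device(dev_priv);
    }

    void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
    {
            if (intel_gvt_active(dev_priv))
                    intel_gvt_clean_device(dev_priv);
    }
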
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
new file mode 100644
index 000000000000..fb619a6e519d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_H_
+#define _GVT_H_
+
+#include "debug.h"
+#include "hypercall.h"
+
+#define GVT_MAX_VGPU 8
+
+enum {
+	INTEL_GVT_HYPERVISOR_XEN = 0,
+	INTEL_GVT_HYPERVISOR_KVM,
+};
+
+struct intel_gvt_host {
+	bool initialized;
+	int hypervisor_type;
+	struct intel_gvt_mpt *mpt;
+};
+
+extern struct intel_gvt_host intel_gvt_host;
+
+/* Describe per-platform limitations. */
+struct intel_gvt_device_info {
+	u32 max_support_vgpus;
+	/* This data structure will grow bigger in GVT device model patches */
+};
+
+struct intel_vgpu {
+	struct intel_gvt *gvt;
+	int id;
+	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
+};
+
+struct intel_gvt {
+	struct mutex lock;
+	bool initialized;
+
+	struct drm_i915_private *dev_priv;
+	struct idr vgpu_idr;	/* vGPU IDR pool */
+
+	struct intel_gvt_device_info device_info;
+};
+
+#include "mpt.h"
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
new file mode 100644
index 000000000000..254df8bf1f35
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_HYPERCALL_H_
+#define _GVT_HYPERCALL_H_
+
+/*
+ * Specific GVT-g MPT module function collections. Currently GVT-g supports
+ * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
+ */
+struct intel_gvt_mpt {
+	int (*detect_host)(void);
+};
+
+extern struct intel_gvt_mpt xengt_mpt;
+extern struct intel_gvt_mpt kvmgt_mpt;
+
+#endif /* _GVT_HYPERCALL_H_ */
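
Note: neither xengt_mpt nor kvmgt_mpt is defined anywhere in this series;
they come from separate hypervisor modules that intel_gvt_init_host() picks
up via try_then_request_module()/symbol_get(). A sketch of the shape such a
module takes; only the ops-table name is dictated by hypercall.h, the
detection logic shown is an assumption:

    /* Hypothetical skeleton of a KVM MPT module. */
    #include <linux/module.h>
    #include "gvt.h"

    static int kvmgt_detect_host(void)
    {
            /* Assumed convention (see the caller in intel_gvt_init_host()):
             * non-zero means we are the host / privileged domain, zero
             * means we are running inside a VM. */
            return 1;
    }

    struct intel_gvt_mpt kvmgt_mpt = {
            .detect_host = kvmgt_detect_host,
    };
    EXPORT_SYMBOL_GPL(kvmgt_mpt);

    MODULE_LICENSE("GPL");
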
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
new file mode 100644
index 000000000000..03601e3ffa7c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_MPT_H_
+#define _GVT_MPT_H_
+
+/**
+ * DOC: Hypervisor Service APIs for GVT-g Core Logic
+ *
+ * This is the glue layer between specific hypervisor MPT modules and the
+ * GVT-g core logic. Each kind of hypervisor MPT module provides a collection
+ * of function callbacks and is attached to the GVT host when the driver is
+ * loading. The GVT-g core logic calls these APIs to request specific
+ * services from the hypervisor.
+ */
+
+/**
+ * intel_gvt_hypervisor_detect_host - check if GVT-g is running within a
+ * hypervisor host/privileged domain
+ *
+ * Returns:
+ * Non-zero if running in the host/privileged domain, zero if the current
+ * kernel is running inside a VM (see the caller in intel_gvt_init_host()).
+ */
+static inline int intel_gvt_hypervisor_detect_host(void)
+{
+	return intel_gvt_host.mpt->detect_host();
+}
+
+#endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index d97f28bfa9db..b0fd6a7b0603 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -737,7 +737,7 @@ static void fini_hash_table(struct intel_engine_cs *engine)
 
 /**
  * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
- * @ring: the ringbuffer to initialize
+ * @engine: the engine to initialize
  *
  * Optionally initializes fields related to batch buffer command parsing in the
  * struct intel_engine_cs based on whether the platform requires software
@@ -830,7 +830,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 
 /**
  * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
- * @ring: the ringbuffer to clean up
+ * @engine: the engine to clean up
  *
  * Releases any resources related to command parsing that may have been
  * initialized for the specified ring.
@@ -1024,7 +1024,7 @@ unpin_src:
 
 /**
  * i915_needs_cmd_parser() - should a given ring use software command parsing?
- * @ring: the ring in question
+ * @engine: the engine in question
  *
  * Only certain platforms require software batch buffer command parsing, and
  * only when enabled via module parameter.
@@ -1176,7 +1176,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 
 /**
  * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
- * @ring: the ring on which the batch is to execute
+ * @engine: the engine on which the batch is to execute
  * @batch_obj: the batch buffer in question
  * @shadow_batch_obj: copy of the batch buffer in question
  * @batch_start_offset: byte offset in the batch at which execution starts
@@ -1281,6 +1281,7 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
 
 /**
  * i915_cmd_parser_get_version() - get the cmd parser version number
+ * @dev_priv: i915 device private
  *
  * The cmd parser maintains a simple increasing integer version number suitable
  * for passing to userspace clients to determine what operations are permitted.
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 614ac085e51f..5b7526697838 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2574,6 +2574,10 @@ static int i915_guc_info(struct seq_file *m, void *data)
 
 	mutex_unlock(&dev->struct_mutex);
 
+	seq_printf(m, "Doorbell map:\n");
+	seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap);
+	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline);
+
 	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
 	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
 	seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
@@ -5306,6 +5310,10 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
 		   INTEL_INFO(dev)->eu_total);
 	seq_printf(m, "  Available EU Per Subslice: %u\n",
 		   INTEL_INFO(dev)->eu_per_subslice);
+	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev)));
+	if (HAS_POOLED_EU(dev))
+		seq_printf(m, "  Min EU in pool: %u\n",
+			   INTEL_INFO(dev)->min_eu_in_pool);
 	seq_printf(m, "  Has Slice Power Gating: %s\n",
 		   yesno(INTEL_INFO(dev)->has_slice_pg));
 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 07edaed9d5a2..d15a461fa84a 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -764,6 +764,32 @@ static void gen9_sseu_info_init(struct drm_device *dev)
 			    (info->slice_total > 1));
 	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
 	info->has_eu_pg = (info->eu_per_subslice > 2);
+
+	if (IS_BROXTON(dev)) {
+#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & (0x1 << ss))
+		/*
+		 * There is a HW issue in 2x6 fused down parts that requires
+		 * Pooled EU to be enabled as a WA. The pool configuration
+		 * changes depending upon which subslice is fused down. This
+		 * doesn't affect if the device has all 3 subslices enabled.
+		 */
+		/* WaEnablePooledEuFor2x6:bxt */
+		info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
+				       (info->subslice_per_slice == 2 &&
+					INTEL_REVID(dev) < BXT_REVID_C0));
+
+		info->min_eu_in_pool = 0;
+		if (info->has_pooled_eu) {
+			if (IS_SS_DISABLED(ss_disable, 0) ||
+			    IS_SS_DISABLED(ss_disable, 2))
+				info->min_eu_in_pool = 3;
+			else if (IS_SS_DISABLED(ss_disable, 1))
+				info->min_eu_in_pool = 6;
+			else
+				info->min_eu_in_pool = 9;
+		}
+#undef IS_SS_DISABLED
+	}
 }
 
 static void broadwell_sseu_info_init(struct drm_device *dev)
@@ -962,6 +988,9 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
 	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
 	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+	DRM_DEBUG_DRIVER("Has Pooled EU: %s\n", HAS_POOLED_EU(dev) ? "y" : "n");
+	if (HAS_POOLED_EU(dev))
+		DRM_DEBUG_DRIVER("Min EU in pool: %u\n", info->min_eu_in_pool);
 	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
 			 info->has_slice_pg ? "y" : "n");
 	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
@@ -1091,6 +1120,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	if (ret < 0)
 		return ret;
 
+	ret = intel_gvt_init(dev_priv);
+	if (ret < 0)
+		goto err_workqueues;
+
 	/* This must be called before any calls to HAS_PCH_* */
 	intel_detect_pch(dev);
 
@@ -1116,6 +1149,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 		 "It may not be fully functional.\n");
 
 	return 0;
+
+err_workqueues:
+	i915_workqueues_cleanup(dev_priv);
+	return ret;
 }
 
 /**
@@ -1487,6 +1524,8 @@ int i915_driver_unload(struct drm_device *dev)
 
 	intel_fbdev_fini(dev);
 
+	intel_gvt_cleanup(dev_priv);
+
 	ret = i915_gem_suspend(dev);
 	if (ret) {
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
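
Note: to make the WaEnablePooledEuFor2x6 table in the gen9_sseu_info_init()
hunk above concrete, here is a self-contained sketch of the same pool-size
selection; the helper name and the standalone-program framing are
illustrative, only the mask semantics come from the hunk:

    #include <stdio.h>

    /* Mirror of the Broxton min_eu_in_pool selection: ss_disable is a
     * 3-bit fuse mask, bit N set meaning subslice N is fused off. */
    static unsigned int bxt_min_eu_in_pool(unsigned int ss_disable)
    {
            if (ss_disable & ((1 << 0) | (1 << 2)))  /* SS0 or SS2 fused off */
                    return 3;
            if (ss_disable & (1 << 1))               /* SS1 fused off */
                    return 6;
            return 9;                                /* all three present */
    }

    int main(void)
    {
            /* A 2x6 part with subslice 1 fused off pools at least 6 EUs. */
            printf("min EU in pool: %u\n", bxt_min_eu_in_pool(1 << 1));
            return 0;
    }
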
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 872c60608dbd..3eb47fbcea73 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -355,6 +355,7 @@ static const struct intel_device_info intel_broxton_info = {
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.has_fbc = 1,
+	.has_pooled_eu = 0,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
 	BDW_COLORS,
@@ -517,8 +518,10 @@ void intel_detect_pch(struct drm_device *dev)
 		} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
 			   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
 			   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
-			    pch->subsystem_vendor == 0x1af4 &&
-			    pch->subsystem_device == 0x1100)) {
+			    pch->subsystem_vendor ==
+				    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+			    pch->subsystem_device ==
+				    PCI_SUBDEVICE_ID_QEMU)) {
 			dev_priv->pch_type = intel_virt_detect_pch(dev);
 		} else
 			continue;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ee338655f782..24a86c64d22e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -62,12 +62,14 @@
 #include "i915_gem_gtt.h"
 #include "i915_gem_render_state.h"
 
+#include "intel_gvt.h"
+
 /* General customization:
  */
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20160606"
+#define DRIVER_DATE		"20160620"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -762,7 +764,8 @@ struct intel_csr {
 	func(has_llc) sep \
 	func(has_snoop) sep \
 	func(has_ddi) sep \
-	func(has_fpga_dbg)
+	func(has_fpga_dbg) sep \
+	func(has_pooled_eu)
 
 #define DEFINE_FLAG(name) u8 name:1
 #define SEP_SEMICOLON ;
@@ -788,6 +791,7 @@ struct intel_device_info {
 	u8 subslice_per_slice;
 	u8 eu_total;
 	u8 eu_per_subslice;
+	u8 min_eu_in_pool;
 	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
 	u8 subslice_7eu[3];
 	u8 has_slice_pg:1;
@@ -877,6 +881,10 @@ struct i915_gem_context {
 		int pin_count;
 		bool initialised;
 	} engine[I915_NUM_ENGINES];
+	u32 ring_size;
+	u32 desc_template;
+	struct atomic_notifier_head status_notifier;
+	bool execlists_force_single_submission;
 
 	struct list_head link;
 
@@ -1740,6 +1748,8 @@ struct drm_i915_private {
 
 	struct i915_virtual_gpu vgpu;
 
+	struct intel_gvt gvt;
+
 	struct intel_guc guc;
 
 	struct intel_csr csr;
@@ -2718,6 +2728,15 @@ struct drm_i915_cmd_table {
 
 #define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
 
+#define KBL_REVID_A0		0x0
+#define KBL_REVID_B0		0x1
+#define KBL_REVID_C0		0x2
+#define KBL_REVID_D0		0x3
+#define KBL_REVID_E0		0x4
+
+#define IS_KBL_REVID(p, since, until) \
+	(IS_KABYLAKE(p) && IS_REVID(p, since, until))
+
 /*
  * The genX designation typically refers to the render engine, so render
  * capability related checks should use IS_GEN, while display and other checks
@@ -2824,6 +2843,8 @@ struct drm_i915_cmd_table {
 				 !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
 				 !IS_BROXTON(dev))
 
+#define HAS_POOLED_EU(dev)	(INTEL_INFO(dev)->has_pooled_eu)
+
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
@@ -2941,6 +2962,12 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
+
+static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->gvt.initialized;
+}
+
 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
 {
 	return dev_priv->vgpu.active;
@@ -3110,6 +3137,23 @@ static inline int __sg_page_count(struct scatterlist *sg)
 struct page *
 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
 
+static inline dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
+{
+	if (n < obj->get_page.last) {
+		obj->get_page.sg = obj->pages->sgl;
+		obj->get_page.last = 0;
+	}
+
+	while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
+		obj->get_page.last += __sg_page_count(obj->get_page.sg++);
+		if (unlikely(sg_is_chain(obj->get_page.sg)))
+			obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
+	}
+
+	return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
+}
+
 static inline struct page *
 i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
@@ -3432,6 +3476,8 @@ int i915_switch_context(struct drm_i915_gem_request *req);
 void i915_gem_context_free(struct kref *ctx_ref);
 struct drm_i915_gem_object *
 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
+struct i915_gem_context *
+i915_gem_context_create_gvt(struct drm_device *dev);
 
 static inline struct i915_gem_context *
 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
@@ -3620,6 +3666,7 @@ int intel_bios_init(struct drm_i915_private *dev_priv);
 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
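
Note: the KBL_REVID_* values follow the pattern already established by the
BXT revids and IS_BXT_REVID() above. A sketch of how a stepping-bounded
workaround would consume them; the workaround body is a placeholder, not a
real workaround from this series:

    /* Illustrative only: gate a hypothetical workaround to Kaby Lake
     * steppings A0 through C0 inclusive. REVID_FOREVER can be used as
     * `until` for an open-ended bound, as with existing IS_BXT_REVID()
     * users. */
    static void apply_kbl_stepping_wa(struct drm_i915_private *dev_priv)
    {
            if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_C0))
                    DRM_DEBUG_DRIVER("applying early-stepping KBL WA\n");
    }
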
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 343d88114f3b..21d0dea57312 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -54,12 +54,33 @@ static bool cpu_cache_is_coherent(struct drm_device *dev,
 
 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+		return false;
+
 	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
 		return true;
 
 	return obj->pin_display;
 }
 
+static int
+insert_mappable_node(struct drm_i915_private *i915,
+		     struct drm_mm_node *node, u32 size)
+{
+	memset(node, 0, sizeof(*node));
+	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
+						   size, 0, 0, 0,
+						   i915->ggtt.mappable_end,
+						   DRM_MM_SEARCH_DEFAULT,
+						   DRM_MM_CREATE_DEFAULT);
+}
+
+static void
+remove_mappable_node(struct drm_mm_node *node)
+{
+	drm_mm_remove_node(node);
+}
+
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
 				  size_t size)
@@ -409,6 +430,9 @@ i915_gem_dumb_create(struct drm_file *file,
 
 /**
  * Creates a new mm object and returns a handle to it.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
  */
 int
 i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -585,6 +609,142 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
 	return ret ? - EFAULT : 0;
 }
 
+static inline unsigned long
+slow_user_access(struct io_mapping *mapping,
+		 uint64_t page_base, int page_offset,
+		 char __user *user_data,
+		 unsigned long length, bool pwrite)
+{
+	void __iomem *ioaddr;
+	void *vaddr;
+	uint64_t unwritten;
+
+	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
+	/* We can use the cpu mem copy function because this is X86. */
+	vaddr = (void __force *)ioaddr + page_offset;
+	if (pwrite)
+		unwritten = __copy_from_user(vaddr, user_data, length);
+	else
+		unwritten = __copy_to_user(user_data, vaddr, length);
+
+	io_mapping_unmap(ioaddr);
+	return unwritten;
+}
+
+static int
+i915_gem_gtt_pread(struct drm_device *dev,
+		   struct drm_i915_gem_object *obj, uint64_t size,
+		   uint64_t data_offset, uint64_t data_ptr)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct drm_mm_node node;
+	char __user *user_data;
+	uint64_t remain;
+	uint64_t offset;
+	int ret;
+
+	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+	if (ret) {
+		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
+		if (ret)
+			goto out;
+
+		ret = i915_gem_object_get_pages(obj);
+		if (ret) {
+			remove_mappable_node(&node);
+			goto out;
+		}
+
+		i915_gem_object_pin_pages(obj);
+	} else {
+		node.start = i915_gem_obj_ggtt_offset(obj);
+		node.allocated = false;
+		ret = i915_gem_object_put_fence(obj);
+		if (ret)
+			goto out_unpin;
+	}
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (ret)
+		goto out_unpin;
+
+	user_data = u64_to_user_ptr(data_ptr);
+	remain = size;
+	offset = data_offset;
+
+	mutex_unlock(&dev->struct_mutex);
+	if (likely(!i915.prefault_disable)) {
+		ret = fault_in_multipages_writeable(user_data, remain);
+		if (ret) {
+			mutex_lock(&dev->struct_mutex);
+			goto out_unpin;
+		}
+	}
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * page_base = page offset within aperture
+		 * page_offset = offset within page
+		 * page_length = bytes to copy for this page
+		 */
+		u32 page_base = node.start;
+		unsigned page_offset = offset_in_page(offset);
+		unsigned page_length = PAGE_SIZE - page_offset;
+		page_length = remain < page_length ? remain : page_length;
+		if (node.allocated) {
+			wmb();
+			ggtt->base.insert_page(&ggtt->base,
+					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+					       node.start,
+					       I915_CACHE_NONE, 0);
+			wmb();
+		} else {
+			page_base += offset & PAGE_MASK;
+		}
+		/* This is a slow read/write as it tries to read from
+		 * and write to user memory, which may result in page
+		 * faults, and so we cannot perform this under struct_mutex.
+		 */
+		if (slow_user_access(ggtt->mappable, page_base,
+				     page_offset, user_data,
+				     page_length, false)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+		/* The user has modified the object whilst we tried
+		 * reading from it, and we now have no idea what domain
+		 * the pages should be in. As we have just been touching
+		 * them directly, flush everything back to the GTT
+		 * domain.
+		 */
+		ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	}
+
+out_unpin:
+	if (node.allocated) {
+		wmb();
+		ggtt->base.clear_range(&ggtt->base,
+				       node.start, node.size,
+				       true);
+		i915_gem_object_unpin_pages(obj);
+		remove_mappable_node(&node);
+	} else {
+		i915_gem_object_ggtt_unpin(obj);
+	}
+out:
+	return ret;
+}
+
 static int
 i915_gem_shmem_pread(struct drm_device *dev,
 		     struct drm_i915_gem_object *obj,
@@ -600,6 +760,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	int needs_clflush = 0;
 	struct sg_page_iter sg_iter;
 
+	if (!obj->base.filp)
+		return -ENODEV;
+
 	user_data = u64_to_user_ptr(args->data_ptr);
 	remain = args->size;
 
@@ -672,6 +835,9 @@ out:
 
 /**
  * Reads data from the object referenced by handle.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
  *
  * On error, the contents of *data are undefined.
  */
@@ -708,18 +874,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	/* prime objects have no backing filp to GEM pread/pwrite
-	 * pages from.
-	 */
-	if (!obj->base.filp) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	trace_i915_gem_object_pread(obj, args->offset, args->size);
 
 	ret = i915_gem_shmem_pread(dev, obj, args, file);
 
+	/* pread for non shmem backed objects */
+	if (ret == -EFAULT || ret == -ENODEV)
+		ret = i915_gem_gtt_pread(dev, obj, args->size,
+					 args->offset, args->data_ptr);
+
 out:
 	drm_gem_object_unreference(&obj->base);
 unlock:
@@ -753,60 +916,99 @@ fast_user_write(struct io_mapping *mapping,
 /**
  * This is the fast pwrite path, where we copy the data directly from the
  * user into the GTT, uncached.
+ * @i915: i915 device private
+ * @obj: i915 gem object
+ * @args: pwrite arguments structure
+ * @file: drm file pointer
  */
 static int
-i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 			 struct drm_i915_gem_object *obj,
 			 struct drm_i915_gem_pwrite *args,
 			 struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	ssize_t remain;
-	loff_t offset, page_base;
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct drm_device *dev = obj->base.dev;
+	struct drm_mm_node node;
+	uint64_t remain, offset;
 	char __user *user_data;
-	int page_offset, page_length, ret;
+	int ret;
+	bool hit_slow_path = false;
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		return -EFAULT;
 
 	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
-	if (ret)
-		goto out;
+	if (ret) {
+		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
+		if (ret)
+			goto out;
+
+		ret = i915_gem_object_get_pages(obj);
+		if (ret) {
+			remove_mappable_node(&node);
+			goto out;
+		}
+
+		i915_gem_object_pin_pages(obj);
+	} else {
+		node.start = i915_gem_obj_ggtt_offset(obj);
+		node.allocated = false;
+		ret = i915_gem_object_put_fence(obj);
+		if (ret)
+			goto out_unpin;
+	}
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
 		goto out_unpin;
 
-	ret = i915_gem_object_put_fence(obj);
-	if (ret)
-		goto out_unpin;
+	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
+	obj->dirty = true;
 
 	user_data = u64_to_user_ptr(args->data_ptr);
+	offset = args->offset;
 	remain = args->size;
-
-	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
-
-	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
-
-	while (remain > 0) {
+	while (remain) {
 		/* Operation in this page
 		 *
 		 * page_base = page offset within aperture
 		 * page_offset = offset within page
 		 * page_length = bytes to copy for this page
 		 */
-		page_base = offset & PAGE_MASK;
-		page_offset = offset_in_page(offset);
-		page_length = remain;
-		if ((page_offset + remain) > PAGE_SIZE)
-			page_length = PAGE_SIZE - page_offset;
-
+		u32 page_base = node.start;
+		unsigned page_offset = offset_in_page(offset);
+		unsigned page_length = PAGE_SIZE - page_offset;
+		page_length = remain < page_length ? remain : page_length;
+		if (node.allocated) {
+			wmb(); /* flush the write before we modify the GGTT */
+			ggtt->base.insert_page(&ggtt->base,
+					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+					       node.start, I915_CACHE_NONE, 0);
+			wmb(); /* flush modifications to the GGTT (insert_page) */
+		} else {
+			page_base += offset & PAGE_MASK;
+		}
 		/* If we get a fault while copying data, then (presumably) our
 		 * source page isn't available. Return the error and we'll
 		 * retry in the slow path.
+		 * If the object is non-shmem backed, we retry again with the
+		 * path that handles page faults.
 		 */
 		if (fast_user_write(ggtt->mappable, page_base,
 				    page_offset, user_data, page_length)) {
-			ret = -EFAULT;
-			goto out_flush;
+			hit_slow_path = true;
+			mutex_unlock(&dev->struct_mutex);
+			if (slow_user_access(ggtt->mappable,
+					     page_base,
+					     page_offset, user_data,
+					     page_length, true)) {
+				ret = -EFAULT;
+				mutex_lock(&dev->struct_mutex);
+				goto out_flush;
+			}
+
+			mutex_lock(&dev->struct_mutex);
 		}
 
 		remain -= page_length;
@@ -815,9 +1017,31 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	}
 
 out_flush:
+	if (hit_slow_path) {
+		if (ret == 0 &&
+		    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+			/* The user has modified the object whilst we tried
+			 * reading from it, and we now have no idea what domain
+			 * the pages should be in. As we have just been touching
+			 * them directly, flush everything back to the GTT
+			 * domain.
+			 */
+			ret = i915_gem_object_set_to_gtt_domain(obj, false);
+		}
+	}
+
 	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 out_unpin:
-	i915_gem_object_ggtt_unpin(obj);
+	if (node.allocated) {
+		wmb();
+		ggtt->base.clear_range(&ggtt->base,
+				       node.start, node.size,
+				       true);
+		i915_gem_object_unpin_pages(obj);
+		remove_mappable_node(&node);
+	} else {
+		i915_gem_object_ggtt_unpin(obj);
+	}
 out:
 	return ret;
 }
@@ -1016,6 +1240,9 @@ out:
 
 /**
  * Writes data to the object referenced by handle.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
  *
  * On error, the contents of the buffer that were to be modified are undefined.
  */
| @@ -1062,14 +1289,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
| 1062 | goto out; | 1289 | goto out; |
| 1063 | } | 1290 | } |
| 1064 | 1291 | ||
| 1065 | /* prime objects have no backing filp to GEM pread/pwrite | ||
| 1066 | * pages from. | ||
| 1067 | */ | ||
| 1068 | if (!obj->base.filp) { | ||
| 1069 | ret = -EINVAL; | ||
| 1070 | goto out; | ||
| 1071 | } | ||
| 1072 | |||
| 1073 | trace_i915_gem_object_pwrite(obj, args->offset, args->size); | 1292 | trace_i915_gem_object_pwrite(obj, args->offset, args->size); |
| 1074 | 1293 | ||
| 1075 | ret = -EFAULT; | 1294 | ret = -EFAULT; |
| @@ -1079,20 +1298,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
| 1079 | * pread/pwrite currently are reading and writing from the CPU | 1298 | * pread/pwrite currently are reading and writing from the CPU |
| 1080 | * perspective, requiring manual detiling by the client. | 1299 | * perspective, requiring manual detiling by the client. |
| 1081 | */ | 1300 | */ |
| 1082 | if (obj->tiling_mode == I915_TILING_NONE && | 1301 | if (!obj->base.filp || cpu_write_needs_clflush(obj)) { |
| 1083 | obj->base.write_domain != I915_GEM_DOMAIN_CPU && | 1302 | ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file); |
| 1084 | cpu_write_needs_clflush(obj)) { | ||
| 1085 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); | ||
| 1086 | /* Note that the gtt paths might fail with non-page-backed user | 1303 | /* Note that the gtt paths might fail with non-page-backed user |
| 1087 | * pointers (e.g. gtt mappings when moving data between | 1304 | * pointers (e.g. gtt mappings when moving data between |
| 1088 | * textures). Fallback to the shmem path in that case. */ | 1305 | * textures). Fallback to the shmem path in that case. */ |
| 1089 | } | 1306 | } |
| 1090 | 1307 | ||
| 1091 | if (ret == -EFAULT || ret == -ENOSPC) { | 1308 | if (ret == -EFAULT) { |
| 1092 | if (obj->phys_handle) | 1309 | if (obj->phys_handle) |
| 1093 | ret = i915_gem_phys_pwrite(obj, args, file); | 1310 | ret = i915_gem_phys_pwrite(obj, args, file); |
| 1094 | else | 1311 | else if (obj->base.filp) |
| 1095 | ret = i915_gem_shmem_pwrite(dev, obj, args, file); | 1312 | ret = i915_gem_shmem_pwrite(dev, obj, args, file); |
| 1313 | else | ||
| 1314 | ret = -ENODEV; | ||
| 1096 | } | 1315 | } |
| 1097 | 1316 | ||
| 1098 | out: | 1317 | out: |
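With the !obj->base.filp guard gone, pwrite now tries the GTT path first for any object that either lacks a shmem filp or would need a clflush for a CPU write, and only reports -ENODEV when no backend can service the request. A standalone sketch of the resulting dispatch order (pick_pwrite_backend and its flag names are illustrative; gtt_faulted models i915_gem_gtt_pwrite_fast() returning -EFAULT):

#include <stdbool.h>

/* Which backend ends up servicing a pwrite after this change. */
enum pwrite_backend { BACKEND_GTT, BACKEND_PHYS, BACKEND_SHMEM, BACKEND_NONE };

static enum pwrite_backend
pick_pwrite_backend(bool has_filp, bool needs_clflush,
		    bool has_phys_handle, bool gtt_faulted)
{
	if ((!has_filp || needs_clflush) && !gtt_faulted)
		return BACKEND_GTT;	/* fast GTT path handled it */
	if (has_phys_handle)
		return BACKEND_PHYS;	/* i915_gem_phys_pwrite() */
	if (has_filp)
		return BACKEND_SHMEM;	/* i915_gem_shmem_pwrite() */
	return BACKEND_NONE;		/* no backing store: -ENODEV */
}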
| @@ -1213,6 +1432,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) | |||
| 1213 | * @req: duh! | 1432 | * @req: duh! |
| 1214 | * @interruptible: do an interruptible wait (normally yes) | 1433 | * @interruptible: do an interruptible wait (normally yes) |
| 1215 | * @timeout: in - how long to wait (NULL forever); out - how much time remaining | 1434 | * @timeout: in - how long to wait (NULL forever); out - how much time remaining |
| 1435 | * @rps: RPS client | ||
| 1216 | * | 1436 | * |
| 1217 | * Note: It is of utmost importance that the passed in seqno and reset_counter | 1437 | * Note: It is of utmost importance that the passed in seqno and reset_counter |
| 1218 | * values have been read by the caller in an smp safe manner. Where read-side | 1438 | * values have been read by the caller in an smp safe manner. Where read-side |
| @@ -1446,6 +1666,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req) | |||
| 1446 | /** | 1666 | /** |
| 1447 | * Waits for a request to be signaled, and cleans up the | 1667 | * Waits for a request to be signaled, and cleans up the |
| 1448 | * request and object lists appropriately for that event. | 1668 | * request and object lists appropriately for that event. |
| 1669 | * @req: request to wait on | ||
| 1449 | */ | 1670 | */ |
| 1450 | int | 1671 | int |
| 1451 | i915_wait_request(struct drm_i915_gem_request *req) | 1672 | i915_wait_request(struct drm_i915_gem_request *req) |
| @@ -1472,6 +1693,8 @@ i915_wait_request(struct drm_i915_gem_request *req) | |||
| 1472 | /** | 1693 | /** |
| 1473 | * Ensures that all rendering to the object has completed and the object is | 1694 | * Ensures that all rendering to the object has completed and the object is |
| 1474 | * safe to unbind from the GTT or access from the CPU. | 1695 | * safe to unbind from the GTT or access from the CPU. |
| 1696 | * @obj: i915 gem object | ||
| 1697 | * @readonly: waiting for read access or write | ||
| 1475 | */ | 1698 | */ |
| 1476 | int | 1699 | int |
| 1477 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, | 1700 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, |
| @@ -1589,6 +1812,9 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file) | |||
| 1589 | /** | 1812 | /** |
| 1590 | * Called when user space prepares to use an object with the CPU, either | 1813 | * Called when user space prepares to use an object with the CPU, either |
| 1591 | * through the mmap ioctl's mapping or a GTT mapping. | 1814 | * through the mmap ioctl's mapping or a GTT mapping. |
| 1815 | * @dev: drm device | ||
| 1816 | * @data: ioctl data blob | ||
| 1817 | * @file: drm file | ||
| 1592 | */ | 1818 | */ |
| 1593 | int | 1819 | int |
| 1594 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | 1820 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
| @@ -1652,6 +1878,9 @@ unlock: | |||
| 1652 | 1878 | ||
| 1653 | /** | 1879 | /** |
| 1654 | * Called when user space has done writes to this buffer | 1880 | * Called when user space has done writes to this buffer |
| 1881 | * @dev: drm device | ||
| 1882 | * @data: ioctl data blob | ||
| 1883 | * @file: drm file | ||
| 1655 | */ | 1884 | */ |
| 1656 | int | 1885 | int |
| 1657 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | 1886 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
| @@ -1682,8 +1911,11 @@ unlock: | |||
| 1682 | } | 1911 | } |
| 1683 | 1912 | ||
| 1684 | /** | 1913 | /** |
| 1685 | * Maps the contents of an object, returning the address it is mapped | 1914 | * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address |
| 1686 | * into. | 1915 | * it is mapped to. |
| 1916 | * @dev: drm device | ||
| 1917 | * @data: ioctl data blob | ||
| 1918 | * @file: drm file | ||
| 1687 | * | 1919 | * |
| 1688 | * While the mapping holds a reference on the contents of the object, it doesn't | 1920 | * While the mapping holds a reference on the contents of the object, it doesn't |
| 1689 | * imply a ref on the object itself. | 1921 | * imply a ref on the object itself. |
| @@ -2001,7 +2233,10 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) | |||
| 2001 | 2233 | ||
| 2002 | /** | 2234 | /** |
| 2003 | * i915_gem_get_gtt_alignment - return required GTT alignment for an object | 2235 | * i915_gem_get_gtt_alignment - return required GTT alignment for an object |
| 2004 | * @obj: object to check | 2236 | * @dev: drm device |
| 2237 | * @size: object size | ||
| 2238 | * @tiling_mode: tiling mode | ||
| 2239 | * @fenced: is fenced alignment required or not | ||
| 2005 | * | 2240 | * |
| 2006 | * Return the required GTT alignment for an object, taking into account | 2241 | * Return the required GTT alignment for an object, taking into account |
| 2007 | * potential fence register mapping. | 2242 | * potential fence register mapping. |
| @@ -2951,6 +3186,7 @@ void i915_gem_reset(struct drm_device *dev) | |||
| 2951 | 3186 | ||
| 2952 | /** | 3187 | /** |
| 2953 | * This function clears the request list as sequence numbers are passed. | 3188 | * This function clears the request list as sequence numbers are passed. |
| 3189 | * @engine: engine to retire requests on | ||
| 2954 | */ | 3190 | */ |
| 2955 | void | 3191 | void |
| 2956 | i915_gem_retire_requests_ring(struct intel_engine_cs *engine) | 3192 | i915_gem_retire_requests_ring(struct intel_engine_cs *engine) |
| @@ -3074,6 +3310,7 @@ i915_gem_idle_work_handler(struct work_struct *work) | |||
| 3074 | * Ensures that an object will eventually get non-busy by flushing any required | 3310 | * Ensures that an object will eventually get non-busy by flushing any required |
| 3075 | * write domains, emitting any outstanding lazy request and retiring and | 3311 | * write domains, emitting any outstanding lazy request and retiring and |
| 3076 | * completed requests. | 3312 | * completed requests. |
| 3313 | * @obj: object to flush | ||
| 3077 | */ | 3314 | */ |
| 3078 | static int | 3315 | static int |
| 3079 | i915_gem_object_flush_active(struct drm_i915_gem_object *obj) | 3316 | i915_gem_object_flush_active(struct drm_i915_gem_object *obj) |
| @@ -3099,7 +3336,9 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) | |||
| 3099 | 3336 | ||
| 3100 | /** | 3337 | /** |
| 3101 | * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT | 3338 | * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT |
| 3102 | * @DRM_IOCTL_ARGS: standard ioctl arguments | 3339 | * @dev: drm device pointer |
| 3340 | * @data: ioctl data blob | ||
| 3341 | * @file: drm file pointer | ||
| 3103 | * | 3342 | * |
| 3104 | * Returns 0 if successful, else an error is returned with the remaining time in | 3343 | * Returns 0 if successful, else an error is returned with the remaining time in |
| 3105 | * the timeout parameter. | 3344 | * the timeout parameter. |
| @@ -3489,6 +3728,11 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma, | |||
| 3489 | /** | 3728 | /** |
| 3490 | * Finds free space in the GTT aperture and binds the object or a view of it | 3729 | * Finds free space in the GTT aperture and binds the object or a view of it |
| 3491 | * there. | 3730 | * there. |
| 3731 | * @obj: object to bind | ||
| 3732 | * @vm: address space to bind into | ||
| 3733 | * @ggtt_view: global gtt view if applicable | ||
| 3734 | * @alignment: requested alignment | ||
| 3735 | * @flags: mask of PIN_* flags to use | ||
| 3492 | */ | 3736 | */ |
| 3493 | static struct i915_vma * | 3737 | static struct i915_vma * |
| 3494 | i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, | 3738 | i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, |
| @@ -3746,6 +3990,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) | |||
| 3746 | 3990 | ||
| 3747 | /** | 3991 | /** |
| 3748 | * Moves a single object to the GTT read, and possibly write domain. | 3992 | * Moves a single object to the GTT read, and possibly write domain. |
| 3993 | * @obj: object to act on | ||
| 3994 | * @write: ask for write access or read only | ||
| 3749 | * | 3995 | * |
| 3750 | * This function returns when the move is complete, including waiting on | 3996 | * This function returns when the move is complete, including waiting on |
| 3751 | * flushes to occur. | 3997 | * flushes to occur. |
| @@ -3817,6 +4063,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) | |||
| 3817 | 4063 | ||
| 3818 | /** | 4064 | /** |
| 3819 | * Changes the cache-level of an object across all VMA. | 4065 | * Changes the cache-level of an object across all VMA. |
| 4066 | * @obj: object to act on | ||
| 4067 | * @cache_level: new cache level to set for the object | ||
| 3820 | * | 4068 | * |
| 3821 | * After this function returns, the object will be in the new cache-level | 4069 | * After this function returns, the object will be in the new cache-level |
| 3822 | * across all GTT and the contents of the backing storage will be coherent, | 4070 | * across all GTT and the contents of the backing storage will be coherent, |
| @@ -3926,9 +4174,7 @@ out: | |||
| 3926 | * object is now coherent at its new cache level (with respect | 4174 | * object is now coherent at its new cache level (with respect |
| 3927 | * to the access domain). | 4175 | * to the access domain). |
| 3928 | */ | 4176 | */ |
| 3929 | if (obj->cache_dirty && | 4177 | if (obj->cache_dirty && cpu_write_needs_clflush(obj)) { |
| 3930 | obj->base.write_domain != I915_GEM_DOMAIN_CPU && | ||
| 3931 | cpu_write_needs_clflush(obj)) { | ||
| 3932 | if (i915_gem_clflush_object(obj, true)) | 4178 | if (i915_gem_clflush_object(obj, true)) |
| 3933 | i915_gem_chipset_flush(to_i915(obj->base.dev)); | 4179 | i915_gem_chipset_flush(to_i915(obj->base.dev)); |
| 3934 | } | 4180 | } |
| @@ -4098,6 +4344,8 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, | |||
| 4098 | 4344 | ||
| 4099 | /** | 4345 | /** |
| 4100 | * Moves a single object to the CPU read, and possibly write domain. | 4346 | * Moves a single object to the CPU read, and possibly write domain. |
| 4347 | * @obj: object to act on | ||
| 4348 | * @write: requesting write or read-only access | ||
| 4101 | * | 4349 | * |
| 4102 | * This function returns when the move is complete, including waiting on | 4350 | * This function returns when the move is complete, including waiting on |
| 4103 | * flushes to occur. | 4351 | * flushes to occur. |
| @@ -4886,11 +5134,9 @@ i915_gem_init_hw(struct drm_device *dev) | |||
| 4886 | intel_mocs_init_l3cc_table(dev); | 5134 | intel_mocs_init_l3cc_table(dev); |
| 4887 | 5135 | ||
| 4888 | /* We can't enable contexts until all firmware is loaded */ | 5136 | /* We can't enable contexts until all firmware is loaded */ |
| 4889 | if (HAS_GUC(dev)) { | 5137 | ret = intel_guc_setup(dev); |
| 4890 | ret = intel_guc_setup(dev); | 5138 | if (ret) |
| 4891 | if (ret) | 5139 | goto out; |
| 4892 | goto out; | ||
| 4893 | } | ||
| 4894 | 5140 | ||
| 4895 | /* | 5141 | /* |
| 4896 | * Increment the next seqno by 0x100 so we have a visible break | 5142 | * Increment the next seqno by 0x100 so we have a visible break |
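Dropping the HAS_GUC() guard at this call site assumes intel_guc_setup() now performs the capability check itself and returns success on hardware without a GuC; pushing the check into the callee keeps the bring-up sequence linear. A hedged sketch of that callee-side pattern (setup_optional_feature is a made-up name, not a driver function):

/* Hypothetical callee-side shape (assumption: intel_guc_setup() now does
 * something equivalent internally rather than relying on the caller). */
static int setup_optional_feature(bool hw_has_feature)
{
	if (!hw_has_feature)
		return 0;	/* absence of the feature is not an error */

	/* ... firmware fetch, DMA and enabling would go here ... */
	return 0;
}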
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index a3b11aac23a4..30d9b4fd30f3 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
| @@ -295,6 +295,10 @@ __create_hw_context(struct drm_device *dev, | |||
| 295 | ctx->remap_slice = ALL_L3_SLICES(dev_priv); | 295 | ctx->remap_slice = ALL_L3_SLICES(dev_priv); |
| 296 | 296 | ||
| 297 | ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD; | 297 | ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD; |
| 298 | ctx->ring_size = 4 * PAGE_SIZE; | ||
| 299 | ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) << | ||
| 300 | GEN8_CTX_ADDRESSING_MODE_SHIFT; | ||
| 301 | ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier); | ||
| 298 | 302 | ||
| 299 | return ctx; | 303 | return ctx; |
| 300 | 304 | ||
| @@ -339,6 +343,40 @@ i915_gem_create_context(struct drm_device *dev, | |||
| 339 | return ctx; | 343 | return ctx; |
| 340 | } | 344 | } |
| 341 | 345 | ||
| 346 | /** | ||
| 347 | * i915_gem_context_create_gvt - create a GVT GEM context | ||
| 348 | * @dev: drm device * | ||
| 349 | * | ||
| 350 | * This function is used to create a GVT specific GEM context. | ||
| 351 | * | ||
| 352 | * Returns: | ||
| 353 | * pointer to i915_gem_context on success, error pointer if failed | ||
| 354 | * | ||
| 355 | */ | ||
| 356 | struct i915_gem_context * | ||
| 357 | i915_gem_context_create_gvt(struct drm_device *dev) | ||
| 358 | { | ||
| 359 | struct i915_gem_context *ctx; | ||
| 360 | int ret; | ||
| 361 | |||
| 362 | if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) | ||
| 363 | return ERR_PTR(-ENODEV); | ||
| 364 | |||
| 365 | ret = i915_mutex_lock_interruptible(dev); | ||
| 366 | if (ret) | ||
| 367 | return ERR_PTR(ret); | ||
| 368 | |||
| 369 | ctx = i915_gem_create_context(dev, NULL); | ||
| 370 | if (IS_ERR(ctx)) | ||
| 371 | goto out; | ||
| 372 | |||
| 373 | ctx->execlists_force_single_submission = true; | ||
| 374 | ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */ | ||
| 375 | out: | ||
| 376 | mutex_unlock(&dev->struct_mutex); | ||
| 377 | return ctx; | ||
| 378 | } | ||
| 379 | |||
| 342 | static void i915_gem_context_unpin(struct i915_gem_context *ctx, | 380 | static void i915_gem_context_unpin(struct i915_gem_context *ctx, |
| 343 | struct intel_engine_cs *engine) | 381 | struct intel_engine_cs *engine) |
| 344 | { | 382 | { |
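A sketch of how a GVT component might consume the new helper; example_gvt_attach is illustrative and not part of this series:

/* Illustrative caller: create the shadow context a GVT workload
 * submits through. */
static int example_gvt_attach(struct drm_device *dev)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_create_gvt(dev);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* -ENODEV when CONFIG_DRM_I915_GVT=n */

	/* ctx arrives with ring_size preset to the 512-page maximum and
	 * execlists_force_single_submission already set. */
	return 0;
}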
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/i915_gem_dmabuf.h new file mode 100644 index 000000000000..91315557e421 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.h | |||
| @@ -0,0 +1,45 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2016 Intel Corporation | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice (including the next | ||
| 12 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 13 | * Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 21 | * DEALINGS IN THE SOFTWARE. | ||
| 22 | * | ||
| 23 | */ | ||
| 24 | |||
| 25 | #ifndef _I915_GEM_DMABUF_H_ | ||
| 26 | #define _I915_GEM_DMABUF_H_ | ||
| 27 | |||
| 28 | #include <linux/dma-buf.h> | ||
| 29 | |||
| 30 | static inline struct reservation_object * | ||
| 31 | i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj) | ||
| 32 | { | ||
| 33 | struct dma_buf *dma_buf; | ||
| 34 | |||
| 35 | if (obj->base.dma_buf) | ||
| 36 | dma_buf = obj->base.dma_buf; | ||
| 37 | else if (obj->base.import_attach) | ||
| 38 | dma_buf = obj->base.import_attach->dmabuf; | ||
| 39 | else | ||
| 40 | return NULL; | ||
| 41 | |||
| 42 | return dma_buf->resv; | ||
| 43 | } | ||
| 44 | |||
| 45 | #endif | ||
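Only exported or imported objects carry a reservation object, so callers of the new helper must tolerate a NULL return. An illustrative guard, assuming the reservation API of this era (example_wait_shared is a made-up name):

/* Wait for outstanding fences on a shared object, skipping objects
 * that were never exported/imported. Illustrative only. */
static void example_wait_shared(struct drm_i915_gem_object *obj)
{
	struct reservation_object *resv;

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (!resv)
		return;		/* not a dma-buf: nothing external to wait on */

	reservation_object_wait_timeout_rcu(resv, true, false,
					    MAX_SCHEDULE_TIMEOUT);
}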
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 46684779d4d6..5890017b9832 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
| @@ -2355,6 +2355,28 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) | |||
| 2355 | #endif | 2355 | #endif |
| 2356 | } | 2356 | } |
| 2357 | 2357 | ||
| 2358 | static void gen8_ggtt_insert_page(struct i915_address_space *vm, | ||
| 2359 | dma_addr_t addr, | ||
| 2360 | uint64_t offset, | ||
| 2361 | enum i915_cache_level level, | ||
| 2362 | u32 unused) | ||
| 2363 | { | ||
| 2364 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | ||
| 2365 | gen8_pte_t __iomem *pte = | ||
| 2366 | (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + | ||
| 2367 | (offset >> PAGE_SHIFT); | ||
| 2368 | int rpm_atomic_seq; | ||
| 2369 | |||
| 2370 | rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); | ||
| 2371 | |||
| 2372 | gen8_set_pte(pte, gen8_pte_encode(addr, level, true)); | ||
| 2373 | |||
| 2374 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); | ||
| 2375 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | ||
| 2376 | |||
| 2377 | assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); | ||
| 2378 | } | ||
| 2379 | |||
| 2358 | static void gen8_ggtt_insert_entries(struct i915_address_space *vm, | 2380 | static void gen8_ggtt_insert_entries(struct i915_address_space *vm, |
| 2359 | struct sg_table *st, | 2381 | struct sg_table *st, |
| 2360 | uint64_t start, | 2382 | uint64_t start, |
| @@ -2424,6 +2446,28 @@ static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm, | |||
| 2424 | stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL); | 2446 | stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL); |
| 2425 | } | 2447 | } |
| 2426 | 2448 | ||
| 2449 | static void gen6_ggtt_insert_page(struct i915_address_space *vm, | ||
| 2450 | dma_addr_t addr, | ||
| 2451 | uint64_t offset, | ||
| 2452 | enum i915_cache_level level, | ||
| 2453 | u32 flags) | ||
| 2454 | { | ||
| 2455 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | ||
| 2456 | gen6_pte_t __iomem *pte = | ||
| 2457 | (gen6_pte_t __iomem *)dev_priv->ggtt.gsm + | ||
| 2458 | (offset >> PAGE_SHIFT); | ||
| 2459 | int rpm_atomic_seq; | ||
| 2460 | |||
| 2461 | rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); | ||
| 2462 | |||
| 2463 | iowrite32(vm->pte_encode(addr, level, true, flags), pte); | ||
| 2464 | |||
| 2465 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); | ||
| 2466 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | ||
| 2467 | |||
| 2468 | assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); | ||
| 2469 | } | ||
| 2470 | |||
| 2427 | /* | 2471 | /* |
| 2428 | * Binds an object into the global gtt with the specified cache level. The object | 2472 | * Binds an object into the global gtt with the specified cache level. The object |
| 2429 | * will be accessible to the GPU via commands whose operands reference offsets | 2473 | * will be accessible to the GPU via commands whose operands reference offsets |
| @@ -2543,6 +2587,24 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, | |||
| 2543 | assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); | 2587 | assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); |
| 2544 | } | 2588 | } |
| 2545 | 2589 | ||
| 2590 | static void i915_ggtt_insert_page(struct i915_address_space *vm, | ||
| 2591 | dma_addr_t addr, | ||
| 2592 | uint64_t offset, | ||
| 2593 | enum i915_cache_level cache_level, | ||
| 2594 | u32 unused) | ||
| 2595 | { | ||
| 2596 | struct drm_i915_private *dev_priv = to_i915(vm->dev); | ||
| 2597 | unsigned int flags = (cache_level == I915_CACHE_NONE) ? | ||
| 2598 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; | ||
| 2599 | int rpm_atomic_seq; | ||
| 2600 | |||
| 2601 | rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); | ||
| 2602 | |||
| 2603 | intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); | ||
| 2604 | |||
| 2605 | assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); | ||
| 2606 | } | ||
| 2607 | |||
| 2546 | static void i915_ggtt_insert_entries(struct i915_address_space *vm, | 2608 | static void i915_ggtt_insert_entries(struct i915_address_space *vm, |
| 2547 | struct sg_table *pages, | 2609 | struct sg_table *pages, |
| 2548 | uint64_t start, | 2610 | uint64_t start, |
| @@ -2732,11 +2794,9 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, | |||
| 2732 | i915_address_space_init(&ggtt->base, dev_priv); | 2794 | i915_address_space_init(&ggtt->base, dev_priv); |
| 2733 | ggtt->base.total += PAGE_SIZE; | 2795 | ggtt->base.total += PAGE_SIZE; |
| 2734 | 2796 | ||
| 2735 | if (intel_vgpu_active(dev_priv)) { | 2797 | ret = intel_vgt_balloon(dev_priv); |
| 2736 | ret = intel_vgt_balloon(dev); | 2798 | if (ret) |
| 2737 | if (ret) | 2799 | return ret; |
| 2738 | return ret; | ||
| 2739 | } | ||
| 2740 | 2800 | ||
| 2741 | if (!HAS_LLC(dev)) | 2801 | if (!HAS_LLC(dev)) |
| 2742 | ggtt->base.mm.color_adjust = i915_gtt_color_adjust; | 2802 | ggtt->base.mm.color_adjust = i915_gtt_color_adjust; |
| @@ -2836,8 +2896,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev) | |||
| 2836 | i915_gem_cleanup_stolen(dev); | 2896 | i915_gem_cleanup_stolen(dev); |
| 2837 | 2897 | ||
| 2838 | if (drm_mm_initialized(&ggtt->base.mm)) { | 2898 | if (drm_mm_initialized(&ggtt->base.mm)) { |
| 2839 | if (intel_vgpu_active(dev_priv)) | 2899 | intel_vgt_deballoon(dev_priv); |
| 2840 | intel_vgt_deballoon(); | ||
| 2841 | 2900 | ||
| 2842 | drm_mm_takedown(&ggtt->base.mm); | 2901 | drm_mm_takedown(&ggtt->base.mm); |
| 2843 | list_del(&ggtt->base.global_link); | 2902 | list_del(&ggtt->base.global_link); |
| @@ -3076,7 +3135,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) | |||
| 3076 | 3135 | ||
| 3077 | ggtt->base.bind_vma = ggtt_bind_vma; | 3136 | ggtt->base.bind_vma = ggtt_bind_vma; |
| 3078 | ggtt->base.unbind_vma = ggtt_unbind_vma; | 3137 | ggtt->base.unbind_vma = ggtt_unbind_vma; |
| 3079 | 3138 | ggtt->base.insert_page = gen8_ggtt_insert_page; | |
| 3080 | ggtt->base.clear_range = nop_clear_range; | 3139 | ggtt->base.clear_range = nop_clear_range; |
| 3081 | if (!USES_FULL_PPGTT(dev_priv)) | 3140 | if (!USES_FULL_PPGTT(dev_priv)) |
| 3082 | ggtt->base.clear_range = gen8_ggtt_clear_range; | 3141 | ggtt->base.clear_range = gen8_ggtt_clear_range; |
| @@ -3116,6 +3175,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) | |||
| 3116 | ret = ggtt_probe_common(dev, ggtt->size); | 3175 | ret = ggtt_probe_common(dev, ggtt->size); |
| 3117 | 3176 | ||
| 3118 | ggtt->base.clear_range = gen6_ggtt_clear_range; | 3177 | ggtt->base.clear_range = gen6_ggtt_clear_range; |
| 3178 | ggtt->base.insert_page = gen6_ggtt_insert_page; | ||
| 3119 | ggtt->base.insert_entries = gen6_ggtt_insert_entries; | 3179 | ggtt->base.insert_entries = gen6_ggtt_insert_entries; |
| 3120 | ggtt->base.bind_vma = ggtt_bind_vma; | 3180 | ggtt->base.bind_vma = ggtt_bind_vma; |
| 3121 | ggtt->base.unbind_vma = ggtt_unbind_vma; | 3181 | ggtt->base.unbind_vma = ggtt_unbind_vma; |
| @@ -3147,6 +3207,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) | |||
| 3147 | &ggtt->mappable_base, &ggtt->mappable_end); | 3207 | &ggtt->mappable_base, &ggtt->mappable_end); |
| 3148 | 3208 | ||
| 3149 | ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev); | 3209 | ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev); |
| 3210 | ggtt->base.insert_page = i915_ggtt_insert_page; | ||
| 3150 | ggtt->base.insert_entries = i915_ggtt_insert_entries; | 3211 | ggtt->base.insert_entries = i915_ggtt_insert_entries; |
| 3151 | ggtt->base.clear_range = i915_ggtt_clear_range; | 3212 | ggtt->base.clear_range = i915_ggtt_clear_range; |
| 3152 | ggtt->base.bind_vma = ggtt_bind_vma; | 3213 | ggtt->base.bind_vma = ggtt_bind_vma; |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 62be77cac5cd..163b564fb87d 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h | |||
| @@ -319,6 +319,11 @@ struct i915_address_space { | |||
| 319 | uint64_t start, | 319 | uint64_t start, |
| 320 | uint64_t length, | 320 | uint64_t length, |
| 321 | bool use_scratch); | 321 | bool use_scratch); |
| 322 | void (*insert_page)(struct i915_address_space *vm, | ||
| 323 | dma_addr_t addr, | ||
| 324 | uint64_t offset, | ||
| 325 | enum i915_cache_level cache_level, | ||
| 326 | u32 flags); | ||
| 322 | void (*insert_entries)(struct i915_address_space *vm, | 327 | void (*insert_entries)(struct i915_address_space *vm, |
| 323 | struct sg_table *st, | 328 | struct sg_table *st, |
| 324 | uint64_t start, | 329 | uint64_t start, |
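insert_page complements insert_entries: the latter binds a whole sg_table, while the new hook (re)points exactly one PTE, which is what lets the pwrite fast path above recycle a single reserved GGTT slot per page. A minimal sketch of a caller, mirroring that hunk (example_map_one_page is an illustrative name; barriers elided for brevity):

/* (Re)point one reserved GGTT slot at page 'pg' of an object. */
static void example_map_one_page(struct i915_ggtt *ggtt,
				 struct drm_i915_gem_object *obj,
				 uint64_t gtt_offset, unsigned long pg)
{
	dma_addr_t addr = i915_gem_object_get_dma_address(obj, pg);

	ggtt->base.insert_page(&ggtt->base, addr, gtt_offset,
			       I915_CACHE_NONE, 0);
}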
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 7c93327b70fe..b7c1b5fb61ea 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c | |||
| @@ -94,6 +94,7 @@ free_gem: | |||
| 94 | 94 | ||
| 95 | static int render_state_setup(struct render_state *so) | 95 | static int render_state_setup(struct render_state *so) |
| 96 | { | 96 | { |
| 97 | struct drm_device *dev = so->obj->base.dev; | ||
| 97 | const struct intel_renderstate_rodata *rodata = so->rodata; | 98 | const struct intel_renderstate_rodata *rodata = so->rodata; |
| 98 | unsigned int i = 0, reloc_index = 0; | 99 | unsigned int i = 0, reloc_index = 0; |
| 99 | struct page *page; | 100 | struct page *page; |
| @@ -135,6 +136,33 @@ static int render_state_setup(struct render_state *so) | |||
| 135 | 136 | ||
| 136 | so->aux_batch_offset = i * sizeof(u32); | 137 | so->aux_batch_offset = i * sizeof(u32); |
| 137 | 138 | ||
| 139 | if (HAS_POOLED_EU(dev)) { | ||
| 140 | /* | ||
| 141 | * We always program 3x6 pool config but depending upon which | ||
| 142 | * subslice is disabled HW drops down to appropriate config | ||
| 143 | * shown below. | ||
| 144 | * | ||
| 145 | * In the below table 2x6 config always refers to | ||
| 146 | * fused-down version, native 2x6 is not available and can | ||
| 147 | * be ignored | ||
| 148 | * | ||
| 149 | * SNo subslices config eu pool configuration | ||
| 150 | * ----------------------------------------------------------- | ||
| 151 | * 1 3 subslices enabled (3x6) - 0x00777000 (9+9) | ||
| 152 | * 2 ss0 disabled (2x6) - 0x00777000 (3+9) | ||
| 153 | * 3 ss1 disabled (2x6) - 0x00770000 (6+6) | ||
| 154 | * 4 ss2 disabled (2x6) - 0x00007000 (9+3) | ||
| 155 | */ | ||
| 156 | u32 eu_pool_config = 0x00777000; | ||
| 157 | |||
| 158 | OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE); | ||
| 159 | OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE); | ||
| 160 | OUT_BATCH(d, i, eu_pool_config); | ||
| 161 | OUT_BATCH(d, i, 0); | ||
| 162 | OUT_BATCH(d, i, 0); | ||
| 163 | OUT_BATCH(d, i, 0); | ||
| 164 | } | ||
| 165 | |||
| 138 | OUT_BATCH(d, i, MI_BATCH_BUFFER_END); | 166 | OUT_BATCH(d, i, MI_BATCH_BUFFER_END); |
| 139 | so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset; | 167 | so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset; |
| 140 | 168 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index f9253f2b7ba0..e9cd82290408 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
| @@ -55,8 +55,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, | |||
| 55 | return -ENODEV; | 55 | return -ENODEV; |
| 56 | 56 | ||
| 57 | /* See the comment at the drm_mm_init() call for more about this check. | 57 | /* See the comment at the drm_mm_init() call for more about this check. |
| 58 | * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */ | 58 | * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete) |
| 59 | if (IS_GEN8(dev_priv) && start < 4096) | 59 | */ |
| 60 | if (start < 4096 && (IS_GEN8(dev_priv) || | ||
| 61 | IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0))) | ||
| 60 | start = 4096; | 62 | start = 4096; |
| 61 | 63 | ||
| 62 | mutex_lock(&dev_priv->mm.stolen_lock); | 64 | mutex_lock(&dev_priv->mm.stolen_lock); |
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index ac72451c571c..22a55ac4e51c 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c | |||
| @@ -174,94 +174,88 @@ static int host2guc_sample_forcewake(struct intel_guc *guc, | |||
| 174 | * client object which contains the page being used for the doorbell | 174 | * client object which contains the page being used for the doorbell |
| 175 | */ | 175 | */ |
| 176 | 176 | ||
| 177 | static void guc_init_doorbell(struct intel_guc *guc, | 177 | static int guc_update_doorbell_id(struct intel_guc *guc, |
| 178 | struct i915_guc_client *client) | 178 | struct i915_guc_client *client, |
| 179 | u16 new_id) | ||
| 179 | { | 180 | { |
| 181 | struct sg_table *sg = guc->ctx_pool_obj->pages; | ||
| 182 | void *doorbell_bitmap = guc->doorbell_bitmap; | ||
| 180 | struct guc_doorbell_info *doorbell; | 183 | struct guc_doorbell_info *doorbell; |
| 184 | struct guc_context_desc desc; | ||
| 185 | size_t len; | ||
| 181 | 186 | ||
| 182 | doorbell = client->client_base + client->doorbell_offset; | 187 | doorbell = client->client_base + client->doorbell_offset; |
| 183 | 188 | ||
| 184 | doorbell->db_status = GUC_DOORBELL_ENABLED; | 189 | if (client->doorbell_id != GUC_INVALID_DOORBELL_ID && |
| 185 | doorbell->cookie = 0; | 190 | test_bit(client->doorbell_id, doorbell_bitmap)) { |
| 186 | } | 191 | /* Deactivate the old doorbell */ |
| 187 | 192 | doorbell->db_status = GUC_DOORBELL_DISABLED; | |
| 188 | static int guc_ring_doorbell(struct i915_guc_client *gc) | 193 | (void)host2guc_release_doorbell(guc, client); |
| 189 | { | 194 | __clear_bit(client->doorbell_id, doorbell_bitmap); |
| 190 | struct guc_process_desc *desc; | 195 | } |
| 191 | union guc_doorbell_qw db_cmp, db_exc, db_ret; | ||
| 192 | union guc_doorbell_qw *db; | ||
| 193 | int attempt = 2, ret = -EAGAIN; | ||
| 194 | |||
| 195 | desc = gc->client_base + gc->proc_desc_offset; | ||
| 196 | |||
| 197 | /* Update the tail so it is visible to GuC */ | ||
| 198 | desc->tail = gc->wq_tail; | ||
| 199 | |||
| 200 | /* current cookie */ | ||
| 201 | db_cmp.db_status = GUC_DOORBELL_ENABLED; | ||
| 202 | db_cmp.cookie = gc->cookie; | ||
| 203 | |||
| 204 | /* cookie to be updated */ | ||
| 205 | db_exc.db_status = GUC_DOORBELL_ENABLED; | ||
| 206 | db_exc.cookie = gc->cookie + 1; | ||
| 207 | if (db_exc.cookie == 0) | ||
| 208 | db_exc.cookie = 1; | ||
| 209 | |||
| 210 | /* pointer of current doorbell cacheline */ | ||
| 211 | db = gc->client_base + gc->doorbell_offset; | ||
| 212 | |||
| 213 | while (attempt--) { | ||
| 214 | /* lets ring the doorbell */ | ||
| 215 | db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db, | ||
| 216 | db_cmp.value_qw, db_exc.value_qw); | ||
| 217 | |||
| 218 | /* if the exchange was successfully executed */ | ||
| 219 | if (db_ret.value_qw == db_cmp.value_qw) { | ||
| 220 | /* db was successfully rung */ | ||
| 221 | gc->cookie = db_exc.cookie; | ||
| 222 | ret = 0; | ||
| 223 | break; | ||
| 224 | } | ||
| 225 | 196 | ||
| 226 | /* XXX: doorbell was lost and need to acquire it again */ | 197 | /* Update the GuC's idea of the doorbell ID */ |
| 227 | if (db_ret.db_status == GUC_DOORBELL_DISABLED) | 198 | len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), |
| 228 | break; | 199 | sizeof(desc) * client->ctx_index); |
| 200 | if (len != sizeof(desc)) | ||
| 201 | return -EFAULT; | ||
| 202 | desc.db_id = new_id; | ||
| 203 | len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), | ||
| 204 | sizeof(desc) * client->ctx_index); | ||
| 205 | if (len != sizeof(desc)) | ||
| 206 | return -EFAULT; | ||
| 229 | 207 | ||
| 230 | DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n", | 208 | client->doorbell_id = new_id; |
| 231 | db_cmp.cookie, db_ret.cookie); | 209 | if (new_id == GUC_INVALID_DOORBELL_ID) |
| 210 | return 0; | ||
| 232 | 211 | ||
| 233 | /* update the cookie to newly read cookie from GuC */ | 212 | /* Activate the new doorbell */ |
| 234 | db_cmp.cookie = db_ret.cookie; | 213 | __set_bit(new_id, doorbell_bitmap); |
| 235 | db_exc.cookie = db_ret.cookie + 1; | 214 | doorbell->cookie = 0; |
| 236 | if (db_exc.cookie == 0) | 215 | doorbell->db_status = GUC_DOORBELL_ENABLED; |
| 237 | db_exc.cookie = 1; | 216 | return host2guc_allocate_doorbell(guc, client); |
| 238 | } | 217 | } |
| 239 | 218 | ||
| 240 | return ret; | 219 | static int guc_init_doorbell(struct intel_guc *guc, |
| 220 | struct i915_guc_client *client, | ||
| 221 | uint16_t db_id) | ||
| 222 | { | ||
| 223 | return guc_update_doorbell_id(guc, client, db_id); | ||
| 241 | } | 224 | } |
| 242 | 225 | ||
| 243 | static void guc_disable_doorbell(struct intel_guc *guc, | 226 | static void guc_disable_doorbell(struct intel_guc *guc, |
| 244 | struct i915_guc_client *client) | 227 | struct i915_guc_client *client) |
| 245 | { | 228 | { |
| 246 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | 229 | (void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID); |
| 247 | struct guc_doorbell_info *doorbell; | ||
| 248 | i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id); | ||
| 249 | int value; | ||
| 250 | 230 | ||
| 251 | doorbell = client->client_base + client->doorbell_offset; | 231 | /* XXX: wait for any interrupts */ |
| 252 | 232 | /* XXX: wait for workqueue to drain */ | |
| 253 | doorbell->db_status = GUC_DOORBELL_DISABLED; | 233 | } |
| 254 | 234 | ||
| 255 | I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID); | 235 | static uint16_t |
| 236 | select_doorbell_register(struct intel_guc *guc, uint32_t priority) | ||
| 237 | { | ||
| 238 | /* | ||
| 239 | * The bitmap tracks which doorbell registers are currently in use. | ||
| 240 | * It is split into two halves; the first half is used for normal | ||
| 241 | * priority contexts, the second half for high-priority ones. | ||
| 242 | * Note that logically higher priorities are numerically less than | ||
| 243 | * normal ones, so the test below means "is it high-priority?" | ||
| 244 | */ | ||
| 245 | const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH); | ||
| 246 | const uint16_t half = GUC_MAX_DOORBELLS / 2; | ||
| 247 | const uint16_t start = hi_pri ? half : 0; | ||
| 248 | const uint16_t end = start + half; | ||
| 249 | uint16_t id; | ||
| 256 | 250 | ||
| 257 | value = I915_READ(drbreg); | 251 | id = find_next_zero_bit(guc->doorbell_bitmap, end, start); |
| 258 | WARN_ON((value & GEN8_DRB_VALID) != 0); | 252 | if (id == end) |
| 253 | id = GUC_INVALID_DOORBELL_ID; | ||
| 259 | 254 | ||
| 260 | I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0); | 255 | DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n", |
| 261 | I915_WRITE(drbreg, 0); | 256 | hi_pri ? "high" : "normal", id); |
| 262 | 257 | ||
| 263 | /* XXX: wait for any interrupts */ | 258 | return id; |
| 264 | /* XXX: wait for workqueue to drain */ | ||
| 265 | } | 259 | } |
| 266 | 260 | ||
| 267 | /* | 261 | /* |
| @@ -288,37 +282,6 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc) | |||
| 288 | return offset; | 282 | return offset; |
| 289 | } | 283 | } |
| 290 | 284 | ||
| 291 | static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority) | ||
| 292 | { | ||
| 293 | /* | ||
| 294 | * The bitmap is split into two halves; the first half is used for | ||
| 295 | * normal priority contexts, the second half for high-priority ones. | ||
| 296 | * Note that logically higher priorities are numerically less than | ||
| 297 | * normal ones, so the test below means "is it high-priority?" | ||
| 298 | */ | ||
| 299 | const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH); | ||
| 300 | const uint16_t half = GUC_MAX_DOORBELLS / 2; | ||
| 301 | const uint16_t start = hi_pri ? half : 0; | ||
| 302 | const uint16_t end = start + half; | ||
| 303 | uint16_t id; | ||
| 304 | |||
| 305 | id = find_next_zero_bit(guc->doorbell_bitmap, end, start); | ||
| 306 | if (id == end) | ||
| 307 | id = GUC_INVALID_DOORBELL_ID; | ||
| 308 | else | ||
| 309 | bitmap_set(guc->doorbell_bitmap, id, 1); | ||
| 310 | |||
| 311 | DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n", | ||
| 312 | hi_pri ? "high" : "normal", id); | ||
| 313 | |||
| 314 | return id; | ||
| 315 | } | ||
| 316 | |||
| 317 | static void release_doorbell(struct intel_guc *guc, uint16_t id) | ||
| 318 | { | ||
| 319 | bitmap_clear(guc->doorbell_bitmap, id, 1); | ||
| 320 | } | ||
| 321 | |||
| 322 | /* | 285 | /* |
| 323 | * Initialise the process descriptor shared with the GuC firmware. | 286 | * Initialise the process descriptor shared with the GuC firmware. |
| 324 | */ | 287 | */ |
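select_doorbell_register() keeps the removed assign_doorbell()'s split-bitmap policy but no longer sets the bit itself; activation now happens in guc_update_doorbell_id(), so selection and activation are decoupled. A standalone sketch of the halving policy, with a plain scan standing in for find_next_zero_bit() (all names and sizes below are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define MAX_DOORBELLS	256u	/* stands in for GUC_MAX_DOORBELLS */
#define INVALID_DB	0xffffu	/* stands in for GUC_INVALID_DOORBELL_ID */
#define PRIO_HIGH	1u	/* numerically lower == logically higher */

/* Normal-priority clients search the first half of the id space,
 * high-priority clients the second half. */
static uint16_t pick_doorbell(const bool *in_use, uint32_t priority)
{
	const bool hi_pri = (priority <= PRIO_HIGH);
	const uint16_t half = MAX_DOORBELLS / 2;
	const uint16_t start = hi_pri ? half : 0;
	const uint16_t end = start + half;
	uint16_t id;

	for (id = start; id < end; id++)
		if (!in_use[id])
			return id;

	return INVALID_DB;
}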
| @@ -543,6 +506,61 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc, | |||
| 543 | kunmap_atomic(base); | 506 | kunmap_atomic(base); |
| 544 | } | 507 | } |
| 545 | 508 | ||
| 509 | static int guc_ring_doorbell(struct i915_guc_client *gc) | ||
| 510 | { | ||
| 511 | struct guc_process_desc *desc; | ||
| 512 | union guc_doorbell_qw db_cmp, db_exc, db_ret; | ||
| 513 | union guc_doorbell_qw *db; | ||
| 514 | int attempt = 2, ret = -EAGAIN; | ||
| 515 | |||
| 516 | desc = gc->client_base + gc->proc_desc_offset; | ||
| 517 | |||
| 518 | /* Update the tail so it is visible to GuC */ | ||
| 519 | desc->tail = gc->wq_tail; | ||
| 520 | |||
| 521 | /* current cookie */ | ||
| 522 | db_cmp.db_status = GUC_DOORBELL_ENABLED; | ||
| 523 | db_cmp.cookie = gc->cookie; | ||
| 524 | |||
| 525 | /* cookie to be updated */ | ||
| 526 | db_exc.db_status = GUC_DOORBELL_ENABLED; | ||
| 527 | db_exc.cookie = gc->cookie + 1; | ||
| 528 | if (db_exc.cookie == 0) | ||
| 529 | db_exc.cookie = 1; | ||
| 530 | |||
| 531 | /* pointer of current doorbell cacheline */ | ||
| 532 | db = gc->client_base + gc->doorbell_offset; | ||
| 533 | |||
| 534 | while (attempt--) { | ||
| 535 | /* lets ring the doorbell */ | ||
| 536 | db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db, | ||
| 537 | db_cmp.value_qw, db_exc.value_qw); | ||
| 538 | |||
| 539 | /* if the exchange was successfully executed */ | ||
| 540 | if (db_ret.value_qw == db_cmp.value_qw) { | ||
| 541 | /* db was successfully rung */ | ||
| 542 | gc->cookie = db_exc.cookie; | ||
| 543 | ret = 0; | ||
| 544 | break; | ||
| 545 | } | ||
| 546 | |||
| 547 | /* XXX: doorbell was lost and need to acquire it again */ | ||
| 548 | if (db_ret.db_status == GUC_DOORBELL_DISABLED) | ||
| 549 | break; | ||
| 550 | |||
| 551 | DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n", | ||
| 552 | db_cmp.cookie, db_ret.cookie); | ||
| 553 | |||
| 554 | /* update the cookie to newly read cookie from GuC */ | ||
| 555 | db_cmp.cookie = db_ret.cookie; | ||
| 556 | db_exc.cookie = db_ret.cookie + 1; | ||
| 557 | if (db_exc.cookie == 0) | ||
| 558 | db_exc.cookie = 1; | ||
| 559 | } | ||
| 560 | |||
| 561 | return ret; | ||
| 562 | } | ||
| 563 | |||
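guc_ring_doorbell() moves below guc_add_workqueue_item() unchanged; the protocol it implements is a 64-bit compare-and-swap of the {status, cookie} pair in which the cookie increments on every successful ring and skips zero. A standalone sketch of that wrap rule using C11 atomics (the driver itself uses atomic64_cmpxchg on the client's io-mapped doorbell cacheline; ring_once is an illustrative name):

#include <stdatomic.h>
#include <stdint.h>

#define DB_ENABLED 1u

union doorbell_qw {
	struct {
		uint32_t db_status;
		uint32_t cookie;
	};
	uint64_t value_qw;
};

/* One ring attempt: CAS {ENABLED, cookie} -> {ENABLED, cookie + 1}, where
 * the new cookie skips 0 so 0 can never look like a live value. Returns 0
 * on success; the caller retries (bounded) on failure. */
static int ring_once(_Atomic uint64_t *db, uint32_t *cookie)
{
	union doorbell_qw cmp = { .db_status = DB_ENABLED, .cookie = *cookie };
	union doorbell_qw exc = { .db_status = DB_ENABLED,
				  .cookie = *cookie + 1 };
	uint64_t expected;

	if (exc.cookie == 0)
		exc.cookie = 1;

	expected = cmp.value_qw;
	if (atomic_compare_exchange_strong(db, &expected, exc.value_qw)) {
		*cookie = exc.cookie;
		return 0;
	}
	return -1;
}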
| 546 | /** | 564 | /** |
| 547 | * i915_guc_submit() - Submit commands through GuC | 565 | * i915_guc_submit() - Submit commands through GuC |
| 548 | * @rq: request associated with the commands | 566 | * @rq: request associated with the commands |
| @@ -591,7 +609,7 @@ int i915_guc_submit(struct drm_i915_gem_request *rq) | |||
| 591 | 609 | ||
| 592 | /** | 610 | /** |
| 593 | * gem_allocate_guc_obj() - Allocate gem object for GuC usage | 611 | * gem_allocate_guc_obj() - Allocate gem object for GuC usage |
| 594 | * @dev: drm device | 612 | * @dev_priv: driver private data structure |
| 595 | * @size: size of object | 613 | * @size: size of object |
| 596 | * | 614 | * |
| 597 | * This is a wrapper to create a gem obj. In order to use it inside GuC, the | 615 | * This is a wrapper to create a gem obj. In order to use it inside GuC, the |
| @@ -600,13 +618,12 @@ int i915_guc_submit(struct drm_i915_gem_request *rq) | |||
| 600 | * | 618 | * |
| 601 | * Return: A drm_i915_gem_object if successful, otherwise NULL. | 619 | * Return: A drm_i915_gem_object if successful, otherwise NULL. |
| 602 | */ | 620 | */ |
| 603 | static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev, | 621 | static struct drm_i915_gem_object * |
| 604 | u32 size) | 622 | gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size) |
| 605 | { | 623 | { |
| 606 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 607 | struct drm_i915_gem_object *obj; | 624 | struct drm_i915_gem_object *obj; |
| 608 | 625 | ||
| 609 | obj = i915_gem_object_create(dev, size); | 626 | obj = i915_gem_object_create(dev_priv->dev, size); |
| 610 | if (IS_ERR(obj)) | 627 | if (IS_ERR(obj)) |
| 611 | return NULL; | 628 | return NULL; |
| 612 | 629 | ||
| @@ -642,10 +659,10 @@ static void gem_release_guc_obj(struct drm_i915_gem_object *obj) | |||
| 642 | drm_gem_object_unreference(&obj->base); | 659 | drm_gem_object_unreference(&obj->base); |
| 643 | } | 660 | } |
| 644 | 661 | ||
| 645 | static void guc_client_free(struct drm_device *dev, | 662 | static void |
| 646 | struct i915_guc_client *client) | 663 | guc_client_free(struct drm_i915_private *dev_priv, |
| 664 | struct i915_guc_client *client) | ||
| 647 | { | 665 | { |
| 648 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 649 | struct intel_guc *guc = &dev_priv->guc; | 666 | struct intel_guc *guc = &dev_priv->guc; |
| 650 | 667 | ||
| 651 | if (!client) | 668 | if (!client) |
| @@ -658,17 +675,10 @@ static void guc_client_free(struct drm_device *dev, | |||
| 658 | 675 | ||
| 659 | if (client->client_base) { | 676 | if (client->client_base) { |
| 660 | /* | 677 | /* |
| 661 | * If we got as far as setting up a doorbell, make sure | 678 | * If we got as far as setting up a doorbell, make sure we |
| 662 | * we shut it down before unmapping & deallocating the | 679 | * shut it down before unmapping & deallocating the memory. |
| 663 | * memory. So first disable the doorbell, then tell the | ||
| 664 | * GuC that we've finished with it, finally deallocate | ||
| 665 | * it in our bitmap | ||
| 666 | */ | 680 | */ |
| 667 | if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) { | 681 | guc_disable_doorbell(guc, client); |
| 668 | guc_disable_doorbell(guc, client); | ||
| 669 | host2guc_release_doorbell(guc, client); | ||
| 670 | release_doorbell(guc, client->doorbell_id); | ||
| 671 | } | ||
| 672 | 682 | ||
| 673 | kunmap(kmap_to_page(client->client_base)); | 683 | kunmap(kmap_to_page(client->client_base)); |
| 674 | } | 684 | } |
| @@ -683,9 +693,51 @@ static void guc_client_free(struct drm_device *dev, | |||
| 683 | kfree(client); | 693 | kfree(client); |
| 684 | } | 694 | } |
| 685 | 695 | ||
| 696 | /* | ||
| 697 | * Borrow the first client to set up & tear down every doorbell | ||
| 698 | * in turn, to ensure that all doorbell h/w is (re)initialised. | ||
| 699 | */ | ||
| 700 | static void guc_init_doorbell_hw(struct intel_guc *guc) | ||
| 701 | { | ||
| 702 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | ||
| 703 | struct i915_guc_client *client = guc->execbuf_client; | ||
| 704 | uint16_t db_id, i; | ||
| 705 | int err; | ||
| 706 | |||
| 707 | db_id = client->doorbell_id; | ||
| 708 | |||
| 709 | for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { | ||
| 710 | i915_reg_t drbreg = GEN8_DRBREGL(i); | ||
| 711 | u32 value = I915_READ(drbreg); | ||
| 712 | |||
| 713 | err = guc_update_doorbell_id(guc, client, i); | ||
| 714 | |||
| 715 | /* Report update failure or unexpectedly active doorbell */ | ||
| 716 | if (err || (i != db_id && (value & GUC_DOORBELL_ENABLED))) | ||
| 717 | DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) was 0x%x, err %d\n", | ||
| 718 | i, drbreg.reg, value, err); | ||
| 719 | } | ||
| 720 | |||
| 721 | /* Restore to original value */ | ||
| 722 | err = guc_update_doorbell_id(guc, client, db_id); | ||
| 723 | if (err) | ||
| 724 | DRM_ERROR("Failed to restore doorbell to %d, err %d\n", | ||
| 725 | db_id, err); | ||
| 726 | |||
| 727 | for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { | ||
| 728 | i915_reg_t drbreg = GEN8_DRBREGL(i); | ||
| 729 | u32 value = I915_READ(drbreg); | ||
| 730 | |||
| 731 | if (i != db_id && (value & GUC_DOORBELL_ENABLED)) | ||
| 732 | DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) finally 0x%x\n", | ||
| 733 | i, drbreg.reg, value); | ||
| 734 | |||
| 735 | } | ||
| 736 | } | ||
| 737 | |||
| 686 | /** | 738 | /** |
| 687 | * guc_client_alloc() - Allocate an i915_guc_client | 739 | * guc_client_alloc() - Allocate an i915_guc_client |
| 688 | * @dev: drm device | 740 | * @dev_priv: driver private data structure |
| 689 | * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW | 741 | * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW |
| 690 | * The kernel client to replace ExecList submission is created with | 742 | * The kernel client to replace ExecList submission is created with |
| 691 | * NORMAL priority. Priority of a client for scheduler can be HIGH, | 743 | * NORMAL priority. Priority of a client for scheduler can be HIGH, |
| @@ -695,14 +747,15 @@ static void guc_client_free(struct drm_device *dev, | |||
| 695 | * | 747 | * |
| 696 | * Return: An i915_guc_client object if success, else NULL. | 748 | * Return: An i915_guc_client object if success, else NULL. |
| 697 | */ | 749 | */ |
| 698 | static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, | 750 | static struct i915_guc_client * |
| 699 | uint32_t priority, | 751 | guc_client_alloc(struct drm_i915_private *dev_priv, |
| 700 | struct i915_gem_context *ctx) | 752 | uint32_t priority, |
| 753 | struct i915_gem_context *ctx) | ||
| 701 | { | 754 | { |
| 702 | struct i915_guc_client *client; | 755 | struct i915_guc_client *client; |
| 703 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 704 | struct intel_guc *guc = &dev_priv->guc; | 756 | struct intel_guc *guc = &dev_priv->guc; |
| 705 | struct drm_i915_gem_object *obj; | 757 | struct drm_i915_gem_object *obj; |
| 758 | uint16_t db_id; | ||
| 706 | 759 | ||
| 707 | client = kzalloc(sizeof(*client), GFP_KERNEL); | 760 | client = kzalloc(sizeof(*client), GFP_KERNEL); |
| 708 | if (!client) | 761 | if (!client) |
| @@ -721,7 +774,7 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, | |||
| 721 | } | 774 | } |
| 722 | 775 | ||
| 723 | /* The first page is doorbell/proc_desc. Two followed pages are wq. */ | 776 | /* The first page is doorbell/proc_desc. Two followed pages are wq. */ |
| 724 | obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE); | 777 | obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE); |
| 725 | if (!obj) | 778 | if (!obj) |
| 726 | goto err; | 779 | goto err; |
| 727 | 780 | ||
| @@ -731,6 +784,11 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, | |||
| 731 | client->wq_offset = GUC_DB_SIZE; | 784 | client->wq_offset = GUC_DB_SIZE; |
| 732 | client->wq_size = GUC_WQ_SIZE; | 785 | client->wq_size = GUC_WQ_SIZE; |
| 733 | 786 | ||
| 787 | db_id = select_doorbell_register(guc, client->priority); | ||
| 788 | if (db_id == GUC_INVALID_DOORBELL_ID) | ||
| 789 | /* XXX: evict a doorbell instead? */ | ||
| 790 | goto err; | ||
| 791 | |||
| 734 | client->doorbell_offset = select_doorbell_cacheline(guc); | 792 | client->doorbell_offset = select_doorbell_cacheline(guc); |
| 735 | 793 | ||
| 736 | /* | 794 | /* |
| @@ -743,29 +801,22 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, | |||
| 743 | else | 801 | else |
| 744 | client->proc_desc_offset = (GUC_DB_SIZE / 2); | 802 | client->proc_desc_offset = (GUC_DB_SIZE / 2); |
| 745 | 803 | ||
| 746 | client->doorbell_id = assign_doorbell(guc, client->priority); | ||
| 747 | if (client->doorbell_id == GUC_INVALID_DOORBELL_ID) | ||
| 748 | /* XXX: evict a doorbell instead */ | ||
| 749 | goto err; | ||
| 750 | |||
| 751 | guc_init_proc_desc(guc, client); | 804 | guc_init_proc_desc(guc, client); |
| 752 | guc_init_ctx_desc(guc, client); | 805 | guc_init_ctx_desc(guc, client); |
| 753 | guc_init_doorbell(guc, client); | 806 | if (guc_init_doorbell(guc, client, db_id)) |
| 754 | |||
| 755 | /* XXX: Any cache flushes needed? General domain mgmt calls? */ | ||
| 756 | |||
| 757 | if (host2guc_allocate_doorbell(guc, client)) | ||
| 758 | goto err; | 807 | goto err; |
| 759 | 808 | ||
| 760 | DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n", | 809 | DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u\n", |
| 761 | priority, client, client->ctx_index, client->doorbell_id); | 810 | priority, client, client->ctx_index); |
| 811 | DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n", | ||
| 812 | client->doorbell_id, client->doorbell_offset); | ||
| 762 | 813 | ||
| 763 | return client; | 814 | return client; |
| 764 | 815 | ||
| 765 | err: | 816 | err: |
| 766 | DRM_ERROR("FAILED to create priority %u GuC client!\n", priority); | 817 | DRM_ERROR("FAILED to create priority %u GuC client!\n", priority); |
| 767 | 818 | ||
| 768 | guc_client_free(dev, client); | 819 | guc_client_free(dev_priv, client); |
| 769 | return NULL; | 820 | return NULL; |
| 770 | } | 821 | } |
| 771 | 822 | ||
| @@ -790,7 +841,7 @@ static void guc_create_log(struct intel_guc *guc) | |||
| 790 | 841 | ||
| 791 | obj = guc->log_obj; | 842 | obj = guc->log_obj; |
| 792 | if (!obj) { | 843 | if (!obj) { |
| 793 | obj = gem_allocate_guc_obj(dev_priv->dev, size); | 844 | obj = gem_allocate_guc_obj(dev_priv, size); |
| 794 | if (!obj) { | 845 | if (!obj) { |
| 795 | /* logging will be off */ | 846 | /* logging will be off */ |
| 796 | i915.guc_log_level = -1; | 847 | i915.guc_log_level = -1; |
| @@ -850,7 +901,7 @@ static void guc_create_ads(struct intel_guc *guc) | |||
| 850 | 901 | ||
| 851 | obj = guc->ads_obj; | 902 | obj = guc->ads_obj; |
| 852 | if (!obj) { | 903 | if (!obj) { |
| 853 | obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size)); | 904 | obj = gem_allocate_guc_obj(dev_priv, PAGE_ALIGN(size)); |
| 854 | if (!obj) | 905 | if (!obj) |
| 855 | return; | 906 | return; |
| 856 | 907 | ||
| @@ -904,41 +955,41 @@ static void guc_create_ads(struct intel_guc *guc) | |||
| 904 | * Set up the memory resources to be shared with the GuC. At this point, | 955 | * Set up the memory resources to be shared with the GuC. At this point, |
| 905 | * we require just one object that can be mapped through the GGTT. | 956 | * we require just one object that can be mapped through the GGTT. |
| 906 | */ | 957 | */ |
| 907 | int i915_guc_submission_init(struct drm_device *dev) | 958 | int i915_guc_submission_init(struct drm_i915_private *dev_priv) |
| 908 | { | 959 | { |
| 909 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 910 | const size_t ctxsize = sizeof(struct guc_context_desc); | 960 | const size_t ctxsize = sizeof(struct guc_context_desc); |
| 911 | const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize; | 961 | const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize; |
| 912 | const size_t gemsize = round_up(poolsize, PAGE_SIZE); | 962 | const size_t gemsize = round_up(poolsize, PAGE_SIZE); |
| 913 | struct intel_guc *guc = &dev_priv->guc; | 963 | struct intel_guc *guc = &dev_priv->guc; |
| 914 | 964 | ||
| 965 | /* Wipe bitmap & delete client in case of reinitialisation */ | ||
| 966 | bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS); | ||
| 967 | i915_guc_submission_disable(dev_priv); | ||
| 968 | |||
| 915 | if (!i915.enable_guc_submission) | 969 | if (!i915.enable_guc_submission) |
| 916 | return 0; /* not enabled */ | 970 | return 0; /* not enabled */ |
| 917 | 971 | ||
| 918 | if (guc->ctx_pool_obj) | 972 | if (guc->ctx_pool_obj) |
| 919 | return 0; /* already allocated */ | 973 | return 0; /* already allocated */ |
| 920 | 974 | ||
| 921 | guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize); | 975 | guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv, gemsize); |
| 922 | if (!guc->ctx_pool_obj) | 976 | if (!guc->ctx_pool_obj) |
| 923 | return -ENOMEM; | 977 | return -ENOMEM; |
| 924 | 978 | ||
| 925 | ida_init(&guc->ctx_ids); | 979 | ida_init(&guc->ctx_ids); |
| 926 | |||
| 927 | guc_create_log(guc); | 980 | guc_create_log(guc); |
| 928 | |||
| 929 | guc_create_ads(guc); | 981 | guc_create_ads(guc); |
| 930 | 982 | ||
| 931 | return 0; | 983 | return 0; |
| 932 | } | 984 | } |
| 933 | 985 | ||
| 934 | int i915_guc_submission_enable(struct drm_device *dev) | 986 | int i915_guc_submission_enable(struct drm_i915_private *dev_priv) |
| 935 | { | 987 | { |
| 936 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 937 | struct intel_guc *guc = &dev_priv->guc; | 988 | struct intel_guc *guc = &dev_priv->guc; |
| 938 | struct i915_guc_client *client; | 989 | struct i915_guc_client *client; |
| 939 | 990 | ||
| 940 | /* client for execbuf submission */ | 991 | /* client for execbuf submission */ |
| 941 | client = guc_client_alloc(dev, | 992 | client = guc_client_alloc(dev_priv, |
| 942 | GUC_CTX_PRIORITY_KMD_NORMAL, | 993 | GUC_CTX_PRIORITY_KMD_NORMAL, |
| 943 | dev_priv->kernel_context); | 994 | dev_priv->kernel_context); |
| 944 | if (!client) { | 995 | if (!client) { |
| @@ -947,24 +998,22 @@ int i915_guc_submission_enable(struct drm_device *dev) | |||
| 947 | } | 998 | } |
| 948 | 999 | ||
| 949 | guc->execbuf_client = client; | 1000 | guc->execbuf_client = client; |
| 950 | |||
| 951 | host2guc_sample_forcewake(guc, client); | 1001 | host2guc_sample_forcewake(guc, client); |
| 1002 | guc_init_doorbell_hw(guc); | ||
| 952 | 1003 | ||
| 953 | return 0; | 1004 | return 0; |
| 954 | } | 1005 | } |
| 955 | 1006 | ||
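Read together with the hunks above, the lifecycle implied by the new dev_priv-based API is: submission_init() (pool allocation, bitmap wipe, stale-client teardown) before firmware load, then submission_enable() to create the execbuf client and cycle the doorbell hardware. A condensed sketch of that assumed ordering (example_guc_bringup is illustrative; the real sequencing lives in intel_guc_setup()):

static int example_guc_bringup(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Safe across reinit: wipes the doorbell bitmap and frees any
	 * stale execbuf client before (re)allocating the context pool. */
	ret = i915_guc_submission_init(dev_priv);
	if (ret)
		return ret;

	/* ... GuC firmware load happens between init and enable ... */

	/* Creates the execbuf client and cycles every doorbell register
	 * via guc_init_doorbell_hw(). */
	ret = i915_guc_submission_enable(dev_priv);
	if (ret)
		i915_guc_submission_fini(dev_priv);

	return ret;
}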
| 956 | void i915_guc_submission_disable(struct drm_device *dev) | 1007 | void i915_guc_submission_disable(struct drm_i915_private *dev_priv) |
| 957 | { | 1008 | { |
| 958 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 959 | struct intel_guc *guc = &dev_priv->guc; | 1009 | struct intel_guc *guc = &dev_priv->guc; |
| 960 | 1010 | ||
| 961 | guc_client_free(dev, guc->execbuf_client); | 1011 | guc_client_free(dev_priv, guc->execbuf_client); |
| 962 | guc->execbuf_client = NULL; | 1012 | guc->execbuf_client = NULL; |
| 963 | } | 1013 | } |
| 964 | 1014 | ||
| 965 | void i915_guc_submission_fini(struct drm_device *dev) | 1015 | void i915_guc_submission_fini(struct drm_i915_private *dev_priv) |
| 966 | { | 1016 | { |
| 967 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 968 | struct intel_guc *guc = &dev_priv->guc; | 1017 | struct intel_guc *guc = &dev_priv->guc; |
| 969 | 1018 | ||
| 970 | gem_release_guc_obj(dev_priv->guc.ads_obj); | 1019 | gem_release_guc_obj(dev_priv->guc.ads_obj); |
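The GuC submission init path is now safe to call again after a reset: it first wipes the doorbell bitmap and tears down any stale execbuf client before re-checking the module parameter. The context-descriptor pool behind it is backed by a GEM object sized by rounding the whole descriptor array up to a page. A standalone sketch of that sizing, with made-up values for the descriptor size and context count (the real GUC_MAX_GPU_CONTEXTS and struct guc_context_desc differ):

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL
    /* generic form; the kernel's round_up() assumes a power-of-two
     * alignment and uses a mask instead of a divide */
    #define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            size_t ctxsize  = 64;             /* assumed descriptor size */
            size_t poolsize = 1024 * ctxsize; /* assumed context count   */
            size_t gemsize  = round_up(poolsize, PAGE_SIZE);

            printf("pool %zu -> GEM object %zu bytes\n", poolsize, gemsize);
            return 0;
    }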
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5c7378374ae6..4378a659d962 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -588,7 +588,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, | |||
| 588 | 588 | ||
| 589 | /** | 589 | /** |
| 590 | * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion | 590 | * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion |
| 591 | * @dev: drm device | 591 | * @dev_priv: i915 device private |
| 592 | */ | 592 | */ |
| 593 | static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) | 593 | static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) |
| 594 | { | 594 | { |
| @@ -2517,7 +2517,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, | |||
| 2517 | 2517 | ||
| 2518 | /** | 2518 | /** |
| 2519 | * i915_reset_and_wakeup - do process context error handling work | 2519 | * i915_reset_and_wakeup - do process context error handling work |
| 2520 | * @dev: drm device | 2520 | * @dev_priv: i915 device private |
| 2521 | * | 2521 | * |
| 2522 | * Fire an error uevent so userspace can see that a hang or error | 2522 | * Fire an error uevent so userspace can see that a hang or error |
| 2523 | * was detected. | 2523 | * was detected. |
| @@ -2674,13 +2674,14 @@ static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv) | |||
| 2674 | 2674 | ||
| 2675 | /** | 2675 | /** |
| 2676 | * i915_handle_error - handle a gpu error | 2676 | * i915_handle_error - handle a gpu error |
| 2677 | * @dev: drm device | 2677 | * @dev_priv: i915 device private |
| 2678 | * @engine_mask: mask representing engines that are hung | 2678 | * @engine_mask: mask representing engines that are hung |
| 2679 | * Do some basic checking of register state at error time and | 2679 | * Do some basic checking of register state at error time and |
| 2680 | * dump it to the syslog. Also call i915_capture_error_state() to make | 2680 | * dump it to the syslog. Also call i915_capture_error_state() to make |
| 2681 | * sure we get a record and make it available in debugfs. Fire a uevent | 2681 | * sure we get a record and make it available in debugfs. Fire a uevent |
| 2682 | * so userspace knows something bad happened (should trigger collection | 2682 | * so userspace knows something bad happened (should trigger collection |
| 2683 | * of a ring dump etc.). | 2683 | * of a ring dump etc.). |
| 2684 | * @fmt: Error message format string | ||
| 2684 | */ | 2685 | */ |
| 2685 | void i915_handle_error(struct drm_i915_private *dev_priv, | 2686 | void i915_handle_error(struct drm_i915_private *dev_priv, |
| 2686 | u32 engine_mask, | 2687 | u32 engine_mask, |
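One nit on the last hunk: kernel-doc expects every @parameter line to sit directly under the one-line summary, before the free-form description, so the @fmt line added after the paragraph may trip scripts/kernel-doc. The conventional layout would be:

    /**
     * i915_handle_error - handle a gpu error
     * @dev_priv: i915 device private
     * @engine_mask: mask representing engines that are hung
     * @fmt: Error message format string
     *
     * Do some basic checking of register state at error time and
     * dump it to the syslog. ...
     */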
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 5e18cf9f754d..7effe68d552c 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c | |||
| @@ -54,12 +54,13 @@ struct i915_params i915 __read_mostly = { | |||
| 54 | .verbose_state_checks = 1, | 54 | .verbose_state_checks = 1, |
| 55 | .nuclear_pageflip = 0, | 55 | .nuclear_pageflip = 0, |
| 56 | .edp_vswing = 0, | 56 | .edp_vswing = 0, |
| 57 | .enable_guc_loading = 0, | 57 | .enable_guc_loading = -1, |
| 58 | .enable_guc_submission = 0, | 58 | .enable_guc_submission = -1, |
| 59 | .guc_log_level = -1, | 59 | .guc_log_level = -1, |
| 60 | .enable_dp_mst = true, | 60 | .enable_dp_mst = true, |
| 61 | .inject_load_failure = 0, | 61 | .inject_load_failure = 0, |
| 62 | .enable_dpcd_backlight = false, | 62 | .enable_dpcd_backlight = false, |
| 63 | .enable_gvt = false, | ||
| 63 | }; | 64 | }; |
| 64 | 65 | ||
| 65 | module_param_named(modeset, i915.modeset, int, 0400); | 66 | module_param_named(modeset, i915.modeset, int, 0400); |
| @@ -202,12 +203,12 @@ MODULE_PARM_DESC(edp_vswing, | |||
| 202 | module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400); | 203 | module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400); |
| 203 | MODULE_PARM_DESC(enable_guc_loading, | 204 | MODULE_PARM_DESC(enable_guc_loading, |
| 204 | "Enable GuC firmware loading " | 205 | "Enable GuC firmware loading " |
| 205 | "(-1=auto, 0=never [default], 1=if available, 2=required)"); | 206 | "(-1=auto [default], 0=never, 1=if available, 2=required)"); |
| 206 | 207 | ||
| 207 | module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400); | 208 | module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400); |
| 208 | MODULE_PARM_DESC(enable_guc_submission, | 209 | MODULE_PARM_DESC(enable_guc_submission, |
| 209 | "Enable GuC submission " | 210 | "Enable GuC submission " |
| 210 | "(-1=auto, 0=never [default], 1=if available, 2=required)"); | 211 | "(-1=auto [default], 0=never, 1=if available, 2=required)"); |
| 211 | 212 | ||
| 212 | module_param_named(guc_log_level, i915.guc_log_level, int, 0400); | 213 | module_param_named(guc_log_level, i915.guc_log_level, int, 0400); |
| 213 | MODULE_PARM_DESC(guc_log_level, | 214 | MODULE_PARM_DESC(guc_log_level, |
| @@ -222,3 +223,7 @@ MODULE_PARM_DESC(inject_load_failure, | |||
| 222 | module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600); | 223 | module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600); |
| 223 | MODULE_PARM_DESC(enable_dpcd_backlight, | 224 | MODULE_PARM_DESC(enable_dpcd_backlight, |
| 224 | "Enable support for DPCD backlight control (default:false)"); | 225 | "Enable support for DPCD backlight control (default:false)"); |
| 226 | |||
| 227 | module_param_named(enable_gvt, i915.enable_gvt, bool, 0600); | ||
| 228 | MODULE_PARM_DESC(enable_gvt, | ||
| 229 | "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); | ||
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 1323261a0cdd..0ad020b4a925 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h | |||
| @@ -63,6 +63,7 @@ struct i915_params { | |||
| 63 | bool nuclear_pageflip; | 63 | bool nuclear_pageflip; |
| 64 | bool enable_dp_mst; | 64 | bool enable_dp_mst; |
| 65 | bool enable_dpcd_backlight; | 65 | bool enable_dpcd_backlight; |
| 66 | bool enable_gvt; | ||
| 66 | }; | 67 | }; |
| 67 | 68 | ||
| 68 | extern struct i915_params i915 __read_mostly; | 69 | extern struct i915_params i915 __read_mostly; |
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h new file mode 100644 index 000000000000..c0cb2974caac --- /dev/null +++ b/drivers/gpu/drm/i915/i915_pvinfo.h | |||
| @@ -0,0 +1,113 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice (including the next | ||
| 12 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 13 | * Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
| 20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 21 | * SOFTWARE. | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef _I915_PVINFO_H_ | ||
| 25 | #define _I915_PVINFO_H_ | ||
| 26 | |||
| 27 | /* The MMIO offset of the shared info between guest and host emulator */ | ||
| 28 | #define VGT_PVINFO_PAGE 0x78000 | ||
| 29 | #define VGT_PVINFO_SIZE 0x1000 | ||
| 30 | |||
| 31 | /* | ||
| 32 | * The following structure pages are defined in GEN MMIO space | ||
| 33 | * for virtualization. (One page for now) | ||
| 34 | */ | ||
| 35 | #define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */ | ||
| 36 | #define VGT_VERSION_MAJOR 1 | ||
| 37 | #define VGT_VERSION_MINOR 0 | ||
| 38 | |||
| 39 | #define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor)) | ||
| 40 | #define INTEL_VGT_IF_VERSION \ | ||
| 41 | INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) | ||
| 42 | |||
| 43 | /* | ||
| 44 | * notifications from guest to vgpu device model | ||
| 45 | */ | ||
| 46 | enum vgt_g2v_type { | ||
| 47 | VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2, | ||
| 48 | VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY, | ||
| 49 | VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE, | ||
| 50 | VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY, | ||
| 51 | VGT_G2V_EXECLIST_CONTEXT_CREATE, | ||
| 52 | VGT_G2V_EXECLIST_CONTEXT_DESTROY, | ||
| 53 | VGT_G2V_MAX, | ||
| 54 | }; | ||
| 55 | |||
| 56 | struct vgt_if { | ||
| 57 | u64 magic; /* VGT_MAGIC */ | ||
| 58 | uint16_t version_major; | ||
| 59 | uint16_t version_minor; | ||
| 60 | u32 vgt_id; /* ID of vGT instance */ | ||
| 61 | u32 rsv1[12]; /* pad to offset 0x40 */ | ||
| 62 | /* | ||
| 63 | * Data structure to describe the ballooning info of resources. | ||
| 64 | * Each VM can have only one contiguous region for now. | ||
| 65 | * (May support scattered resource in future) | ||
| 66 | * (starting from offset 0x40) | ||
| 67 | */ | ||
| 68 | struct { | ||
| 69 | /* Aperture register ballooning */ | ||
| 70 | struct { | ||
| 71 | u32 base; | ||
| 72 | u32 size; | ||
| 73 | } mappable_gmadr; /* aperture */ | ||
| 74 | /* GMADR register ballooning */ | ||
| 75 | struct { | ||
| 76 | u32 base; | ||
| 77 | u32 size; | ||
| 78 | } nonmappable_gmadr; /* non aperture */ | ||
| 79 | /* allowed fence registers */ | ||
| 80 | u32 fence_num; | ||
| 81 | u32 rsv2[3]; | ||
| 82 | } avail_rs; /* available/assigned resource */ | ||
| 83 | u32 rsv3[0x200 - 24]; /* pad to half page */ | ||
| 84 | /* | ||
| 85 | * The bottom half page is for responses from the gfx driver to the hypervisor. | ||
| 86 | */ | ||
| 87 | u32 rsv4; | ||
| 88 | u32 display_ready; /* ready for display owner switch */ | ||
| 89 | |||
| 90 | u32 rsv5[4]; | ||
| 91 | |||
| 92 | u32 g2v_notify; | ||
| 93 | u32 rsv6[7]; | ||
| 94 | |||
| 95 | struct { | ||
| 96 | u32 lo; | ||
| 97 | u32 hi; | ||
| 98 | } pdp[4]; | ||
| 99 | |||
| 100 | u32 execlist_context_descriptor_lo; | ||
| 101 | u32 execlist_context_descriptor_hi; | ||
| 102 | |||
| 103 | u32 rsv7[0x200 - 24]; /* pad to one page */ | ||
| 104 | } __packed; | ||
| 105 | |||
| 106 | #define vgtif_reg(x) \ | ||
| 107 | _MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))) | ||
| 108 | |||
| 109 | /* vGPU display status to be used by the host side */ | ||
| 110 | #define VGT_DRV_DISPLAY_NOT_READY 0 | ||
| 111 | #define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */ | ||
| 112 | |||
| 113 | #endif /* _I915_PVINFO_H_ */ | ||
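Everything in this page is reached through plain MMIO: the guest driver reads and writes fields of vgt_if at VGT_PVINFO_PAGE, and vgtif_reg() turns a field name into its register offset via offsetof() (replacing the NULL-pointer-cast trick removed from i915_vgpu.h below). A trimmed, standalone illustration of the arithmetic; the struct here is a stand-in, not the full layout:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define VGT_PVINFO_PAGE 0x78000

    struct vgt_if_sketch {
            uint64_t magic;
            uint16_t version_major;
            uint16_t version_minor;
            uint32_t vgt_id;
    } __attribute__((packed));

    int main(void)
    {
            /* vgtif_reg(vgt_id) resolves to 0x78000 + 12 = 0x7800c */
            printf("vgt_id register at 0x%zx\n",
                   VGT_PVINFO_PAGE + offsetof(struct vgt_if_sketch, vgt_id));
            return 0;
    }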
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index dfb4c7a88de3..c6bfbf8d7cca 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -220,6 +220,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) | |||
| 220 | #define ECOCHK_PPGTT_WT_HSW (0x2<<3) | 220 | #define ECOCHK_PPGTT_WT_HSW (0x2<<3) |
| 221 | #define ECOCHK_PPGTT_WB_HSW (0x3<<3) | 221 | #define ECOCHK_PPGTT_WB_HSW (0x3<<3) |
| 222 | 222 | ||
| 223 | #define GEN8_CONFIG0 _MMIO(0xD00) | ||
| 224 | #define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1) | ||
| 225 | |||
| 223 | #define GAC_ECO_BITS _MMIO(0x14090) | 226 | #define GAC_ECO_BITS _MMIO(0x14090) |
| 224 | #define ECOBITS_SNB_BIT (1<<13) | 227 | #define ECOBITS_SNB_BIT (1<<13) |
| 225 | #define ECOBITS_PPGTT_CACHE64B (3<<8) | 228 | #define ECOBITS_PPGTT_CACHE64B (3<<8) |
| @@ -442,6 +445,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) | |||
| 442 | */ | 445 | */ |
| 443 | #define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) | 446 | #define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) |
| 444 | 447 | ||
| 448 | #define GEN9_MEDIA_POOL_STATE ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4) | ||
| 449 | #define GEN9_MEDIA_POOL_ENABLE (1 << 31) | ||
| 445 | #define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) | 450 | #define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) |
| 446 | #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) | 451 | #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) |
| 447 | #define SC_UPDATE_SCISSOR (0x1<<1) | 452 | #define SC_UPDATE_SCISSOR (0x1<<1) |
| @@ -713,6 +718,9 @@ enum skl_disp_power_wells { | |||
| 713 | /* Not actual bit groups. Used as IDs for lookup_power_well() */ | 718 | /* Not actual bit groups. Used as IDs for lookup_power_well() */ |
| 714 | SKL_DISP_PW_ALWAYS_ON, | 719 | SKL_DISP_PW_ALWAYS_ON, |
| 715 | SKL_DISP_PW_DC_OFF, | 720 | SKL_DISP_PW_DC_OFF, |
| 721 | |||
| 722 | BXT_DPIO_CMN_A, | ||
| 723 | BXT_DPIO_CMN_BC, | ||
| 716 | }; | 724 | }; |
| 717 | 725 | ||
| 718 | #define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2)) | 726 | #define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2)) |
| @@ -1273,6 +1281,15 @@ enum skl_disp_power_wells { | |||
| 1273 | #define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090) | 1281 | #define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090) |
| 1274 | #define GT_DISPLAY_POWER_ON(phy) (1 << (phy)) | 1282 | #define GT_DISPLAY_POWER_ON(phy) (1 << (phy)) |
| 1275 | 1283 | ||
| 1284 | #define _BXT_PHY_CTL_DDI_A 0x64C00 | ||
| 1285 | #define _BXT_PHY_CTL_DDI_B 0x64C10 | ||
| 1286 | #define _BXT_PHY_CTL_DDI_C 0x64C20 | ||
| 1287 | #define BXT_PHY_CMNLANE_POWERDOWN_ACK (1 << 10) | ||
| 1288 | #define BXT_PHY_LANE_POWERDOWN_ACK (1 << 9) | ||
| 1289 | #define BXT_PHY_LANE_ENABLED (1 << 8) | ||
| 1290 | #define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \ | ||
| 1291 | _BXT_PHY_CTL_DDI_B) | ||
| 1292 | |||
| 1276 | #define _PHY_CTL_FAMILY_EDP 0x64C80 | 1293 | #define _PHY_CTL_FAMILY_EDP 0x64C80 |
| 1277 | #define _PHY_CTL_FAMILY_DDI 0x64C90 | 1294 | #define _PHY_CTL_FAMILY_DDI 0x64C90 |
| 1278 | #define COMMON_RESET_DIS (1 << 31) | 1295 | #define COMMON_RESET_DIS (1 << 31) |
| @@ -1669,6 +1686,9 @@ enum skl_disp_power_wells { | |||
| 1669 | 1686 | ||
| 1670 | #define GEN7_TLB_RD_ADDR _MMIO(0x4700) | 1687 | #define GEN7_TLB_RD_ADDR _MMIO(0x4700) |
| 1671 | 1688 | ||
| 1689 | #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) | ||
| 1690 | #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28) | ||
| 1691 | |||
| 1672 | #if 0 | 1692 | #if 0 |
| 1673 | #define PRB0_TAIL _MMIO(0x2030) | 1693 | #define PRB0_TAIL _MMIO(0x2030) |
| 1674 | #define PRB0_HEAD _MMIO(0x2034) | 1694 | #define PRB0_HEAD _MMIO(0x2034) |
| @@ -1804,6 +1824,10 @@ enum skl_disp_power_wells { | |||
| 1804 | #define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2)) | 1824 | #define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2)) |
| 1805 | #define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2)) | 1825 | #define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2)) |
| 1806 | 1826 | ||
| 1827 | /* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */ | ||
| 1828 | #define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4) | ||
| 1829 | #define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2) | ||
| 1830 | |||
| 1807 | /* WaClearTdlStateAckDirtyBits */ | 1831 | /* WaClearTdlStateAckDirtyBits */ |
| 1808 | #define GEN8_STATE_ACK _MMIO(0x20F0) | 1832 | #define GEN8_STATE_ACK _MMIO(0x20F0) |
| 1809 | #define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8) | 1833 | #define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8) |
| @@ -2161,6 +2185,9 @@ enum skl_disp_power_wells { | |||
| 2161 | 2185 | ||
| 2162 | #define FBC_LL_SIZE (1536) | 2186 | #define FBC_LL_SIZE (1536) |
| 2163 | 2187 | ||
| 2188 | #define FBC_LLC_READ_CTRL _MMIO(0x9044) | ||
| 2189 | #define FBC_LLC_FULLY_OPEN (1<<30) | ||
| 2190 | |||
| 2164 | /* Framebuffer compression for GM45+ */ | 2191 | /* Framebuffer compression for GM45+ */ |
| 2165 | #define DPFC_CB_BASE _MMIO(0x3200) | 2192 | #define DPFC_CB_BASE _MMIO(0x3200) |
| 2166 | #define DPFC_CONTROL _MMIO(0x3208) | 2193 | #define DPFC_CONTROL _MMIO(0x3208) |
| @@ -2200,6 +2227,8 @@ enum skl_disp_power_wells { | |||
| 2200 | #define ILK_DPFC_STATUS _MMIO(0x43210) | 2227 | #define ILK_DPFC_STATUS _MMIO(0x43210) |
| 2201 | #define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) | 2228 | #define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) |
| 2202 | #define ILK_DPFC_CHICKEN _MMIO(0x43224) | 2229 | #define ILK_DPFC_CHICKEN _MMIO(0x43224) |
| 2230 | #define ILK_DPFC_DISABLE_DUMMY0 (1<<8) | ||
| 2231 | #define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23) | ||
| 2203 | #define ILK_FBC_RT_BASE _MMIO(0x2128) | 2232 | #define ILK_FBC_RT_BASE _MMIO(0x2128) |
| 2204 | #define ILK_FBC_RT_VALID (1<<0) | 2233 | #define ILK_FBC_RT_VALID (1<<0) |
| 2205 | #define SNB_FBC_FRONT_BUFFER (1<<1) | 2234 | #define SNB_FBC_FRONT_BUFFER (1<<1) |
| @@ -3022,6 +3051,18 @@ enum skl_disp_power_wells { | |||
| 3022 | /* Same as Haswell, but 72064 bytes now. */ | 3051 | /* Same as Haswell, but 72064 bytes now. */ |
| 3023 | #define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE) | 3052 | #define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE) |
| 3024 | 3053 | ||
| 3054 | enum { | ||
| 3055 | INTEL_ADVANCED_CONTEXT = 0, | ||
| 3056 | INTEL_LEGACY_32B_CONTEXT, | ||
| 3057 | INTEL_ADVANCED_AD_CONTEXT, | ||
| 3058 | INTEL_LEGACY_64B_CONTEXT | ||
| 3059 | }; | ||
| 3060 | |||
| 3061 | #define GEN8_CTX_ADDRESSING_MODE_SHIFT 3 | ||
| 3062 | #define GEN8_CTX_ADDRESSING_MODE(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\ | ||
| 3063 | INTEL_LEGACY_64B_CONTEXT : \ | ||
| 3064 | INTEL_LEGACY_32B_CONTEXT) | ||
| 3065 | |||
| 3025 | #define CHV_CLK_CTL1 _MMIO(0x101100) | 3066 | #define CHV_CLK_CTL1 _MMIO(0x101100) |
| 3026 | #define VLV_CLK_CTL2 _MMIO(0x101104) | 3067 | #define VLV_CLK_CTL2 _MMIO(0x101104) |
| 3027 | #define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 | 3068 | #define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 |
| @@ -6035,6 +6076,9 @@ enum skl_disp_power_wells { | |||
| 6035 | #define FORCE_ARB_IDLE_PLANES (1 << 14) | 6076 | #define FORCE_ARB_IDLE_PLANES (1 << 14) |
| 6036 | #define SKL_EDP_PSR_FIX_RDWRAP (1 << 3) | 6077 | #define SKL_EDP_PSR_FIX_RDWRAP (1 << 3) |
| 6037 | 6078 | ||
| 6079 | #define CHICKEN_PAR2_1 _MMIO(0x42090) | ||
| 6080 | #define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14) | ||
| 6081 | |||
| 6038 | #define _CHICKEN_PIPESL_1_A 0x420b0 | 6082 | #define _CHICKEN_PIPESL_1_A 0x420b0 |
| 6039 | #define _CHICKEN_PIPESL_1_B 0x420b4 | 6083 | #define _CHICKEN_PIPESL_1_B 0x420b4 |
| 6040 | #define HSW_FBCQ_DIS (1 << 22) | 6084 | #define HSW_FBCQ_DIS (1 << 22) |
| @@ -6042,6 +6086,7 @@ enum skl_disp_power_wells { | |||
| 6042 | #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) | 6086 | #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) |
| 6043 | 6087 | ||
| 6044 | #define DISP_ARB_CTL _MMIO(0x45000) | 6088 | #define DISP_ARB_CTL _MMIO(0x45000) |
| 6089 | #define DISP_FBC_MEMORY_WAKE (1<<31) | ||
| 6045 | #define DISP_TILE_SURFACE_SWIZZLING (1<<13) | 6090 | #define DISP_TILE_SURFACE_SWIZZLING (1<<13) |
| 6046 | #define DISP_FBC_WM_DIS (1<<15) | 6091 | #define DISP_FBC_WM_DIS (1<<15) |
| 6047 | #define DISP_ARB_CTL2 _MMIO(0x45004) | 6092 | #define DISP_ARB_CTL2 _MMIO(0x45004) |
| @@ -6055,6 +6100,9 @@ enum skl_disp_power_wells { | |||
| 6055 | #define HSW_NDE_RSTWRN_OPT _MMIO(0x46408) | 6100 | #define HSW_NDE_RSTWRN_OPT _MMIO(0x46408) |
| 6056 | #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) | 6101 | #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) |
| 6057 | 6102 | ||
| 6103 | #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) | ||
| 6104 | #define MASK_WAKEMEM (1<<13) | ||
| 6105 | |||
| 6058 | #define SKL_DFSM _MMIO(0x51000) | 6106 | #define SKL_DFSM _MMIO(0x51000) |
| 6059 | #define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23) | 6107 | #define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23) |
| 6060 | #define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23) | 6108 | #define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23) |
| @@ -6070,8 +6118,10 @@ enum skl_disp_power_wells { | |||
| 6070 | 6118 | ||
| 6071 | #define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4) | 6119 | #define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4) |
| 6072 | #define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) | 6120 | #define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) |
| 6121 | #define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1<<10) | ||
| 6073 | 6122 | ||
| 6074 | #define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) | 6123 | #define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) |
| 6124 | #define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) | ||
| 6075 | #define GEN8_CS_CHICKEN1 _MMIO(0x2580) | 6125 | #define GEN8_CS_CHICKEN1 _MMIO(0x2580) |
| 6076 | 6126 | ||
| 6077 | /* GEN7 chicken */ | 6127 | /* GEN7 chicken */ |
| @@ -6079,6 +6129,7 @@ enum skl_disp_power_wells { | |||
| 6079 | # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) | 6129 | # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) |
| 6080 | # define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14) | 6130 | # define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14) |
| 6081 | #define COMMON_SLICE_CHICKEN2 _MMIO(0x7014) | 6131 | #define COMMON_SLICE_CHICKEN2 _MMIO(0x7014) |
| 6132 | # define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8) | ||
| 6082 | # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) | 6133 | # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) |
| 6083 | 6134 | ||
| 6084 | #define HIZ_CHICKEN _MMIO(0x7018) | 6135 | #define HIZ_CHICKEN _MMIO(0x7018) |
| @@ -6931,6 +6982,7 @@ enum skl_disp_power_wells { | |||
| 6931 | #define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3) | 6982 | #define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3) |
| 6932 | 6983 | ||
| 6933 | #define GEN6_UCGCTL1 _MMIO(0x9400) | 6984 | #define GEN6_UCGCTL1 _MMIO(0x9400) |
| 6985 | # define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22) | ||
| 6934 | # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16) | 6986 | # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16) |
| 6935 | # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) | 6987 | # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) |
| 6936 | # define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) | 6988 | # define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) |
| @@ -6947,6 +6999,7 @@ enum skl_disp_power_wells { | |||
| 6947 | 6999 | ||
| 6948 | #define GEN7_UCGCTL4 _MMIO(0x940c) | 7000 | #define GEN7_UCGCTL4 _MMIO(0x940c) |
| 6949 | #define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) | 7001 | #define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) |
| 7002 | #define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14) | ||
| 6950 | 7003 | ||
| 6951 | #define GEN6_RCGCTL1 _MMIO(0x9410) | 7004 | #define GEN6_RCGCTL1 _MMIO(0x9410) |
| 6952 | #define GEN6_RCGCTL2 _MMIO(0x9414) | 7005 | #define GEN6_RCGCTL2 _MMIO(0x9414) |
| @@ -8151,6 +8204,8 @@ enum skl_disp_power_wells { | |||
| 8151 | #define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) | 8204 | #define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) |
| 8152 | #define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) | 8205 | #define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) |
| 8153 | #define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE) | 8206 | #define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE) |
| 8207 | #define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9) | ||
| 8208 | #define BXT_DPHY_DEFEATURE_EN (1 << 8) | ||
| 8154 | #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) | 8209 | #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) |
| 8155 | #define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) | 8210 | #define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) |
| 8156 | #define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) | 8211 | #define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) |
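Most of the register additions above are workaround ("chicken") bits; they are normally applied with a read-modify-write so neighbouring bits are preserved. A sketch of that pattern for the new GEN8_CONFIG0 register; whether this series sets GEN9_DEFAULT_FIXES in exactly this spot is an assumption:

    /* gen9 clock-gating/workaround init, sketch only */
    I915_WRITE(GEN8_CONFIG0,
               I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);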
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 004326291854..f6acb5a0e701 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c | |||
| @@ -53,7 +53,7 @@ | |||
| 53 | 53 | ||
| 54 | /** | 54 | /** |
| 55 | * i915_check_vgpu - detect virtual GPU | 55 | * i915_check_vgpu - detect virtual GPU |
| 56 | * @dev: drm device * | 56 | * @dev_priv: i915 device private |
| 57 | * | 57 | * |
| 58 | * This function is called at the initialization stage, to detect whether | 58 | * This function is called at the initialization stage, to detect whether |
| 59 | * running on a vGPU. | 59 | * running on a vGPU. |
| @@ -101,10 +101,13 @@ static struct _balloon_info_ bl_info; | |||
| 101 | * This function is called to deallocate the ballooned-out graphic memory, when | 101 | * This function is called to deallocate the ballooned-out graphic memory, when |
| 102 | * driver is unloaded or when ballooning fails. | 102 | * driver is unloaded or when ballooning fails. |
| 103 | */ | 103 | */ |
| 104 | void intel_vgt_deballoon(void) | 104 | void intel_vgt_deballoon(struct drm_i915_private *dev_priv) |
| 105 | { | 105 | { |
| 106 | int i; | 106 | int i; |
| 107 | 107 | ||
| 108 | if (!intel_vgpu_active(dev_priv)) | ||
| 109 | return; | ||
| 110 | |||
| 108 | DRM_DEBUG("VGT deballoon.\n"); | 111 | DRM_DEBUG("VGT deballoon.\n"); |
| 109 | 112 | ||
| 110 | for (i = 0; i < 4; i++) { | 113 | for (i = 0; i < 4; i++) { |
| @@ -135,7 +138,7 @@ static int vgt_balloon_space(struct drm_mm *mm, | |||
| 135 | 138 | ||
| 136 | /** | 139 | /** |
| 137 | * intel_vgt_balloon - balloon out reserved graphics address trunks | 140 | * intel_vgt_balloon - balloon out reserved graphics address trunks |
| 138 | * @dev: drm device | 141 | * @dev_priv: i915 device private |
| 139 | * | 142 | * |
| 140 | * This function is called at the initialization stage, to balloon out the | 143 | * This function is called at the initialization stage, to balloon out the |
| 141 | * graphic address space allocated to other vGPUs, by marking these spaces as | 144 | * graphic address space allocated to other vGPUs, by marking these spaces as |
| @@ -177,9 +180,8 @@ static int vgt_balloon_space(struct drm_mm *mm, | |||
| 177 | * Returns: | 180 | * Returns: |
| 178 | * zero on success, non-zero if configuration invalid or ballooning failed | 181 | * zero on success, non-zero if configuration invalid or ballooning failed |
| 179 | */ | 182 | */ |
| 180 | int intel_vgt_balloon(struct drm_device *dev) | 183 | int intel_vgt_balloon(struct drm_i915_private *dev_priv) |
| 181 | { | 184 | { |
| 182 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
| 183 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 185 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
| 184 | unsigned long ggtt_end = ggtt->base.start + ggtt->base.total; | 186 | unsigned long ggtt_end = ggtt->base.start + ggtt->base.total; |
| 185 | 187 | ||
| @@ -187,6 +189,9 @@ int intel_vgt_balloon(struct drm_device *dev) | |||
| 187 | unsigned long unmappable_base, unmappable_size, unmappable_end; | 189 | unsigned long unmappable_base, unmappable_size, unmappable_end; |
| 188 | int ret; | 190 | int ret; |
| 189 | 191 | ||
| 192 | if (!intel_vgpu_active(dev_priv)) | ||
| 193 | return 0; | ||
| 194 | |||
| 190 | mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base)); | 195 | mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base)); |
| 191 | mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size)); | 196 | mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size)); |
| 192 | unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base)); | 197 | unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base)); |
| @@ -258,6 +263,6 @@ int intel_vgt_balloon(struct drm_device *dev) | |||
| 258 | 263 | ||
| 259 | err: | 264 | err: |
| 260 | DRM_ERROR("VGT balloon fail\n"); | 265 | DRM_ERROR("VGT balloon fail\n"); |
| 261 | intel_vgt_deballoon(); | 266 | intel_vgt_deballoon(dev_priv); |
| 262 | return ret; | 267 | return ret; |
| 263 | } | 268 | } |
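With the intel_vgpu_active() checks folded into the balloon/deballoon functions themselves, call sites no longer need their own vGPU guard; on bare metal both calls fall through as no-ops. A hypothetical call site:

    /* hypothetical caller; the vGPU check now lives in the callee */
    ret = intel_vgt_balloon(dev_priv);  /* 0 on bare metal or on success */
    if (ret)
            goto err_ggtt;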
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index 21ffcfea5f5d..3c3b2d24e830 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h | |||
| @@ -24,94 +24,10 @@ | |||
| 24 | #ifndef _I915_VGPU_H_ | 24 | #ifndef _I915_VGPU_H_ |
| 25 | #define _I915_VGPU_H_ | 25 | #define _I915_VGPU_H_ |
| 26 | 26 | ||
| 27 | /* The MMIO offset of the shared info between guest and host emulator */ | 27 | #include "i915_pvinfo.h" |
| 28 | #define VGT_PVINFO_PAGE 0x78000 | ||
| 29 | #define VGT_PVINFO_SIZE 0x1000 | ||
| 30 | 28 | ||
| 31 | /* | 29 | void i915_check_vgpu(struct drm_i915_private *dev_priv); |
| 32 | * The following structure pages are defined in GEN MMIO space | 30 | int intel_vgt_balloon(struct drm_i915_private *dev_priv); |
| 33 | * for virtualization. (One page for now) | 31 | void intel_vgt_deballoon(struct drm_i915_private *dev_priv); |
| 34 | */ | ||
| 35 | #define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */ | ||
| 36 | #define VGT_VERSION_MAJOR 1 | ||
| 37 | #define VGT_VERSION_MINOR 0 | ||
| 38 | |||
| 39 | #define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor)) | ||
| 40 | #define INTEL_VGT_IF_VERSION \ | ||
| 41 | INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) | ||
| 42 | |||
| 43 | /* | ||
| 44 | * notifications from guest to vgpu device model | ||
| 45 | */ | ||
| 46 | enum vgt_g2v_type { | ||
| 47 | VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2, | ||
| 48 | VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY, | ||
| 49 | VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE, | ||
| 50 | VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY, | ||
| 51 | VGT_G2V_EXECLIST_CONTEXT_CREATE, | ||
| 52 | VGT_G2V_EXECLIST_CONTEXT_DESTROY, | ||
| 53 | VGT_G2V_MAX, | ||
| 54 | }; | ||
| 55 | |||
| 56 | struct vgt_if { | ||
| 57 | uint64_t magic; /* VGT_MAGIC */ | ||
| 58 | uint16_t version_major; | ||
| 59 | uint16_t version_minor; | ||
| 60 | uint32_t vgt_id; /* ID of vGT instance */ | ||
| 61 | uint32_t rsv1[12]; /* pad to offset 0x40 */ | ||
| 62 | /* | ||
| 63 | * Data structure to describe the balooning info of resources. | ||
| 64 | * Each VM can only have one portion of continuous area for now. | ||
| 65 | * (May support scattered resource in future) | ||
| 66 | * (starting from offset 0x40) | ||
| 67 | */ | ||
| 68 | struct { | ||
| 69 | /* Aperture register balooning */ | ||
| 70 | struct { | ||
| 71 | uint32_t base; | ||
| 72 | uint32_t size; | ||
| 73 | } mappable_gmadr; /* aperture */ | ||
| 74 | /* GMADR register balooning */ | ||
| 75 | struct { | ||
| 76 | uint32_t base; | ||
| 77 | uint32_t size; | ||
| 78 | } nonmappable_gmadr; /* non aperture */ | ||
| 79 | /* allowed fence registers */ | ||
| 80 | uint32_t fence_num; | ||
| 81 | uint32_t rsv2[3]; | ||
| 82 | } avail_rs; /* available/assigned resource */ | ||
| 83 | uint32_t rsv3[0x200 - 24]; /* pad to half page */ | ||
| 84 | /* | ||
| 85 | * The bottom half page is for response from Gfx driver to hypervisor. | ||
| 86 | */ | ||
| 87 | uint32_t rsv4; | ||
| 88 | uint32_t display_ready; /* ready for display owner switch */ | ||
| 89 | |||
| 90 | uint32_t rsv5[4]; | ||
| 91 | |||
| 92 | uint32_t g2v_notify; | ||
| 93 | uint32_t rsv6[7]; | ||
| 94 | |||
| 95 | struct { | ||
| 96 | uint32_t lo; | ||
| 97 | uint32_t hi; | ||
| 98 | } pdp[4]; | ||
| 99 | |||
| 100 | uint32_t execlist_context_descriptor_lo; | ||
| 101 | uint32_t execlist_context_descriptor_hi; | ||
| 102 | |||
| 103 | uint32_t rsv7[0x200 - 24]; /* pad to one page */ | ||
| 104 | } __packed; | ||
| 105 | |||
| 106 | #define vgtif_reg(x) \ | ||
| 107 | _MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x)) | ||
| 108 | |||
| 109 | /* vGPU display status to be used by the host side */ | ||
| 110 | #define VGT_DRV_DISPLAY_NOT_READY 0 | ||
| 111 | #define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */ | ||
| 112 | |||
| 113 | extern void i915_check_vgpu(struct drm_i915_private *dev_priv); | ||
| 114 | extern int intel_vgt_balloon(struct drm_device *dev); | ||
| 115 | extern void intel_vgt_deballoon(void); | ||
| 116 | 32 | ||
| 117 | #endif /* _I915_VGPU_H_ */ | 33 | #endif /* _I915_VGPU_H_ */ |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 713a02db378a..da5ed4a850b9 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
| @@ -1570,6 +1570,45 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin) | |||
| 1570 | } | 1570 | } |
| 1571 | 1571 | ||
| 1572 | /** | 1572 | /** |
| 1573 | * intel_bios_is_port_present - is the specified digital port present | ||
| 1574 | * @dev_priv: i915 device instance | ||
| 1575 | * @port: port to check | ||
| 1576 | * | ||
| 1577 | * Return true if the device in %port is present. | ||
| 1578 | */ | ||
| 1579 | bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port) | ||
| 1580 | { | ||
| 1581 | static const struct { | ||
| 1582 | u16 dp, hdmi; | ||
| 1583 | } port_mapping[] = { | ||
| 1584 | [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, | ||
| 1585 | [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, | ||
| 1586 | [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, | ||
| 1587 | [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, | ||
| 1588 | }; | ||
| 1589 | int i; | ||
| 1590 | |||
| 1591 | /* FIXME maybe deal with port A as well? */ | ||
| 1592 | if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping)) | ||
| 1593 | return false; | ||
| 1594 | |||
| 1595 | if (!dev_priv->vbt.child_dev_num) | ||
| 1596 | return false; | ||
| 1597 | |||
| 1598 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { | ||
| 1599 | const union child_device_config *p_child = | ||
| 1600 | &dev_priv->vbt.child_dev[i]; | ||
| 1601 | if ((p_child->common.dvo_port == port_mapping[port].dp || | ||
| 1602 | p_child->common.dvo_port == port_mapping[port].hdmi) && | ||
| 1603 | (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING | | ||
| 1604 | DEVICE_TYPE_DISPLAYPORT_OUTPUT))) | ||
| 1605 | return true; | ||
| 1606 | } | ||
| 1607 | |||
| 1608 | return false; | ||
| 1609 | } | ||
| 1610 | |||
| 1611 | /** | ||
| 1573 | * intel_bios_is_port_edp - is the device in given port eDP | 1612 | * intel_bios_is_port_edp - is the device in given port eDP |
| 1574 | * @dev_priv: i915 device instance | 1613 | * @dev_priv: i915 device instance |
| 1575 | * @port: port to check | 1614 | * @port: port to check |
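intel_bios_is_port_present() walks the VBT child-device list looking for an entry whose DVO port maps to the requested port and whose device type advertises TMDS/DVI or DisplayPort signalling; port A is explicitly rejected for now (see the FIXME). A hypothetical caller would use it along these lines:

    /* hypothetical caller: consult the VBT before bringing up a port */
    if (!intel_bios_is_port_present(dev_priv, PORT_B))
            DRM_DEBUG_KMS("VBT says port B is absent, skipping init\n");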
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 022b41d422dc..ad3b0ee5e55b 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -1342,6 +1342,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, | |||
| 1342 | DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); | 1342 | DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); |
| 1343 | 1343 | ||
| 1344 | out: | 1344 | out: |
| 1345 | if (ret && IS_BROXTON(dev_priv)) { | ||
| 1346 | tmp = I915_READ(BXT_PHY_CTL(port)); | ||
| 1347 | if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK | | ||
| 1348 | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) | ||
| 1349 | DRM_ERROR("Port %c enabled but PHY powered down? " | ||
| 1350 | "(PHY_CTL %08x)\n", port_name(port), tmp); | ||
| 1351 | } | ||
| 1352 | |||
| 1345 | intel_display_power_put(dev_priv, power_domain); | 1353 | intel_display_power_put(dev_priv, power_domain); |
| 1346 | 1354 | ||
| 1347 | return ret; | 1355 | return ret; |
| @@ -1742,9 +1750,11 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) | |||
| 1742 | } | 1750 | } |
| 1743 | } | 1751 | } |
| 1744 | 1752 | ||
| 1745 | static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv, | 1753 | bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, |
| 1746 | enum dpio_phy phy) | 1754 | enum dpio_phy phy) |
| 1747 | { | 1755 | { |
| 1756 | enum port port; | ||
| 1757 | |||
| 1748 | if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy))) | 1758 | if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy))) |
| 1749 | return false; | 1759 | return false; |
| 1750 | 1760 | ||
| @@ -1770,38 +1780,48 @@ static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv, | |||
| 1770 | return false; | 1780 | return false; |
| 1771 | } | 1781 | } |
| 1772 | 1782 | ||
| 1783 | for_each_port_masked(port, | ||
| 1784 | phy == DPIO_PHY0 ? BIT(PORT_B) | BIT(PORT_C) : | ||
| 1785 | BIT(PORT_A)) { | ||
| 1786 | u32 tmp = I915_READ(BXT_PHY_CTL(port)); | ||
| 1787 | |||
| 1788 | if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) { | ||
| 1789 | DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane " | ||
| 1790 | "for port %c powered down " | ||
| 1791 | "(PHY_CTL %08x)\n", | ||
| 1792 | phy, port_name(port), tmp); | ||
| 1793 | |||
| 1794 | return false; | ||
| 1795 | } | ||
| 1796 | } | ||
| 1797 | |||
| 1773 | return true; | 1798 | return true; |
| 1774 | } | 1799 | } |
| 1775 | 1800 | ||
| 1776 | static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) | 1801 | static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) |
| 1777 | { | 1802 | { |
| 1778 | u32 val = I915_READ(BXT_PORT_REF_DW6(phy)); | 1803 | u32 val = I915_READ(BXT_PORT_REF_DW6(phy)); |
| 1779 | 1804 | ||
| 1780 | return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT; | 1805 | return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT; |
| 1781 | } | 1806 | } |
| 1782 | 1807 | ||
| 1783 | static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv, | 1808 | static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, |
| 1784 | enum dpio_phy phy) | 1809 | enum dpio_phy phy) |
| 1785 | { | 1810 | { |
| 1786 | if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10)) | 1811 | if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10)) |
| 1787 | DRM_ERROR("timeout waiting for PHY%d GRC\n", phy); | 1812 | DRM_ERROR("timeout waiting for PHY%d GRC\n", phy); |
| 1788 | } | 1813 | } |
| 1789 | 1814 | ||
| 1790 | static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, | 1815 | void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) |
| 1791 | enum dpio_phy phy); | ||
| 1792 | |||
| 1793 | static void broxton_phy_init(struct drm_i915_private *dev_priv, | ||
| 1794 | enum dpio_phy phy) | ||
| 1795 | { | 1816 | { |
| 1796 | enum port port; | 1817 | u32 val; |
| 1797 | u32 ports, val; | ||
| 1798 | 1818 | ||
| 1799 | if (broxton_phy_is_enabled(dev_priv, phy)) { | 1819 | if (bxt_ddi_phy_is_enabled(dev_priv, phy)) { |
| 1800 | /* Still read out the GRC value for state verification */ | 1820 | /* Still read out the GRC value for state verification */ |
| 1801 | if (phy == DPIO_PHY0) | 1821 | if (phy == DPIO_PHY0) |
| 1802 | dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy); | 1822 | dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy); |
| 1803 | 1823 | ||
| 1804 | if (broxton_phy_verify_state(dev_priv, phy)) { | 1824 | if (bxt_ddi_phy_verify_state(dev_priv, phy)) { |
| 1805 | DRM_DEBUG_DRIVER("DDI PHY %d already enabled, " | 1825 | DRM_DEBUG_DRIVER("DDI PHY %d already enabled, " |
| 1806 | "won't reprogram it\n", phy); | 1826 | "won't reprogram it\n", phy); |
| 1807 | 1827 | ||
| @@ -1810,8 +1830,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, | |||
| 1810 | 1830 | ||
| 1811 | DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, " | 1831 | DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, " |
| 1812 | "force reprogramming it\n", phy); | 1832 | "force reprogramming it\n", phy); |
| 1813 | } else { | ||
| 1814 | DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy); | ||
| 1815 | } | 1833 | } |
| 1816 | 1834 | ||
| 1817 | val = I915_READ(BXT_P_CR_GT_DISP_PWRON); | 1835 | val = I915_READ(BXT_P_CR_GT_DISP_PWRON); |
| @@ -1831,28 +1849,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, | |||
| 1831 | DRM_ERROR("timeout during PHY%d power on\n", phy); | 1849 | DRM_ERROR("timeout during PHY%d power on\n", phy); |
| 1832 | } | 1850 | } |
| 1833 | 1851 | ||
| 1834 | if (phy == DPIO_PHY0) | ||
| 1835 | ports = BIT(PORT_B) | BIT(PORT_C); | ||
| 1836 | else | ||
| 1837 | ports = BIT(PORT_A); | ||
| 1838 | |||
| 1839 | for_each_port_masked(port, ports) { | ||
| 1840 | int lane; | ||
| 1841 | |||
| 1842 | for (lane = 0; lane < 4; lane++) { | ||
| 1843 | val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane)); | ||
| 1844 | /* | ||
| 1845 | * Note that on CHV this flag is called UPAR, but has | ||
| 1846 | * the same function. | ||
| 1847 | */ | ||
| 1848 | val &= ~LATENCY_OPTIM; | ||
| 1849 | if (lane != 1) | ||
| 1850 | val |= LATENCY_OPTIM; | ||
| 1851 | |||
| 1852 | I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val); | ||
| 1853 | } | ||
| 1854 | } | ||
| 1855 | |||
| 1856 | /* Program PLL Rcomp code offset */ | 1852 | /* Program PLL Rcomp code offset */ |
| 1857 | val = I915_READ(BXT_PORT_CL1CM_DW9(phy)); | 1853 | val = I915_READ(BXT_PORT_CL1CM_DW9(phy)); |
| 1858 | val &= ~IREF0RC_OFFSET_MASK; | 1854 | val &= ~IREF0RC_OFFSET_MASK; |
| @@ -1899,10 +1895,7 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, | |||
| 1899 | * the corresponding calibrated value from PHY1, and disable | 1895 | * the corresponding calibrated value from PHY1, and disable |
| 1900 | * the automatic calibration on PHY0. | 1896 | * the automatic calibration on PHY0. |
| 1901 | */ | 1897 | */ |
| 1902 | broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1); | 1898 | val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, DPIO_PHY1); |
| 1903 | |||
| 1904 | val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, | ||
| 1905 | DPIO_PHY1); | ||
| 1906 | grc_code = val << GRC_CODE_FAST_SHIFT | | 1899 | grc_code = val << GRC_CODE_FAST_SHIFT | |
| 1907 | val << GRC_CODE_SLOW_SHIFT | | 1900 | val << GRC_CODE_SLOW_SHIFT | |
| 1908 | val; | 1901 | val; |
| @@ -1912,31 +1905,16 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, | |||
| 1912 | val |= GRC_DIS | GRC_RDY_OVRD; | 1905 | val |= GRC_DIS | GRC_RDY_OVRD; |
| 1913 | I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val); | 1906 | I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val); |
| 1914 | } | 1907 | } |
| 1915 | /* | ||
| 1916 | * During PHY1 init delay waiting for GRC calibration to finish, since | ||
| 1917 | * it can happen in parallel with the subsequent PHY0 init. | ||
| 1918 | */ | ||
| 1919 | 1908 | ||
| 1920 | val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); | 1909 | val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); |
| 1921 | val |= COMMON_RESET_DIS; | 1910 | val |= COMMON_RESET_DIS; |
| 1922 | I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); | 1911 | I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); |
| 1923 | } | ||
| 1924 | 1912 | ||
| 1925 | void broxton_ddi_phy_init(struct drm_i915_private *dev_priv) | 1913 | if (phy == DPIO_PHY1) |
| 1926 | { | 1914 | bxt_phy_wait_grc_done(dev_priv, DPIO_PHY1); |
| 1927 | /* Enable PHY1 first since it provides Rcomp for PHY0 */ | ||
| 1928 | broxton_phy_init(dev_priv, DPIO_PHY1); | ||
| 1929 | broxton_phy_init(dev_priv, DPIO_PHY0); | ||
| 1930 | |||
| 1931 | /* | ||
| 1932 | * If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the | ||
| 1933 | * PHY1 GRC calibration to finish, so wait for it here. | ||
| 1934 | */ | ||
| 1935 | broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1); | ||
| 1936 | } | 1915 | } |
| 1937 | 1916 | ||
| 1938 | static void broxton_phy_uninit(struct drm_i915_private *dev_priv, | 1917 | void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) |
| 1939 | enum dpio_phy phy) | ||
| 1940 | { | 1918 | { |
| 1941 | uint32_t val; | 1919 | uint32_t val; |
| 1942 | 1920 | ||
| @@ -1949,12 +1927,6 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv, | |||
| 1949 | I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); | 1927 | I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); |
| 1950 | } | 1928 | } |
| 1951 | 1929 | ||
| 1952 | void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv) | ||
| 1953 | { | ||
| 1954 | broxton_phy_uninit(dev_priv, DPIO_PHY1); | ||
| 1955 | broxton_phy_uninit(dev_priv, DPIO_PHY0); | ||
| 1956 | } | ||
| 1957 | |||
| 1958 | static bool __printf(6, 7) | 1930 | static bool __printf(6, 7) |
| 1959 | __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, | 1931 | __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, |
| 1960 | i915_reg_t reg, u32 mask, u32 expected, | 1932 | i915_reg_t reg, u32 mask, u32 expected, |
| @@ -1982,11 +1954,9 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, | |||
| 1982 | return false; | 1954 | return false; |
| 1983 | } | 1955 | } |
| 1984 | 1956 | ||
| 1985 | static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, | 1957 | bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, |
| 1986 | enum dpio_phy phy) | 1958 | enum dpio_phy phy) |
| 1987 | { | 1959 | { |
| 1988 | enum port port; | ||
| 1989 | u32 ports; | ||
| 1990 | uint32_t mask; | 1960 | uint32_t mask; |
| 1991 | bool ok; | 1961 | bool ok; |
| 1992 | 1962 | ||
| @@ -1994,27 +1964,11 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, | |||
| 1994 | __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \ | 1964 | __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \ |
| 1995 | ## __VA_ARGS__) | 1965 | ## __VA_ARGS__) |
| 1996 | 1966 | ||
| 1997 | /* We expect the PHY to be always enabled */ | 1967 | if (!bxt_ddi_phy_is_enabled(dev_priv, phy)) |
| 1998 | if (!broxton_phy_is_enabled(dev_priv, phy)) | ||
| 1999 | return false; | 1968 | return false; |
| 2000 | 1969 | ||
| 2001 | ok = true; | 1970 | ok = true; |
| 2002 | 1971 | ||
| 2003 | if (phy == DPIO_PHY0) | ||
| 2004 | ports = BIT(PORT_B) | BIT(PORT_C); | ||
| 2005 | else | ||
| 2006 | ports = BIT(PORT_A); | ||
| 2007 | |||
| 2008 | for_each_port_masked(port, ports) { | ||
| 2009 | int lane; | ||
| 2010 | |||
| 2011 | for (lane = 0; lane < 4; lane++) | ||
| 2012 | ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane), | ||
| 2013 | LATENCY_OPTIM, | ||
| 2014 | lane != 1 ? LATENCY_OPTIM : 0, | ||
| 2015 | "BXT_PORT_TX_DW14_LN(%d, %d)", port, lane); | ||
| 2016 | } | ||
| 2017 | |||
| 2018 | /* PLL Rcomp code offset */ | 1972 | /* PLL Rcomp code offset */ |
| 2019 | ok &= _CHK(BXT_PORT_CL1CM_DW9(phy), | 1973 | ok &= _CHK(BXT_PORT_CL1CM_DW9(phy), |
| 2020 | IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT, | 1974 | IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT, |
| @@ -2058,11 +2012,65 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, | |||
| 2058 | #undef _CHK | 2012 | #undef _CHK |
| 2059 | } | 2013 | } |
| 2060 | 2014 | ||
| 2061 | void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv) | 2015 | static uint8_t |
| 2016 | bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder, | ||
| 2017 | struct intel_crtc_state *pipe_config) | ||
| 2062 | { | 2018 | { |
| 2063 | if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) || | 2019 | switch (pipe_config->lane_count) { |
| 2064 | !broxton_phy_verify_state(dev_priv, DPIO_PHY1)) | 2020 | case 1: |
| 2065 | i915_report_error(dev_priv, "DDI PHY state mismatch\n"); | 2021 | return 0; |
| 2022 | case 2: | ||
| 2023 | return BIT(2) | BIT(0); | ||
| 2024 | case 4: | ||
| 2025 | return BIT(3) | BIT(2) | BIT(0); | ||
| 2026 | default: | ||
| 2027 | MISSING_CASE(pipe_config->lane_count); | ||
| 2028 | |||
| 2029 | return 0; | ||
| 2030 | } | ||
| 2031 | } | ||
| 2032 | |||
| 2033 | static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder) | ||
| 2034 | { | ||
| 2035 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); | ||
| 2036 | struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); | ||
| 2037 | enum port port = dport->port; | ||
| 2038 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); | ||
| 2039 | int lane; | ||
| 2040 | |||
| 2041 | for (lane = 0; lane < 4; lane++) { | ||
| 2042 | u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane)); | ||
| 2043 | |||
| 2044 | /* | ||
| 2045 | * Note that on CHV this flag is called UPAR, but has | ||
| 2046 | * the same function. | ||
| 2047 | */ | ||
| 2048 | val &= ~LATENCY_OPTIM; | ||
| 2049 | if (intel_crtc->config->lane_lat_optim_mask & BIT(lane)) | ||
| 2050 | val |= LATENCY_OPTIM; | ||
| 2051 | |||
| 2052 | I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val); | ||
| 2053 | } | ||
| 2054 | } | ||
| 2055 | |||
| 2056 | static uint8_t | ||
| 2057 | bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) | ||
| 2058 | { | ||
| 2059 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); | ||
| 2060 | struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); | ||
| 2061 | enum port port = dport->port; | ||
| 2062 | int lane; | ||
| 2063 | uint8_t mask; | ||
| 2064 | |||
| 2065 | mask = 0; | ||
| 2066 | for (lane = 0; lane < 4; lane++) { | ||
| 2067 | u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane)); | ||
| 2068 | |||
| 2069 | if (val & LATENCY_OPTIM) | ||
| 2070 | mask |= BIT(lane); | ||
| 2071 | } | ||
| 2072 | |||
| 2073 | return mask; | ||
| 2066 | } | 2074 | } |
| 2067 | 2075 | ||
| 2068 | void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) | 2076 | void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) |
| @@ -2236,13 +2244,19 @@ void intel_ddi_get_config(struct intel_encoder *encoder, | |||
| 2236 | } | 2244 | } |
| 2237 | 2245 | ||
| 2238 | intel_ddi_clock_get(encoder, pipe_config); | 2246 | intel_ddi_clock_get(encoder, pipe_config); |
| 2247 | |||
| 2248 | if (IS_BROXTON(dev_priv)) | ||
| 2249 | pipe_config->lane_lat_optim_mask = | ||
| 2250 | bxt_ddi_phy_get_lane_lat_optim_mask(encoder); | ||
| 2239 | } | 2251 | } |
| 2240 | 2252 | ||
| 2241 | static bool intel_ddi_compute_config(struct intel_encoder *encoder, | 2253 | static bool intel_ddi_compute_config(struct intel_encoder *encoder, |
| 2242 | struct intel_crtc_state *pipe_config) | 2254 | struct intel_crtc_state *pipe_config) |
| 2243 | { | 2255 | { |
| 2256 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | ||
| 2244 | int type = encoder->type; | 2257 | int type = encoder->type; |
| 2245 | int port = intel_ddi_get_encoder_port(encoder); | 2258 | int port = intel_ddi_get_encoder_port(encoder); |
| 2259 | int ret; | ||
| 2246 | 2260 | ||
| 2247 | WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n"); | 2261 | WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n"); |
| 2248 | 2262 | ||
| @@ -2250,9 +2264,17 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, | |||
| 2250 | pipe_config->cpu_transcoder = TRANSCODER_EDP; | 2264 | pipe_config->cpu_transcoder = TRANSCODER_EDP; |
| 2251 | 2265 | ||
| 2252 | if (type == INTEL_OUTPUT_HDMI) | 2266 | if (type == INTEL_OUTPUT_HDMI) |
| 2253 | return intel_hdmi_compute_config(encoder, pipe_config); | 2267 | ret = intel_hdmi_compute_config(encoder, pipe_config); |
| 2254 | else | 2268 | else |
| 2255 | return intel_dp_compute_config(encoder, pipe_config); | 2269 | ret = intel_dp_compute_config(encoder, pipe_config); |
| 2270 | |||
| 2271 | if (IS_BROXTON(dev_priv) && ret) | ||
| 2272 | pipe_config->lane_lat_optim_mask = | ||
| 2273 | bxt_ddi_phy_calc_lane_lat_optim_mask(encoder, | ||
| 2274 | pipe_config); | ||
| 2275 | |||
| 2276 | return ret; | ||
| 2277 | |||
| 2256 | } | 2278 | } |
| 2257 | 2279 | ||
| 2258 | static const struct drm_encoder_funcs intel_ddi_funcs = { | 2280 | static const struct drm_encoder_funcs intel_ddi_funcs = { |
| @@ -2351,6 +2373,8 @@ void intel_ddi_init(struct drm_device *dev, enum port port) | |||
| 2351 | 2373 | ||
| 2352 | intel_encoder->compute_config = intel_ddi_compute_config; | 2374 | intel_encoder->compute_config = intel_ddi_compute_config; |
| 2353 | intel_encoder->enable = intel_enable_ddi; | 2375 | intel_encoder->enable = intel_enable_ddi; |
| 2376 | if (IS_BROXTON(dev_priv)) | ||
| 2377 | intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable; | ||
| 2354 | intel_encoder->pre_enable = intel_ddi_pre_enable; | 2378 | intel_encoder->pre_enable = intel_ddi_pre_enable; |
| 2355 | intel_encoder->disable = intel_disable_ddi; | 2379 | intel_encoder->disable = intel_disable_ddi; |
| 2356 | intel_encoder->post_disable = intel_ddi_post_disable; | 2380 | intel_encoder->post_disable = intel_ddi_post_disable; |
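The per-lane latency-optim programming moves out of one-shot PHY init and into the modeset path: compute_config derives a mask from the lane count, the new pre_pll_enable hook writes it to BXT_PORT_TX_DW14_LN, and get_config reads it back so state verification keeps working. Worked out, the mask from bxt_ddi_phy_calc_lane_lat_optim_mask() is:

    /*
     * lane_count == 1  ->  0b0000  no lane optimised
     * lane_count == 2  ->  0b0101  lanes 0 and 2
     * lane_count == 4  ->  0b1101  lanes 0, 2 and 3; lane 1 stays clear,
     *                              matching the old loop's "every lane
     *                              except 1" for the 4-lane case
     */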
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b12085a8bfa6..0b2cd669ac05 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include "intel_drv.h" | 36 | #include "intel_drv.h" |
| 37 | #include <drm/i915_drm.h> | 37 | #include <drm/i915_drm.h> |
| 38 | #include "i915_drv.h" | 38 | #include "i915_drv.h" |
| 39 | #include "i915_gem_dmabuf.h" | ||
| 39 | #include "intel_dsi.h" | 40 | #include "intel_dsi.h" |
| 40 | #include "i915_trace.h" | 41 | #include "i915_trace.h" |
| 41 | #include <drm/drm_atomic.h> | 42 | #include <drm/drm_atomic.h> |
| @@ -46,7 +47,6 @@ | |||
| 46 | #include <drm/drm_rect.h> | 47 | #include <drm/drm_rect.h> |
| 47 | #include <linux/dma_remapping.h> | 48 | #include <linux/dma_remapping.h> |
| 48 | #include <linux/reservation.h> | 49 | #include <linux/reservation.h> |
| 49 | #include <linux/dma-buf.h> | ||
| 50 | 50 | ||
| 51 | static bool is_mmio_work(struct intel_flip_work *work) | 51 | static bool is_mmio_work(struct intel_flip_work *work) |
| 52 | { | 52 | { |
| @@ -123,7 +123,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc); | |||
| 123 | static void intel_modeset_setup_hw_state(struct drm_device *dev); | 123 | static void intel_modeset_setup_hw_state(struct drm_device *dev); |
| 124 | static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); | 124 | static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); |
| 125 | static int ilk_max_pixel_rate(struct drm_atomic_state *state); | 125 | static int ilk_max_pixel_rate(struct drm_atomic_state *state); |
| 126 | static int broxton_calc_cdclk(int max_pixclk); | 126 | static int bxt_calc_cdclk(int max_pixclk); |
| 127 | 127 | ||
| 128 | struct intel_limit { | 128 | struct intel_limit { |
| 129 | struct { | 129 | struct { |
| @@ -4641,14 +4641,14 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) | |||
| 4641 | struct intel_plane_state *old_primary_state = | 4641 | struct intel_plane_state *old_primary_state = |
| 4642 | to_intel_plane_state(old_pri_state); | 4642 | to_intel_plane_state(old_pri_state); |
| 4643 | 4643 | ||
| 4644 | intel_fbc_pre_update(crtc); | 4644 | intel_fbc_pre_update(crtc, pipe_config, primary_state); |
| 4645 | 4645 | ||
| 4646 | if (old_primary_state->visible && | 4646 | if (old_primary_state->visible && |
| 4647 | (modeset || !primary_state->visible)) | 4647 | (modeset || !primary_state->visible)) |
| 4648 | intel_pre_disable_primary(&crtc->base); | 4648 | intel_pre_disable_primary(&crtc->base); |
| 4649 | } | 4649 | } |
| 4650 | 4650 | ||
| 4651 | if (pipe_config->disable_cxsr) { | 4651 | if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) { |
| 4652 | crtc->wm.cxsr_allowed = false; | 4652 | crtc->wm.cxsr_allowed = false; |
| 4653 | 4653 | ||
| 4654 | /* | 4654 | /* |
| @@ -4841,6 +4841,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) | |||
| 4841 | intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, | 4841 | intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, |
| 4842 | false); | 4842 | false); |
| 4843 | 4843 | ||
| 4844 | for_each_encoder_on_crtc(dev, crtc, encoder) | ||
| 4845 | if (encoder->pre_pll_enable) | ||
| 4846 | encoder->pre_pll_enable(encoder); | ||
| 4847 | |||
| 4844 | if (intel_crtc->config->shared_dpll) | 4848 | if (intel_crtc->config->shared_dpll) |
| 4845 | intel_enable_shared_dpll(intel_crtc); | 4849 | intel_enable_shared_dpll(intel_crtc); |
| 4846 | 4850 | ||
| @@ -5416,7 +5420,7 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) | |||
| 5416 | dev_priv->cdclk_pll.vco = vco; | 5420 | dev_priv->cdclk_pll.vco = vco; |
| 5417 | } | 5421 | } |
| 5418 | 5422 | ||
| 5419 | static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int cdclk) | 5423 | static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk) |
| 5420 | { | 5424 | { |
| 5421 | u32 val, divider; | 5425 | u32 val, divider; |
| 5422 | int vco, ret; | 5426 | int vco, ret; |
| @@ -5541,7 +5545,7 @@ sanitize: | |||
| 5541 | dev_priv->cdclk_pll.vco = -1; | 5545 | dev_priv->cdclk_pll.vco = -1; |
| 5542 | } | 5546 | } |
| 5543 | 5547 | ||
| 5544 | void broxton_init_cdclk(struct drm_i915_private *dev_priv) | 5548 | void bxt_init_cdclk(struct drm_i915_private *dev_priv) |
| 5545 | { | 5549 | { |
| 5546 | bxt_sanitize_cdclk(dev_priv); | 5550 | bxt_sanitize_cdclk(dev_priv); |
| 5547 | 5551 | ||
| @@ -5553,12 +5557,12 @@ void broxton_init_cdclk(struct drm_i915_private *dev_priv) | |||
| 5553 | * - The initial CDCLK needs to be read from VBT. | 5557 | * - The initial CDCLK needs to be read from VBT. |
| 5554 | * Need to make this change after VBT has changes for BXT. | 5558 | * Need to make this change after VBT has changes for BXT. |
| 5555 | */ | 5559 | */ |
| 5556 | broxton_set_cdclk(dev_priv, broxton_calc_cdclk(0)); | 5560 | bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0)); |
| 5557 | } | 5561 | } |
| 5558 | 5562 | ||
| 5559 | void broxton_uninit_cdclk(struct drm_i915_private *dev_priv) | 5563 | void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) |
| 5560 | { | 5564 | { |
| 5561 | broxton_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref); | 5565 | bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref); |
| 5562 | } | 5566 | } |
| 5563 | 5567 | ||
| 5564 | static int skl_calc_cdclk(int max_pixclk, int vco) | 5568 | static int skl_calc_cdclk(int max_pixclk, int vco) |
| @@ -5984,7 +5988,7 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, | |||
| 5984 | return 200000; | 5988 | return 200000; |
| 5985 | } | 5989 | } |
| 5986 | 5990 | ||
| 5987 | static int broxton_calc_cdclk(int max_pixclk) | 5991 | static int bxt_calc_cdclk(int max_pixclk) |
| 5988 | { | 5992 | { |
| 5989 | if (max_pixclk > 576000) | 5993 | if (max_pixclk > 576000) |
| 5990 | return 624000; | 5994 | return 624000; |
| @@ -6044,17 +6048,17 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) | |||
| 6044 | return 0; | 6048 | return 0; |
| 6045 | } | 6049 | } |
| 6046 | 6050 | ||
| 6047 | static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state) | 6051 | static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state) |
| 6048 | { | 6052 | { |
| 6049 | int max_pixclk = ilk_max_pixel_rate(state); | 6053 | int max_pixclk = ilk_max_pixel_rate(state); |
| 6050 | struct intel_atomic_state *intel_state = | 6054 | struct intel_atomic_state *intel_state = |
| 6051 | to_intel_atomic_state(state); | 6055 | to_intel_atomic_state(state); |
| 6052 | 6056 | ||
| 6053 | intel_state->cdclk = intel_state->dev_cdclk = | 6057 | intel_state->cdclk = intel_state->dev_cdclk = |
| 6054 | broxton_calc_cdclk(max_pixclk); | 6058 | bxt_calc_cdclk(max_pixclk); |
| 6055 | 6059 | ||
| 6056 | if (!intel_state->active_crtcs) | 6060 | if (!intel_state->active_crtcs) |
| 6057 | intel_state->dev_cdclk = broxton_calc_cdclk(0); | 6061 | intel_state->dev_cdclk = bxt_calc_cdclk(0); |
| 6058 | 6062 | ||
| 6059 | return 0; | 6063 | return 0; |
| 6060 | } | 6064 | } |
| @@ -8430,12 +8434,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) | |||
| 8430 | else | 8434 | else |
| 8431 | final |= DREF_NONSPREAD_SOURCE_ENABLE; | 8435 | final |= DREF_NONSPREAD_SOURCE_ENABLE; |
| 8432 | 8436 | ||
| 8437 | final &= ~DREF_SSC_SOURCE_MASK; | ||
| 8433 | final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | 8438 | final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; |
| 8434 | 8439 | final &= ~DREF_SSC1_ENABLE; | |
| 8435 | if (!using_ssc_source) { | ||
| 8436 | final &= ~DREF_SSC_SOURCE_MASK; | ||
| 8437 | final &= ~DREF_SSC1_ENABLE; | ||
| 8438 | } | ||
| 8439 | 8440 | ||
| 8440 | if (has_panel) { | 8441 | if (has_panel) { |
| 8441 | final |= DREF_SSC_SOURCE_ENABLE; | 8442 | final |= DREF_SSC_SOURCE_ENABLE; |
| @@ -8450,9 +8451,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) | |||
| 8450 | final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | 8451 | final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; |
| 8451 | } else | 8452 | } else |
| 8452 | final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; | 8453 | final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; |
| 8453 | } else { | 8454 | } else if (using_ssc_source) { |
| 8454 | final |= DREF_SSC_SOURCE_DISABLE; | 8455 | final |= DREF_SSC_SOURCE_ENABLE; |
| 8455 | final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; | 8456 | final |= DREF_SSC1_ENABLE; |
| 8456 | } | 8457 | } |
| 8457 | 8458 | ||
| 8458 | if (final == val) | 8459 | if (final == val) |
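Pieced together, the two hunks above leave ironlake_init_pch_refclk() computing the final reference clock state as follows (a condensed sketch of the resulting code, unrelated lines elided):

	/* Always start from a clean slate for the SSC bits... */
	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;
		/* ... SSC1 and CPU source output selection elided ... */
	} else if (using_ssc_source) {
		/* ...but keep the SSC source clock running when another
		 * encoder still depends on it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}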
| @@ -9673,14 +9674,14 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv) | |||
| 9673 | } | 9674 | } |
| 9674 | } | 9675 | } |
| 9675 | 9676 | ||
| 9676 | static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state) | 9677 | static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state) |
| 9677 | { | 9678 | { |
| 9678 | struct drm_device *dev = old_state->dev; | 9679 | struct drm_device *dev = old_state->dev; |
| 9679 | struct intel_atomic_state *old_intel_state = | 9680 | struct intel_atomic_state *old_intel_state = |
| 9680 | to_intel_atomic_state(old_state); | 9681 | to_intel_atomic_state(old_state); |
| 9681 | unsigned int req_cdclk = old_intel_state->dev_cdclk; | 9682 | unsigned int req_cdclk = old_intel_state->dev_cdclk; |
| 9682 | 9683 | ||
| 9683 | broxton_set_cdclk(to_i915(dev), req_cdclk); | 9684 | bxt_set_cdclk(to_i915(dev), req_cdclk); |
| 9684 | } | 9685 | } |
| 9685 | 9686 | ||
| 9686 | /* compute the max rate for new configuration */ | 9687 | /* compute the max rate for new configuration */ |
| @@ -11428,6 +11429,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
| 11428 | static bool use_mmio_flip(struct intel_engine_cs *engine, | 11429 | static bool use_mmio_flip(struct intel_engine_cs *engine, |
| 11429 | struct drm_i915_gem_object *obj) | 11430 | struct drm_i915_gem_object *obj) |
| 11430 | { | 11431 | { |
| 11432 | struct reservation_object *resv; | ||
| 11433 | |||
| 11431 | /* | 11434 | /* |
| 11432 | * This is not being used for older platforms, because | 11435 | * This is not being used for older platforms, because |
| 11433 | * non-availability of flip done interrupt forces us to use | 11436 | * non-availability of flip done interrupt forces us to use |
| @@ -11448,12 +11451,12 @@ static bool use_mmio_flip(struct intel_engine_cs *engine, | |||
| 11448 | return true; | 11451 | return true; |
| 11449 | else if (i915.enable_execlists) | 11452 | else if (i915.enable_execlists) |
| 11450 | return true; | 11453 | return true; |
| 11451 | else if (obj->base.dma_buf && | 11454 | |
| 11452 | !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv, | 11455 | resv = i915_gem_object_get_dmabuf_resv(obj); |
| 11453 | false)) | 11456 | if (resv && !reservation_object_test_signaled_rcu(resv, false)) |
| 11454 | return true; | 11457 | return true; |
| 11455 | else | 11458 | |
| 11456 | return engine != i915_gem_request_get_engine(obj->last_write_req); | 11459 | return engine != i915_gem_request_get_engine(obj->last_write_req); |
| 11457 | } | 11460 | } |
| 11458 | 11461 | ||
| 11459 | static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, | 11462 | static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, |
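This call site and the two below share a small helper from the new i915_gem_dmabuf.h header included at the top of the file. The helper's body is not part of this diff; a plausible sketch of what the callers rely on:

	static inline struct reservation_object *
	i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
	{
		struct dma_buf *dma_buf;

		/* Objects exported or imported as a dma-buf carry a
		 * reservation object; anything else returns NULL so the
		 * caller can skip the fence wait entirely. */
		if (obj->base.dma_buf)
			dma_buf = obj->base.dma_buf;
		else if (obj->base.import_attach)
			dma_buf = obj->base.import_attach->dmabuf;
		else
			return NULL;

		return dma_buf->resv;
	}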
| @@ -11542,6 +11545,7 @@ static void intel_mmio_flip_work_func(struct work_struct *w) | |||
| 11542 | struct intel_framebuffer *intel_fb = | 11545 | struct intel_framebuffer *intel_fb = |
| 11543 | to_intel_framebuffer(crtc->base.primary->fb); | 11546 | to_intel_framebuffer(crtc->base.primary->fb); |
| 11544 | struct drm_i915_gem_object *obj = intel_fb->obj; | 11547 | struct drm_i915_gem_object *obj = intel_fb->obj; |
| 11548 | struct reservation_object *resv; | ||
| 11545 | 11549 | ||
| 11546 | if (work->flip_queued_req) | 11550 | if (work->flip_queued_req) |
| 11547 | WARN_ON(__i915_wait_request(work->flip_queued_req, | 11551 | WARN_ON(__i915_wait_request(work->flip_queued_req, |
| @@ -11549,9 +11553,9 @@ static void intel_mmio_flip_work_func(struct work_struct *w) | |||
| 11549 | &dev_priv->rps.mmioflips)); | 11553 | &dev_priv->rps.mmioflips)); |
| 11550 | 11554 | ||
| 11551 | /* For framebuffer backed by dmabuf, wait for fence */ | 11555 | /* For framebuffer backed by dmabuf, wait for fence */ |
| 11552 | if (obj->base.dma_buf) | 11556 | resv = i915_gem_object_get_dmabuf_resv(obj); |
| 11553 | WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv, | 11557 | if (resv) |
| 11554 | false, false, | 11558 | WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false, |
| 11555 | MAX_SCHEDULE_TIMEOUT) < 0); | 11559 | MAX_SCHEDULE_TIMEOUT) < 0); |
| 11556 | 11560 | ||
| 11557 | intel_pipe_update_start(crtc); | 11561 | intel_pipe_update_start(crtc); |
| @@ -11642,6 +11646,7 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe) | |||
| 11642 | spin_unlock(&dev->event_lock); | 11646 | spin_unlock(&dev->event_lock); |
| 11643 | } | 11647 | } |
| 11644 | 11648 | ||
| 11649 | __maybe_unused | ||
| 11645 | static int intel_crtc_page_flip(struct drm_crtc *crtc, | 11650 | static int intel_crtc_page_flip(struct drm_crtc *crtc, |
| 11646 | struct drm_framebuffer *fb, | 11651 | struct drm_framebuffer *fb, |
| 11647 | struct drm_pending_vblank_event *event, | 11652 | struct drm_pending_vblank_event *event, |
| @@ -11727,7 +11732,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 11727 | 11732 | ||
| 11728 | crtc->primary->fb = fb; | 11733 | crtc->primary->fb = fb; |
| 11729 | update_state_fb(crtc->primary); | 11734 | update_state_fb(crtc->primary); |
| 11730 | intel_fbc_pre_update(intel_crtc); | 11735 | |
| 11736 | intel_fbc_pre_update(intel_crtc, intel_crtc->config, | ||
| 11737 | to_intel_plane_state(primary->state)); | ||
| 11731 | 11738 | ||
| 11732 | work->pending_flip_obj = obj; | 11739 | work->pending_flip_obj = obj; |
| 11733 | 11740 | ||
| @@ -12816,6 +12823,7 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
| 12816 | 12823 | ||
| 12817 | PIPE_CONF_CHECK_I(has_dp_encoder); | 12824 | PIPE_CONF_CHECK_I(has_dp_encoder); |
| 12818 | PIPE_CONF_CHECK_I(lane_count); | 12825 | PIPE_CONF_CHECK_I(lane_count); |
| 12826 | PIPE_CONF_CHECK_X(lane_lat_optim_mask); | ||
| 12819 | 12827 | ||
| 12820 | if (INTEL_INFO(dev)->gen < 8) { | 12828 | if (INTEL_INFO(dev)->gen < 8) { |
| 12821 | PIPE_CONF_CHECK_M_N(dp_m_n); | 12829 | PIPE_CONF_CHECK_M_N(dp_m_n); |
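PIPE_CONF_CHECK_X is the hex-formatted member of the checker macro family defined earlier in intel_pipe_config_compare(); roughly (a sketch, the real macro may differ in its reporting helper):

	#define PIPE_CONF_CHECK_X(name) \
		if (current_config->name != pipe_config->name) { \
			INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
					     "(expected 0x%08x, found 0x%08x)\n", \
					     current_config->name, \
					     pipe_config->name); \
			ret = false; \
		}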
| @@ -13567,11 +13575,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, | |||
| 13567 | struct drm_crtc *crtc; | 13575 | struct drm_crtc *crtc; |
| 13568 | int i, ret; | 13576 | int i, ret; |
| 13569 | 13577 | ||
| 13570 | if (nonblock) { | ||
| 13571 | DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n"); | ||
| 13572 | return -EINVAL; | ||
| 13573 | } | ||
| 13574 | |||
| 13575 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 13578 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
| 13576 | if (state->legacy_cursor_update) | 13579 | if (state->legacy_cursor_update) |
| 13577 | continue; | 13580 | continue; |
| @@ -13690,46 +13693,36 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state) | |||
| 13690 | return false; | 13693 | return false; |
| 13691 | } | 13694 | } |
| 13692 | 13695 | ||
| 13693 | /** | 13696 | static void intel_atomic_commit_tail(struct drm_atomic_state *state) |
| 13694 | * intel_atomic_commit - commit validated state object | ||
| 13695 | * @dev: DRM device | ||
| 13696 | * @state: the top-level driver state object | ||
| 13697 | * @nonblock: nonblocking commit | ||
| 13698 | * | ||
| 13699 | * This function commits a top-level state object that has been validated | ||
| 13700 | * with drm_atomic_helper_check(). | ||
| 13701 | * | ||
| 13702 | * FIXME: Atomic modeset support for i915 is not yet complete. At the moment | ||
| 13703 | * we can only handle plane-related operations and do not yet support | ||
| 13704 | * nonblocking commit. | ||
| 13705 | * | ||
| 13706 | * RETURNS | ||
| 13707 | * Zero for success or -errno. | ||
| 13708 | */ | ||
| 13709 | static int intel_atomic_commit(struct drm_device *dev, | ||
| 13710 | struct drm_atomic_state *state, | ||
| 13711 | bool nonblock) | ||
| 13712 | { | 13697 | { |
| 13698 | struct drm_device *dev = state->dev; | ||
| 13713 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); | 13699 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); |
| 13714 | struct drm_i915_private *dev_priv = dev->dev_private; | 13700 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 13715 | struct drm_crtc_state *old_crtc_state; | 13701 | struct drm_crtc_state *old_crtc_state; |
| 13716 | struct drm_crtc *crtc; | 13702 | struct drm_crtc *crtc; |
| 13717 | struct intel_crtc_state *intel_cstate; | 13703 | struct intel_crtc_state *intel_cstate; |
| 13718 | int ret = 0, i; | 13704 | struct drm_plane *plane; |
| 13705 | struct drm_plane_state *plane_state; | ||
| 13719 | bool hw_check = intel_state->modeset; | 13706 | bool hw_check = intel_state->modeset; |
| 13720 | unsigned long put_domains[I915_MAX_PIPES] = {}; | 13707 | unsigned long put_domains[I915_MAX_PIPES] = {}; |
| 13721 | unsigned crtc_vblank_mask = 0; | 13708 | unsigned crtc_vblank_mask = 0; |
| 13709 | int i, ret; | ||
| 13722 | 13710 | ||
| 13723 | ret = intel_atomic_prepare_commit(dev, state, nonblock); | 13711 | for_each_plane_in_state(state, plane, plane_state, i) { |
| 13724 | if (ret) { | 13712 | struct intel_plane_state *intel_plane_state = |
| 13725 | DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); | 13713 | to_intel_plane_state(plane_state); |
| 13726 | return ret; | 13714 | |
| 13715 | if (!intel_plane_state->wait_req) | ||
| 13716 | continue; | ||
| 13717 | |||
| 13718 | ret = __i915_wait_request(intel_plane_state->wait_req, | ||
| 13719 | true, NULL, NULL); | ||
| 13720 | /* EIO should be eaten, and we can't get interrupted in the | ||
| 13721 | * worker, and blocking commits have waited already. */ | ||
| 13722 | WARN_ON(ret); | ||
| 13727 | } | 13723 | } |
| 13728 | 13724 | ||
| 13729 | drm_atomic_helper_swap_state(state, true); | 13725 | drm_atomic_helper_wait_for_dependencies(state); |
| 13730 | dev_priv->wm.distrust_bios_wm = false; | ||
| 13731 | dev_priv->wm.skl_results = intel_state->wm_results; | ||
| 13732 | intel_shared_dpll_commit(state); | ||
| 13733 | 13726 | ||
| 13734 | if (intel_state->modeset) { | 13727 | if (intel_state->modeset) { |
| 13735 | memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, | 13728 | memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, |
| @@ -13797,30 +13790,44 @@ static int intel_atomic_commit(struct drm_device *dev, | |||
| 13797 | bool modeset = needs_modeset(crtc->state); | 13790 | bool modeset = needs_modeset(crtc->state); |
| 13798 | struct intel_crtc_state *pipe_config = | 13791 | struct intel_crtc_state *pipe_config = |
| 13799 | to_intel_crtc_state(crtc->state); | 13792 | to_intel_crtc_state(crtc->state); |
| 13800 | bool update_pipe = !modeset && pipe_config->update_pipe; | ||
| 13801 | 13793 | ||
| 13802 | if (modeset && crtc->state->active) { | 13794 | if (modeset && crtc->state->active) { |
| 13803 | update_scanline_offset(to_intel_crtc(crtc)); | 13795 | update_scanline_offset(to_intel_crtc(crtc)); |
| 13804 | dev_priv->display.crtc_enable(crtc); | 13796 | dev_priv->display.crtc_enable(crtc); |
| 13805 | } | 13797 | } |
| 13806 | 13798 | ||
| 13799 | /* Complete events for now-disabled pipes here. */ | ||
| 13800 | if (modeset && !crtc->state->active && crtc->state->event) { | ||
| 13801 | spin_lock_irq(&dev->event_lock); | ||
| 13802 | drm_crtc_send_vblank_event(crtc, crtc->state->event); | ||
| 13803 | spin_unlock_irq(&dev->event_lock); | ||
| 13804 | |||
| 13805 | crtc->state->event = NULL; | ||
| 13806 | } | ||
| 13807 | |||
| 13807 | if (!modeset) | 13808 | if (!modeset) |
| 13808 | intel_pre_plane_update(to_intel_crtc_state(old_crtc_state)); | 13809 | intel_pre_plane_update(to_intel_crtc_state(old_crtc_state)); |
| 13809 | 13810 | ||
| 13810 | if (crtc->state->active && | 13811 | if (crtc->state->active && |
| 13811 | drm_atomic_get_existing_plane_state(state, crtc->primary)) | 13812 | drm_atomic_get_existing_plane_state(state, crtc->primary)) |
| 13812 | intel_fbc_enable(intel_crtc); | 13813 | intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state)); |
| 13813 | 13814 | ||
| 13814 | if (crtc->state->active && | 13815 | if (crtc->state->active) |
| 13815 | (crtc->state->planes_changed || update_pipe)) | ||
| 13816 | drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); | 13816 | drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); |
| 13817 | 13817 | ||
| 13818 | if (pipe_config->base.active && needs_vblank_wait(pipe_config)) | 13818 | if (pipe_config->base.active && needs_vblank_wait(pipe_config)) |
| 13819 | crtc_vblank_mask |= 1 << i; | 13819 | crtc_vblank_mask |= 1 << i; |
| 13820 | } | 13820 | } |
| 13821 | 13821 | ||
| 13822 | /* FIXME: add subpixel order */ | 13822 | /* FIXME: We should call drm_atomic_helper_commit_hw_done() here |
| 13823 | 13823 | * already, but still need the state for the delayed optimization. To | |
| 13824 | * fix this: | ||
| 13825 | * - wrap the optimization/post_plane_update stuff into a per-crtc work. | ||
| 13826 | * - schedule that vblank worker _before_ calling hw_done | ||
| 13827 | * - at the start of commit_tail, cancel it _synchronously | ||
| 13828 | * - switch over to the vblank wait helper in the core after that since | ||
| 13829 | * we don't need our special handling any more. | ||
| 13830 | */ | ||
| 13824 | if (!state->legacy_cursor_update) | 13831 | if (!state->legacy_cursor_update) |
| 13825 | intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask); | 13832 | intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask); |
| 13826 | 13833 | ||
| @@ -13847,6 +13854,8 @@ static int intel_atomic_commit(struct drm_device *dev, | |||
| 13847 | intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state); | 13854 | intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state); |
| 13848 | } | 13855 | } |
| 13849 | 13856 | ||
| 13857 | drm_atomic_helper_commit_hw_done(state); | ||
| 13858 | |||
| 13850 | if (intel_state->modeset) | 13859 | if (intel_state->modeset) |
| 13851 | intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); | 13860 | intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); |
| 13852 | 13861 | ||
| @@ -13854,6 +13863,8 @@ static int intel_atomic_commit(struct drm_device *dev, | |||
| 13854 | drm_atomic_helper_cleanup_planes(dev, state); | 13863 | drm_atomic_helper_cleanup_planes(dev, state); |
| 13855 | mutex_unlock(&dev->struct_mutex); | 13864 | mutex_unlock(&dev->struct_mutex); |
| 13856 | 13865 | ||
| 13866 | drm_atomic_helper_commit_cleanup_done(state); | ||
| 13867 | |||
| 13857 | drm_atomic_state_free(state); | 13868 | drm_atomic_state_free(state); |
| 13858 | 13869 | ||
| 13859 | /* As one of the primary mmio accessors, KMS has a high likelihood | 13870 | /* As one of the primary mmio accessors, KMS has a high likelihood |
| @@ -13868,6 +13879,86 @@ static int intel_atomic_commit(struct drm_device *dev, | |||
| 13868 | * can happen also when the device is completely off. | 13879 | * can happen also when the device is completely off. |
| 13869 | */ | 13880 | */ |
| 13870 | intel_uncore_arm_unclaimed_mmio_detection(dev_priv); | 13881 | intel_uncore_arm_unclaimed_mmio_detection(dev_priv); |
| 13882 | } | ||
| 13883 | |||
| 13884 | static void intel_atomic_commit_work(struct work_struct *work) | ||
| 13885 | { | ||
| 13886 | struct drm_atomic_state *state = container_of(work, | ||
| 13887 | struct drm_atomic_state, | ||
| 13888 | commit_work); | ||
| 13889 | intel_atomic_commit_tail(state); | ||
| 13890 | } | ||
| 13891 | |||
| 13892 | static void intel_atomic_track_fbs(struct drm_atomic_state *state) | ||
| 13893 | { | ||
| 13894 | struct drm_plane_state *old_plane_state; | ||
| 13895 | struct drm_plane *plane; | ||
| 13896 | struct drm_i915_gem_object *obj, *old_obj; | ||
| 13897 | struct intel_plane *intel_plane; | ||
| 13898 | int i; | ||
| 13899 | |||
| 13900 | mutex_lock(&state->dev->struct_mutex); | ||
| 13901 | for_each_plane_in_state(state, plane, old_plane_state, i) { | ||
| 13902 | obj = intel_fb_obj(plane->state->fb); | ||
| 13903 | old_obj = intel_fb_obj(old_plane_state->fb); | ||
| 13904 | intel_plane = to_intel_plane(plane); | ||
| 13905 | |||
| 13906 | i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); | ||
| 13907 | } | ||
| 13908 | mutex_unlock(&state->dev->struct_mutex); | ||
| 13909 | } | ||
| 13910 | |||
| 13911 | /** | ||
| 13912 | * intel_atomic_commit - commit validated state object | ||
| 13913 | * @dev: DRM device | ||
| 13914 | * @state: the top-level driver state object | ||
| 13915 | * @nonblock: nonblocking commit | ||
| 13916 | * | ||
| 13917 | * This function commits a top-level state object that has been validated | ||
| 13918 | * with drm_atomic_helper_check(). | ||
| 13919 | * | ||
| 13920 | * FIXME: Atomic modeset support for i915 is not yet complete. At the moment | ||
| 13921 | * nonblocking commits are only safe for pure plane updates; a nonblocking | ||
| 13922 | * modeset is rejected with -EINVAL below. | ||
| 13923 | * | ||
| 13924 | * RETURNS | ||
| 13925 | * Zero for success or -errno. | ||
| 13926 | */ | ||
| 13927 | static int intel_atomic_commit(struct drm_device *dev, | ||
| 13928 | struct drm_atomic_state *state, | ||
| 13929 | bool nonblock) | ||
| 13930 | { | ||
| 13931 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); | ||
| 13932 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 13933 | int ret = 0; | ||
| 13934 | |||
| 13935 | if (intel_state->modeset && nonblock) { | ||
| 13936 | DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n"); | ||
| 13937 | return -EINVAL; | ||
| 13938 | } | ||
| 13939 | |||
| 13940 | ret = drm_atomic_helper_setup_commit(state, nonblock); | ||
| 13941 | if (ret) | ||
| 13942 | return ret; | ||
| 13943 | |||
| 13944 | INIT_WORK(&state->commit_work, intel_atomic_commit_work); | ||
| 13945 | |||
| 13946 | ret = intel_atomic_prepare_commit(dev, state, nonblock); | ||
| 13947 | if (ret) { | ||
| 13948 | DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); | ||
| 13949 | return ret; | ||
| 13950 | } | ||
| 13951 | |||
| 13952 | drm_atomic_helper_swap_state(state, true); | ||
| 13953 | dev_priv->wm.distrust_bios_wm = false; | ||
| 13954 | dev_priv->wm.skl_results = intel_state->wm_results; | ||
| 13955 | intel_shared_dpll_commit(state); | ||
| 13956 | intel_atomic_track_fbs(state); | ||
| 13957 | |||
| 13958 | if (nonblock) | ||
| 13959 | queue_work(system_unbound_wq, &state->commit_work); | ||
| 13960 | else | ||
| 13961 | intel_atomic_commit_tail(state); | ||
| 13871 | 13962 | ||
| 13872 | return 0; | 13963 | return 0; |
| 13873 | } | 13964 | } |
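The net effect of the restructuring is a two-phase commit: everything that can fail runs in intel_atomic_commit() in process context, and intel_atomic_commit_tail() only ever succeeds, so it can run from a worker. A sketch of the resulting call order, as assumed from the hunks above:

	/*
	 * intel_atomic_commit()                  process context, may fail
	 *     drm_atomic_helper_setup_commit()
	 *     intel_atomic_prepare_commit()      pin framebuffers, collect fences
	 *     drm_atomic_helper_swap_state()
	 *     intel_atomic_track_fbs()           frontbuffer bits flip at swap time
	 *     nonblock ? queue_work(system_unbound_wq, &state->commit_work)
	 *              : intel_atomic_commit_tail(state)
	 *
	 * intel_atomic_commit_tail()             possibly a worker, must not fail
	 *     __i915_wait_request() on each plane's wait_req
	 *     drm_atomic_helper_wait_for_dependencies()
	 *     ... modeset and plane programming ...
	 *     drm_atomic_helper_commit_hw_done()
	 *     drm_atomic_helper_cleanup_planes()
	 *     drm_atomic_helper_commit_cleanup_done()
	 *     drm_atomic_state_free()
	 */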
| @@ -13917,7 +14008,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { | |||
| 13917 | .set_config = drm_atomic_helper_set_config, | 14008 | .set_config = drm_atomic_helper_set_config, |
| 13918 | .set_property = drm_atomic_helper_crtc_set_property, | 14009 | .set_property = drm_atomic_helper_crtc_set_property, |
| 13919 | .destroy = intel_crtc_destroy, | 14010 | .destroy = intel_crtc_destroy, |
| 13920 | .page_flip = intel_crtc_page_flip, | 14011 | .page_flip = drm_atomic_helper_page_flip, |
| 13921 | .atomic_duplicate_state = intel_crtc_duplicate_state, | 14012 | .atomic_duplicate_state = intel_crtc_duplicate_state, |
| 13922 | .atomic_destroy_state = intel_crtc_destroy_state, | 14013 | .atomic_destroy_state = intel_crtc_destroy_state, |
| 13923 | }; | 14014 | }; |
| @@ -13942,9 +14033,9 @@ intel_prepare_plane_fb(struct drm_plane *plane, | |||
| 13942 | { | 14033 | { |
| 13943 | struct drm_device *dev = plane->dev; | 14034 | struct drm_device *dev = plane->dev; |
| 13944 | struct drm_framebuffer *fb = new_state->fb; | 14035 | struct drm_framebuffer *fb = new_state->fb; |
| 13945 | struct intel_plane *intel_plane = to_intel_plane(plane); | ||
| 13946 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 14036 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
| 13947 | struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); | 14037 | struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); |
| 14038 | struct reservation_object *resv; | ||
| 13948 | int ret = 0; | 14039 | int ret = 0; |
| 13949 | 14040 | ||
| 13950 | if (!obj && !old_obj) | 14041 | if (!obj && !old_obj) |
| @@ -13974,12 +14065,15 @@ intel_prepare_plane_fb(struct drm_plane *plane, | |||
| 13974 | } | 14065 | } |
| 13975 | } | 14066 | } |
| 13976 | 14067 | ||
| 14068 | if (!obj) | ||
| 14069 | return 0; | ||
| 14070 | |||
| 13977 | /* For framebuffer backed by dmabuf, wait for fence */ | 14071 | /* For framebuffer backed by dmabuf, wait for fence */ |
| 13978 | if (obj && obj->base.dma_buf) { | 14072 | resv = i915_gem_object_get_dmabuf_resv(obj); |
| 14073 | if (resv) { | ||
| 13979 | long lret; | 14074 | long lret; |
| 13980 | 14075 | ||
| 13981 | lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv, | 14076 | lret = reservation_object_wait_timeout_rcu(resv, false, true, |
| 13982 | false, true, | ||
| 13983 | MAX_SCHEDULE_TIMEOUT); | 14077 | MAX_SCHEDULE_TIMEOUT); |
| 13984 | if (lret == -ERESTARTSYS) | 14078 | if (lret == -ERESTARTSYS) |
| 13985 | return lret; | 14079 | return lret; |
| @@ -13987,9 +14081,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, | |||
| 13987 | WARN(lret < 0, "waiting returns %li\n", lret); | 14081 | WARN(lret < 0, "waiting returns %li\n", lret); |
| 13988 | } | 14082 | } |
| 13989 | 14083 | ||
| 13990 | if (!obj) { | 14084 | if (plane->type == DRM_PLANE_TYPE_CURSOR && |
| 13991 | ret = 0; | ||
| 13992 | } else if (plane->type == DRM_PLANE_TYPE_CURSOR && | ||
| 13993 | INTEL_INFO(dev)->cursor_needs_physical) { | 14085 | INTEL_INFO(dev)->cursor_needs_physical) { |
| 13994 | int align = IS_I830(dev) ? 16 * 1024 : 256; | 14086 | int align = IS_I830(dev) ? 16 * 1024 : 256; |
| 13995 | ret = i915_gem_object_attach_phys(obj, align); | 14087 | ret = i915_gem_object_attach_phys(obj, align); |
| @@ -14000,15 +14092,11 @@ intel_prepare_plane_fb(struct drm_plane *plane, | |||
| 14000 | } | 14092 | } |
| 14001 | 14093 | ||
| 14002 | if (ret == 0) { | 14094 | if (ret == 0) { |
| 14003 | if (obj) { | 14095 | struct intel_plane_state *plane_state = |
| 14004 | struct intel_plane_state *plane_state = | 14096 | to_intel_plane_state(new_state); |
| 14005 | to_intel_plane_state(new_state); | ||
| 14006 | |||
| 14007 | i915_gem_request_assign(&plane_state->wait_req, | ||
| 14008 | obj->last_write_req); | ||
| 14009 | } | ||
| 14010 | 14097 | ||
| 14011 | i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); | 14098 | i915_gem_request_assign(&plane_state->wait_req, |
| 14099 | obj->last_write_req); | ||
| 14012 | } | 14100 | } |
| 14013 | 14101 | ||
| 14014 | return ret; | 14102 | return ret; |
| @@ -14028,7 +14116,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane, | |||
| 14028 | const struct drm_plane_state *old_state) | 14116 | const struct drm_plane_state *old_state) |
| 14029 | { | 14117 | { |
| 14030 | struct drm_device *dev = plane->dev; | 14118 | struct drm_device *dev = plane->dev; |
| 14031 | struct intel_plane *intel_plane = to_intel_plane(plane); | ||
| 14032 | struct intel_plane_state *old_intel_state; | 14119 | struct intel_plane_state *old_intel_state; |
| 14033 | struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb); | 14120 | struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb); |
| 14034 | struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb); | 14121 | struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb); |
| @@ -14042,11 +14129,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane, | |||
| 14042 | !INTEL_INFO(dev)->cursor_needs_physical)) | 14129 | !INTEL_INFO(dev)->cursor_needs_physical)) |
| 14043 | intel_unpin_fb_obj(old_state->fb, old_state->rotation); | 14130 | intel_unpin_fb_obj(old_state->fb, old_state->rotation); |
| 14044 | 14131 | ||
| 14045 | /* prepare_fb aborted? */ | ||
| 14046 | if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) || | ||
| 14047 | (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit))) | ||
| 14048 | i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); | ||
| 14049 | |||
| 14050 | i915_gem_request_assign(&old_intel_state->wait_req, NULL); | 14132 | i915_gem_request_assign(&old_intel_state->wait_req, NULL); |
| 14051 | } | 14133 | } |
| 14052 | 14134 | ||
| @@ -14704,7 +14786,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 14704 | if (I915_READ(PCH_DP_D) & DP_DETECTED) | 14786 | if (I915_READ(PCH_DP_D) & DP_DETECTED) |
| 14705 | intel_dp_init(dev, PCH_DP_D, PORT_D); | 14787 | intel_dp_init(dev, PCH_DP_D, PORT_D); |
| 14706 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { | 14788 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
| 14707 | bool has_edp; | 14789 | bool has_edp, has_port; |
| 14708 | 14790 | ||
| 14709 | /* | 14791 | /* |
| 14710 | * The DP_DETECTED bit is the latched state of the DDC | 14792 | * The DP_DETECTED bit is the latched state of the DDC |
| @@ -14714,25 +14796,37 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 14714 | * Thus we can't rely on the DP_DETECTED bit alone to detect | 14796 | * Thus we can't rely on the DP_DETECTED bit alone to detect |
| 14715 | * eDP ports. Consult the VBT as well as DP_DETECTED to | 14797 | * eDP ports. Consult the VBT as well as DP_DETECTED to |
| 14716 | * detect eDP ports. | 14798 | * detect eDP ports. |
| 14799 | * | ||
| 14800 | * Sadly the straps seem to be missing sometimes even for HDMI | ||
| 14801 | * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap | ||
| 14802 | * and VBT for the presence of the port. Additionally we can't | ||
| 14803 | * trust the port type the VBT declares as we've seen at least | ||
| 14804 | * HDMI ports that the VBT claim are DP or eDP. | ||
| 14717 | */ | 14805 | */ |
| 14718 | has_edp = intel_dp_is_edp(dev, PORT_B); | 14806 | has_edp = intel_dp_is_edp(dev, PORT_B); |
| 14719 | if (I915_READ(VLV_DP_B) & DP_DETECTED || has_edp) | 14807 | has_port = intel_bios_is_port_present(dev_priv, PORT_B); |
| 14808 | if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) | ||
| 14720 | has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); | 14809 | has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); |
| 14721 | if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && !has_edp) | 14810 | if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) |
| 14722 | intel_hdmi_init(dev, VLV_HDMIB, PORT_B); | 14811 | intel_hdmi_init(dev, VLV_HDMIB, PORT_B); |
| 14723 | 14812 | ||
| 14724 | has_edp = intel_dp_is_edp(dev, PORT_C); | 14813 | has_edp = intel_dp_is_edp(dev, PORT_C); |
| 14725 | if (I915_READ(VLV_DP_C) & DP_DETECTED || has_edp) | 14814 | has_port = intel_bios_is_port_present(dev_priv, PORT_C); |
| 14815 | if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) | ||
| 14726 | has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); | 14816 | has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); |
| 14727 | if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && !has_edp) | 14817 | if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) |
| 14728 | intel_hdmi_init(dev, VLV_HDMIC, PORT_C); | 14818 | intel_hdmi_init(dev, VLV_HDMIC, PORT_C); |
| 14729 | 14819 | ||
| 14730 | if (IS_CHERRYVIEW(dev)) { | 14820 | if (IS_CHERRYVIEW(dev)) { |
| 14731 | /* eDP not supported on port D, so don't check VBT */ | 14821 | /* |
| 14732 | if (I915_READ(CHV_HDMID) & SDVO_DETECTED) | 14822 | * eDP not supported on port D, |
| 14733 | intel_hdmi_init(dev, CHV_HDMID, PORT_D); | 14823 | * so no need to worry about it |
| 14734 | if (I915_READ(CHV_DP_D) & DP_DETECTED) | 14824 | */ |
| 14825 | has_port = intel_bios_is_port_present(dev_priv, PORT_D); | ||
| 14826 | if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) | ||
| 14735 | intel_dp_init(dev, CHV_DP_D, PORT_D); | 14827 | intel_dp_init(dev, CHV_DP_D, PORT_D); |
| 14828 | if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) | ||
| 14829 | intel_hdmi_init(dev, CHV_HDMID, PORT_D); | ||
| 14736 | } | 14830 | } |
| 14737 | 14831 | ||
| 14738 | intel_dsi_init(dev); | 14832 | intel_dsi_init(dev); |
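The VLV/CHV probing rule above condenses to a single predicate per port; a sketch (the helper name is illustrative, the strap bits and ports are the pairs used above):

	static bool vlv_port_present(struct drm_i915_private *dev_priv,
				     bool strap_detected, enum port port)
	{
		/* A port is probed when either its hardware strap latched at
		 * boot (DP_DETECTED/SDVO_DETECTED, passed in by the caller)
		 * or the VBT claims the port exists; the VBT's claimed port
		 * *type* is deliberately not trusted. */
		return strap_detected ||
		       intel_bios_is_port_present(dev_priv, port);
	}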
| @@ -15214,9 +15308,9 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) | |||
| 15214 | valleyview_modeset_calc_cdclk; | 15308 | valleyview_modeset_calc_cdclk; |
| 15215 | } else if (IS_BROXTON(dev_priv)) { | 15309 | } else if (IS_BROXTON(dev_priv)) { |
| 15216 | dev_priv->display.modeset_commit_cdclk = | 15310 | dev_priv->display.modeset_commit_cdclk = |
| 15217 | broxton_modeset_commit_cdclk; | 15311 | bxt_modeset_commit_cdclk; |
| 15218 | dev_priv->display.modeset_calc_cdclk = | 15312 | dev_priv->display.modeset_calc_cdclk = |
| 15219 | broxton_modeset_calc_cdclk; | 15313 | bxt_modeset_calc_cdclk; |
| 15220 | } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { | 15314 | } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { |
| 15221 | dev_priv->display.modeset_commit_cdclk = | 15315 | dev_priv->display.modeset_commit_cdclk = |
| 15222 | skl_modeset_commit_cdclk; | 15316 | skl_modeset_commit_cdclk; |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 65e60ff3b553..7d0e071fe355 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -571,6 +571,12 @@ struct intel_crtc_state { | |||
| 571 | 571 | ||
| 572 | uint8_t lane_count; | 572 | uint8_t lane_count; |
| 573 | 573 | ||
| 574 | /* | ||
| 575 | * Used by platforms having DP/HDMI PHY with programmable lane | ||
| 576 | * latency optimization. | ||
| 577 | */ | ||
| 578 | uint8_t lane_lat_optim_mask; | ||
| 579 | |||
| 574 | /* Panel fitter controls for gen2-gen4 + VLV */ | 580 | /* Panel fitter controls for gen2-gen4 + VLV */ |
| 575 | struct { | 581 | struct { |
| 576 | u32 control; | 582 | u32 control; |
| @@ -1252,11 +1258,14 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv); | |||
| 1252 | void intel_finish_reset(struct drm_i915_private *dev_priv); | 1258 | void intel_finish_reset(struct drm_i915_private *dev_priv); |
| 1253 | void hsw_enable_pc8(struct drm_i915_private *dev_priv); | 1259 | void hsw_enable_pc8(struct drm_i915_private *dev_priv); |
| 1254 | void hsw_disable_pc8(struct drm_i915_private *dev_priv); | 1260 | void hsw_disable_pc8(struct drm_i915_private *dev_priv); |
| 1255 | void broxton_init_cdclk(struct drm_i915_private *dev_priv); | 1261 | void bxt_init_cdclk(struct drm_i915_private *dev_priv); |
| 1256 | void broxton_uninit_cdclk(struct drm_i915_private *dev_priv); | 1262 | void bxt_uninit_cdclk(struct drm_i915_private *dev_priv); |
| 1257 | void broxton_ddi_phy_init(struct drm_i915_private *dev_priv); | 1263 | void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy); |
| 1258 | void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv); | 1264 | void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy); |
| 1259 | void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv); | 1265 | bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, |
| 1266 | enum dpio_phy phy); | ||
| 1267 | bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, | ||
| 1268 | enum dpio_phy phy); | ||
| 1260 | void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv); | 1269 | void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv); |
| 1261 | void bxt_enable_dc9(struct drm_i915_private *dev_priv); | 1270 | void bxt_enable_dc9(struct drm_i915_private *dev_priv); |
| 1262 | void bxt_disable_dc9(struct drm_i915_private *dev_priv); | 1271 | void bxt_disable_dc9(struct drm_i915_private *dev_priv); |
| @@ -1414,11 +1423,15 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev) | |||
| 1414 | void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, | 1423 | void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, |
| 1415 | struct drm_atomic_state *state); | 1424 | struct drm_atomic_state *state); |
| 1416 | bool intel_fbc_is_active(struct drm_i915_private *dev_priv); | 1425 | bool intel_fbc_is_active(struct drm_i915_private *dev_priv); |
| 1417 | void intel_fbc_pre_update(struct intel_crtc *crtc); | 1426 | void intel_fbc_pre_update(struct intel_crtc *crtc, |
| 1427 | struct intel_crtc_state *crtc_state, | ||
| 1428 | struct intel_plane_state *plane_state); | ||
| 1418 | void intel_fbc_post_update(struct intel_crtc *crtc); | 1429 | void intel_fbc_post_update(struct intel_crtc *crtc); |
| 1419 | void intel_fbc_init(struct drm_i915_private *dev_priv); | 1430 | void intel_fbc_init(struct drm_i915_private *dev_priv); |
| 1420 | void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv); | 1431 | void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv); |
| 1421 | void intel_fbc_enable(struct intel_crtc *crtc); | 1432 | void intel_fbc_enable(struct intel_crtc *crtc, |
| 1433 | struct intel_crtc_state *crtc_state, | ||
| 1434 | struct intel_plane_state *plane_state); | ||
| 1422 | void intel_fbc_disable(struct intel_crtc *crtc); | 1435 | void intel_fbc_disable(struct intel_crtc *crtc); |
| 1423 | void intel_fbc_global_disable(struct drm_i915_private *dev_priv); | 1436 | void intel_fbc_global_disable(struct drm_i915_private *dev_priv); |
| 1424 | void intel_fbc_invalidate(struct drm_i915_private *dev_priv, | 1437 | void intel_fbc_invalidate(struct drm_i915_private *dev_priv, |
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index e9eda691e8be..b444d0e35a98 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
| @@ -1172,6 +1172,12 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) | |||
| 1172 | if (intel_dsi->clock_stop) | 1172 | if (intel_dsi->clock_stop) |
| 1173 | tmp |= CLOCKSTOP; | 1173 | tmp |= CLOCKSTOP; |
| 1174 | 1174 | ||
| 1175 | if (IS_BROXTON(dev_priv)) { | ||
| 1176 | tmp |= BXT_DPHY_DEFEATURE_EN; | ||
| 1177 | if (!is_cmd_mode(intel_dsi)) | ||
| 1178 | tmp |= BXT_DEFEATURE_DPI_FIFO_CTR; | ||
| 1179 | } | ||
| 1180 | |||
| 1175 | for_each_dsi_port(port, intel_dsi->ports) { | 1181 | for_each_dsi_port(port, intel_dsi->ports) { |
| 1176 | I915_WRITE(MIPI_DSI_FUNC_PRG(port), val); | 1182 | I915_WRITE(MIPI_DSI_FUNC_PRG(port), val); |
| 1177 | 1183 | ||
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 45ee07b888a0..ecabd59ffbaf 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
| @@ -481,10 +481,10 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv) | |||
| 481 | intel_fbc_hw_deactivate(dev_priv); | 481 | intel_fbc_hw_deactivate(dev_priv); |
| 482 | } | 482 | } |
| 483 | 483 | ||
| 484 | static bool multiple_pipes_ok(struct intel_crtc *crtc) | 484 | static bool multiple_pipes_ok(struct intel_crtc *crtc, |
| 485 | struct intel_plane_state *plane_state) | ||
| 485 | { | 486 | { |
| 486 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | 487 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
| 487 | struct drm_plane *primary = crtc->base.primary; | ||
| 488 | struct intel_fbc *fbc = &dev_priv->fbc; | 488 | struct intel_fbc *fbc = &dev_priv->fbc; |
| 489 | enum pipe pipe = crtc->pipe; | 489 | enum pipe pipe = crtc->pipe; |
| 490 | 490 | ||
| @@ -492,9 +492,7 @@ static bool multiple_pipes_ok(struct intel_crtc *crtc) | |||
| 492 | if (!no_fbc_on_multiple_pipes(dev_priv)) | 492 | if (!no_fbc_on_multiple_pipes(dev_priv)) |
| 493 | return true; | 493 | return true; |
| 494 | 494 | ||
| 495 | WARN_ON(!drm_modeset_is_locked(&primary->mutex)); | 495 | if (plane_state->visible) |
| 496 | |||
| 497 | if (to_intel_plane_state(primary->state)->visible) | ||
| 498 | fbc->visible_pipes_mask |= (1 << pipe); | 496 | fbc->visible_pipes_mask |= (1 << pipe); |
| 499 | else | 497 | else |
| 500 | fbc->visible_pipes_mask &= ~(1 << pipe); | 498 | fbc->visible_pipes_mask &= ~(1 << pipe); |
| @@ -709,21 +707,16 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) | |||
| 709 | return effective_w <= max_w && effective_h <= max_h; | 707 | return effective_w <= max_w && effective_h <= max_h; |
| 710 | } | 708 | } |
| 711 | 709 | ||
| 712 | static void intel_fbc_update_state_cache(struct intel_crtc *crtc) | 710 | static void intel_fbc_update_state_cache(struct intel_crtc *crtc, |
| 711 | struct intel_crtc_state *crtc_state, | ||
| 712 | struct intel_plane_state *plane_state) | ||
| 713 | { | 713 | { |
| 714 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | 714 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
| 715 | struct intel_fbc *fbc = &dev_priv->fbc; | 715 | struct intel_fbc *fbc = &dev_priv->fbc; |
| 716 | struct intel_fbc_state_cache *cache = &fbc->state_cache; | 716 | struct intel_fbc_state_cache *cache = &fbc->state_cache; |
| 717 | struct intel_crtc_state *crtc_state = | ||
| 718 | to_intel_crtc_state(crtc->base.state); | ||
| 719 | struct intel_plane_state *plane_state = | ||
| 720 | to_intel_plane_state(crtc->base.primary->state); | ||
| 721 | struct drm_framebuffer *fb = plane_state->base.fb; | 717 | struct drm_framebuffer *fb = plane_state->base.fb; |
| 722 | struct drm_i915_gem_object *obj; | 718 | struct drm_i915_gem_object *obj; |
| 723 | 719 | ||
| 724 | WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex)); | ||
| 725 | WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex)); | ||
| 726 | |||
| 727 | cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; | 720 | cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; |
| 728 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | 721 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
| 729 | cache->crtc.hsw_bdw_pixel_rate = | 722 | cache->crtc.hsw_bdw_pixel_rate = |
| @@ -888,7 +881,9 @@ static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, | |||
| 888 | return memcmp(params1, params2, sizeof(*params1)) == 0; | 881 | return memcmp(params1, params2, sizeof(*params1)) == 0; |
| 889 | } | 882 | } |
| 890 | 883 | ||
| 891 | void intel_fbc_pre_update(struct intel_crtc *crtc) | 884 | void intel_fbc_pre_update(struct intel_crtc *crtc, |
| 885 | struct intel_crtc_state *crtc_state, | ||
| 886 | struct intel_plane_state *plane_state) | ||
| 892 | { | 887 | { |
| 893 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | 888 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
| 894 | struct intel_fbc *fbc = &dev_priv->fbc; | 889 | struct intel_fbc *fbc = &dev_priv->fbc; |
| @@ -898,7 +893,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc) | |||
| 898 | 893 | ||
| 899 | mutex_lock(&fbc->lock); | 894 | mutex_lock(&fbc->lock); |
| 900 | 895 | ||
| 901 | if (!multiple_pipes_ok(crtc)) { | 896 | if (!multiple_pipes_ok(crtc, plane_state)) { |
| 902 | fbc->no_fbc_reason = "more than one pipe active"; | 897 | fbc->no_fbc_reason = "more than one pipe active"; |
| 903 | goto deactivate; | 898 | goto deactivate; |
| 904 | } | 899 | } |
| @@ -906,7 +901,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc) | |||
| 906 | if (!fbc->enabled || fbc->crtc != crtc) | 901 | if (!fbc->enabled || fbc->crtc != crtc) |
| 907 | goto unlock; | 902 | goto unlock; |
| 908 | 903 | ||
| 909 | intel_fbc_update_state_cache(crtc); | 904 | intel_fbc_update_state_cache(crtc, crtc_state, plane_state); |
| 910 | 905 | ||
| 911 | deactivate: | 906 | deactivate: |
| 912 | intel_fbc_deactivate(dev_priv); | 907 | intel_fbc_deactivate(dev_priv); |
| @@ -1090,7 +1085,9 @@ out: | |||
| 1090 | * intel_fbc_enable multiple times for the same pipe without an | 1085 | * intel_fbc_enable multiple times for the same pipe without an |
| 1091 | * intel_fbc_disable in the middle, as long as it is deactivated. | 1086 | * intel_fbc_disable in the middle, as long as it is deactivated. |
| 1092 | */ | 1087 | */ |
| 1093 | void intel_fbc_enable(struct intel_crtc *crtc) | 1088 | void intel_fbc_enable(struct intel_crtc *crtc, |
| 1089 | struct intel_crtc_state *crtc_state, | ||
| 1090 | struct intel_plane_state *plane_state) | ||
| 1094 | { | 1091 | { |
| 1095 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | 1092 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
| 1096 | struct intel_fbc *fbc = &dev_priv->fbc; | 1093 | struct intel_fbc *fbc = &dev_priv->fbc; |
| @@ -1103,19 +1100,19 @@ void intel_fbc_enable(struct intel_crtc *crtc) | |||
| 1103 | if (fbc->enabled) { | 1100 | if (fbc->enabled) { |
| 1104 | WARN_ON(fbc->crtc == NULL); | 1101 | WARN_ON(fbc->crtc == NULL); |
| 1105 | if (fbc->crtc == crtc) { | 1102 | if (fbc->crtc == crtc) { |
| 1106 | WARN_ON(!crtc->config->enable_fbc); | 1103 | WARN_ON(!crtc_state->enable_fbc); |
| 1107 | WARN_ON(fbc->active); | 1104 | WARN_ON(fbc->active); |
| 1108 | } | 1105 | } |
| 1109 | goto out; | 1106 | goto out; |
| 1110 | } | 1107 | } |
| 1111 | 1108 | ||
| 1112 | if (!crtc->config->enable_fbc) | 1109 | if (!crtc_state->enable_fbc) |
| 1113 | goto out; | 1110 | goto out; |
| 1114 | 1111 | ||
| 1115 | WARN_ON(fbc->active); | 1112 | WARN_ON(fbc->active); |
| 1116 | WARN_ON(fbc->crtc != NULL); | 1113 | WARN_ON(fbc->crtc != NULL); |
| 1117 | 1114 | ||
| 1118 | intel_fbc_update_state_cache(crtc); | 1115 | intel_fbc_update_state_cache(crtc, crtc_state, plane_state); |
| 1119 | if (intel_fbc_alloc_cfb(crtc)) { | 1116 | if (intel_fbc_alloc_cfb(crtc)) { |
| 1120 | fbc->no_fbc_reason = "not enough stolen memory"; | 1117 | fbc->no_fbc_reason = "not enough stolen memory"; |
| 1121 | goto out; | 1118 | goto out; |
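The intel_fbc.c changes are presumably driven by the nonblocking commit work above: once a commit can run from a worker, crtc->base.state and primary->state may already point at a newer swapped-in state, so FBC can no longer dig the relevant state out of the crtc itself (which is also why the drm_modeset_is_locked() WARNs are dropped). The new contract, as seen at the intel_display.c call sites above:

	/* From intel_pre_plane_update(): the commit hands FBC exactly the
	 * crtc/plane states it is operating on. */
	intel_fbc_pre_update(crtc, pipe_config, primary_state);

	/* From intel_atomic_commit_tail(): */
	intel_fbc_enable(intel_crtc, pipe_config,
			 to_intel_plane_state(crtc->primary->state));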
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 4c725ad6fb54..4babefc51eb2 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
| @@ -552,8 +552,6 @@ static void intel_fbdev_destroy(struct drm_device *dev, | |||
| 552 | drm_fb_helper_fini(&ifbdev->helper); | 552 | drm_fb_helper_fini(&ifbdev->helper); |
| 553 | 553 | ||
| 554 | if (ifbdev->fb) { | 554 | if (ifbdev->fb) { |
| 555 | drm_framebuffer_unregister_private(&ifbdev->fb->base); | ||
| 556 | |||
| 557 | mutex_lock(&dev->struct_mutex); | 555 | mutex_lock(&dev->struct_mutex); |
| 558 | intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0)); | 556 | intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0)); |
| 559 | mutex_unlock(&dev->struct_mutex); | 557 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 41601c71f529..4df80cc9a291 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h | |||
| @@ -156,11 +156,11 @@ extern int intel_guc_suspend(struct drm_device *dev); | |||
| 156 | extern int intel_guc_resume(struct drm_device *dev); | 156 | extern int intel_guc_resume(struct drm_device *dev); |
| 157 | 157 | ||
| 158 | /* i915_guc_submission.c */ | 158 | /* i915_guc_submission.c */ |
| 159 | int i915_guc_submission_init(struct drm_device *dev); | 159 | int i915_guc_submission_init(struct drm_i915_private *dev_priv); |
| 160 | int i915_guc_submission_enable(struct drm_device *dev); | 160 | int i915_guc_submission_enable(struct drm_i915_private *dev_priv); |
| 161 | int i915_guc_wq_check_space(struct drm_i915_gem_request *rq); | 161 | int i915_guc_wq_check_space(struct drm_i915_gem_request *rq); |
| 162 | int i915_guc_submit(struct drm_i915_gem_request *rq); | 162 | int i915_guc_submit(struct drm_i915_gem_request *rq); |
| 163 | void i915_guc_submission_disable(struct drm_device *dev); | 163 | void i915_guc_submission_disable(struct drm_i915_private *dev_priv); |
| 164 | void i915_guc_submission_fini(struct drm_device *dev); | 164 | void i915_guc_submission_fini(struct drm_i915_private *dev_priv); |
| 165 | 165 | ||
| 166 | #endif | 166 | #endif |
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index f2b88c7209cb..8fe96a2d989e 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c | |||
| @@ -425,9 +425,13 @@ int intel_guc_setup(struct drm_device *dev) | |||
| 425 | if (!i915.enable_guc_loading) { | 425 | if (!i915.enable_guc_loading) { |
| 426 | err = 0; | 426 | err = 0; |
| 427 | goto fail; | 427 | goto fail; |
| 428 | } else if (fw_path == NULL || *fw_path == '\0') { | 428 | } else if (fw_path == NULL) { |
| 429 | if (*fw_path == '\0') | 429 | /* Device is known to have no uCode (e.g. no GuC) */ |
| 430 | DRM_INFO("No GuC firmware known for this platform\n"); | 430 | err = -ENXIO; |
| 431 | goto fail; | ||
| 432 | } else if (*fw_path == '\0') { | ||
| 433 | /* Device has a GuC but we don't know what f/w to load? */ | ||
| 434 | DRM_INFO("No GuC firmware known for this platform\n"); | ||
| 431 | err = -ENODEV; | 435 | err = -ENODEV; |
| 432 | goto fail; | 436 | goto fail; |
| 433 | } | 437 | } |
| @@ -449,7 +453,7 @@ int intel_guc_setup(struct drm_device *dev) | |||
| 449 | intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), | 453 | intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), |
| 450 | intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); | 454 | intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); |
| 451 | 455 | ||
| 452 | err = i915_guc_submission_init(dev); | 456 | err = i915_guc_submission_init(dev_priv); |
| 453 | if (err) | 457 | if (err) |
| 454 | goto fail; | 458 | goto fail; |
| 455 | 459 | ||
| @@ -488,10 +492,7 @@ int intel_guc_setup(struct drm_device *dev) | |||
| 488 | intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); | 492 | intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); |
| 489 | 493 | ||
| 490 | if (i915.enable_guc_submission) { | 494 | if (i915.enable_guc_submission) { |
| 491 | /* The execbuf_client will be recreated. Release it first. */ | 495 | err = i915_guc_submission_enable(dev_priv); |
| 492 | i915_guc_submission_disable(dev); | ||
| 493 | |||
| 494 | err = i915_guc_submission_enable(dev); | ||
| 495 | if (err) | 496 | if (err) |
| 496 | goto fail; | 497 | goto fail; |
| 497 | direct_interrupts_to_guc(dev_priv); | 498 | direct_interrupts_to_guc(dev_priv); |
| @@ -504,8 +505,8 @@ fail: | |||
| 504 | guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL; | 505 | guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL; |
| 505 | 506 | ||
| 506 | direct_interrupts_to_host(dev_priv); | 507 | direct_interrupts_to_host(dev_priv); |
| 507 | i915_guc_submission_disable(dev); | 508 | i915_guc_submission_disable(dev_priv); |
| 508 | i915_guc_submission_fini(dev); | 509 | i915_guc_submission_fini(dev_priv); |
| 509 | 510 | ||
| 510 | /* | 511 | /* |
| 511 | * We've failed to load the firmware :( | 512 | * We've failed to load the firmware :( |
| @@ -524,18 +525,20 @@ fail: | |||
| 524 | ret = 0; | 525 | ret = 0; |
| 525 | } | 526 | } |
| 526 | 527 | ||
| 527 | if (err == 0) | 528 | if (err == 0 && !HAS_GUC_UCODE(dev)) |
| 529 | ; /* Don't mention the GuC! */ | ||
| 530 | else if (err == 0) | ||
| 528 | DRM_INFO("GuC firmware load skipped\n"); | 531 | DRM_INFO("GuC firmware load skipped\n"); |
| 529 | else if (ret == -EIO) | 532 | else if (ret != -EIO) |
| 530 | DRM_ERROR("GuC firmware load failed: %d\n", err); | ||
| 531 | else | ||
| 532 | DRM_INFO("GuC firmware load failed: %d\n", err); | 533 | DRM_INFO("GuC firmware load failed: %d\n", err); |
| 534 | else | ||
| 535 | DRM_ERROR("GuC firmware load failed: %d\n", err); | ||
| 533 | 536 | ||
| 534 | if (i915.enable_guc_submission) { | 537 | if (i915.enable_guc_submission) { |
| 535 | if (fw_path == NULL) | 538 | if (fw_path == NULL) |
| 536 | DRM_INFO("GuC submission without firmware not supported\n"); | 539 | DRM_INFO("GuC submission without firmware not supported\n"); |
| 537 | if (ret == 0) | 540 | if (ret == 0) |
| 538 | DRM_INFO("Falling back to execlist mode\n"); | 541 | DRM_INFO("Falling back from GuC submission to execlist mode\n"); |
| 539 | else | 542 | else |
| 540 | DRM_ERROR("GuC init failed: %d\n", ret); | 543 | DRM_ERROR("GuC init failed: %d\n", ret); |
| 541 | } | 544 | } |
| @@ -730,8 +733,8 @@ void intel_guc_fini(struct drm_device *dev) | |||
| 730 | 733 | ||
| 731 | mutex_lock(&dev->struct_mutex); | 734 | mutex_lock(&dev->struct_mutex); |
| 732 | direct_interrupts_to_host(dev_priv); | 735 | direct_interrupts_to_host(dev_priv); |
| 733 | i915_guc_submission_disable(dev); | 736 | i915_guc_submission_disable(dev_priv); |
| 734 | i915_guc_submission_fini(dev); | 737 | i915_guc_submission_fini(dev_priv); |
| 735 | 738 | ||
| 736 | if (guc_fw->guc_fw_obj) | 739 | if (guc_fw->guc_fw_obj) |
| 737 | drm_gem_object_unreference(&guc_fw->guc_fw_obj->base); | 740 | drm_gem_object_unreference(&guc_fw->guc_fw_obj->base); |
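The reworked setup path now distinguishes three fw_path cases before touching hardware; a standalone sketch of the classification (the function name is illustrative, the return values match the hunk above):

	static int classify_guc_fw_path(const char *fw_path)
	{
		if (fw_path == NULL)
			return -ENXIO;	/* device has no GuC, no uCode expected */
		if (*fw_path == '\0')
			return -ENODEV;	/* GuC present, but no firmware known */
		return 0;		/* there is a firmware blob to fetch and load */
	}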
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c new file mode 100644 index 000000000000..9fa458ce40a6 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_gvt.c | |||
| @@ -0,0 +1,100 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice (including the next | ||
| 12 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 13 | * Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
| 20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 21 | * SOFTWARE. | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include "i915_drv.h" | ||
| 25 | #include "intel_gvt.h" | ||
| 26 | |||
| 27 | /** | ||
| 28 | * DOC: Intel GVT-g host support | ||
| 29 | * | ||
| 30 | * Intel GVT-g is a graphics virtualization technology which shares the | ||
| 31 | * GPU among multiple virtual machines on a time-sharing basis. Each | ||
| 32 | * virtual machine is presented with a virtual GPU (vGPU) that has | ||
| 33 | * equivalent features to the underlying physical GPU (pGPU), so the | ||
| 34 | * i915 driver can run seamlessly in a virtual machine. This file provides | ||
| 35 | * the enlightenments of GVT and the components it needs in the i915 driver. | ||
| 36 | */ | ||
| 37 | |||
| 38 | static bool is_supported_device(struct drm_i915_private *dev_priv) | ||
| 39 | { | ||
| 40 | if (IS_BROADWELL(dev_priv)) | ||
| 41 | return true; | ||
| 42 | return false; | ||
| 43 | } | ||
| 44 | |||
| 45 | /** | ||
| 46 | * intel_gvt_init - initialize GVT components | ||
| 47 | * @dev_priv: drm i915 private data | ||
| 48 | * | ||
| 49 | * This function is called at the initialization stage to create a GVT device. | ||
| 50 | * | ||
| 51 | * Returns: | ||
| 52 | * Zero on success, negative error code if failed. | ||
| 53 | * | ||
| 54 | */ | ||
| 55 | int intel_gvt_init(struct drm_i915_private *dev_priv) | ||
| 56 | { | ||
| 57 | int ret; | ||
| 58 | |||
| 59 | if (!i915.enable_gvt) { | ||
| 60 | DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n"); | ||
| 61 | return 0; | ||
| 62 | } | ||
| 63 | |||
| 64 | if (!is_supported_device(dev_priv)) { | ||
| 65 | DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n"); | ||
| 66 | return 0; | ||
| 67 | } | ||
| 68 | |||
| 69 | /* | ||
| 70 | * If we're not in a host or fail to find an MPT module, disable GVT-g. | ||
| 71 | */ | ||
| 72 | ret = intel_gvt_init_host(); | ||
| 73 | if (ret) { | ||
| 74 | DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n"); | ||
| 75 | return 0; | ||
| 76 | } | ||
| 77 | |||
| 78 | ret = intel_gvt_init_device(dev_priv); | ||
| 79 | if (ret) { | ||
| 80 | DRM_DEBUG_DRIVER("Fail to init GVT device\n"); | ||
| 81 | return 0; | ||
| 82 | } | ||
| 83 | |||
| 84 | return 0; | ||
| 85 | } | ||
| 86 | |||
| 87 | /** | ||
| 88 | * intel_gvt_cleanup - cleanup GVT components when i915 driver is unloading | ||
| 89 | * @dev_priv: drm i915 private data | ||
| 90 | * | ||
| 91 | * This function is called at the i915 driver unloading stage to shut down | ||
| 92 | * GVT components and release the related resources. | ||
| 93 | */ | ||
| 94 | void intel_gvt_cleanup(struct drm_i915_private *dev_priv) | ||
| 95 | { | ||
| 96 | if (!intel_gvt_active(dev_priv)) | ||
| 97 | return; | ||
| 98 | |||
| 99 | intel_gvt_clean_device(dev_priv); | ||
| 100 | } | ||
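For orientation, a minimal sketch of how a driver load/unload path might consume this API; the example_* function names are illustrative assumptions, not part of this patch:

    /* Hypothetical caller wiring GVT init/cleanup into driver load/unload. */
    static int example_driver_load(struct drm_i915_private *dev_priv)
    {
            /* Non-fatal by design: intel_gvt_init() returns 0 even when
             * GVT-g is unavailable, so plain i915 loading is unaffected.
             */
            return intel_gvt_init(dev_priv);
    }

    static void example_driver_unload(struct drm_i915_private *dev_priv)
    {
            /* Safe to call unconditionally; it no-ops when GVT is inactive. */
            intel_gvt_cleanup(dev_priv);
    }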
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h new file mode 100644 index 000000000000..960211df74db --- /dev/null +++ b/drivers/gpu/drm/i915/intel_gvt.h | |||
| @@ -0,0 +1,45 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice (including the next | ||
| 12 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 13 | * Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
| 20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 21 | * SOFTWARE. | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef _INTEL_GVT_H_ | ||
| 25 | #define _INTEL_GVT_H_ | ||
| 26 | |||
| 27 | #include "gvt/gvt.h" | ||
| 28 | |||
| 29 | #ifdef CONFIG_DRM_I915_GVT | ||
| 30 | int intel_gvt_init(struct drm_i915_private *dev_priv); | ||
| 31 | void intel_gvt_cleanup(struct drm_i915_private *dev_priv); | ||
| 32 | int intel_gvt_init_device(struct drm_i915_private *dev_priv); | ||
| 33 | void intel_gvt_clean_device(struct drm_i915_private *dev_priv); | ||
| 34 | int intel_gvt_init_host(void); | ||
| 35 | #else | ||
| 36 | static inline int intel_gvt_init(struct drm_i915_private *dev_priv) | ||
| 37 | { | ||
| 38 | return 0; | ||
| 39 | } | ||
| 40 | static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv) | ||
| 41 | { | ||
| 42 | } | ||
| 43 | #endif | ||
| 44 | |||
| 45 | #endif /* _INTEL_GVT_H_ */ | ||
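The header uses the standard compile-out idiom: with CONFIG_DRM_I915_GVT unset, callers still compile against empty static inlines and the optimizer drops the calls entirely. A generic sketch of the same pattern, with illustrative names:

    #ifdef CONFIG_EXAMPLE_FEATURE
    int example_feature_init(struct example_dev *dev);
    #else
    static inline int example_feature_init(struct example_dev *dev)
    {
            return 0; /* feature compiled out: succeed without doing anything */
    }
    #endif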
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index d4fc3d436d52..fb21626ada64 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -1810,6 +1810,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | |||
| 1810 | enum port port = intel_dig_port->port; | 1810 | enum port port = intel_dig_port->port; |
| 1811 | uint8_t alternate_ddc_pin; | 1811 | uint8_t alternate_ddc_pin; |
| 1812 | 1812 | ||
| 1813 | DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", | ||
| 1814 | port_name(port)); | ||
| 1815 | |||
| 1813 | if (WARN(intel_dig_port->max_lanes < 4, | 1816 | if (WARN(intel_dig_port->max_lanes < 4, |
| 1814 | "Not enough lanes (%d) for HDMI on port %c\n", | 1817 | "Not enough lanes (%d) for HDMI on port %c\n", |
| 1815 | intel_dig_port->max_lanes, port_name(port))) | 1818 | intel_dig_port->max_lanes, port_name(port))) |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 5c191a1afaaf..debed011a958 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
| @@ -208,16 +208,6 @@ | |||
| 208 | } while (0) | 208 | } while (0) |
| 209 | 209 | ||
| 210 | enum { | 210 | enum { |
| 211 | ADVANCED_CONTEXT = 0, | ||
| 212 | LEGACY_32B_CONTEXT, | ||
| 213 | ADVANCED_AD_CONTEXT, | ||
| 214 | LEGACY_64B_CONTEXT | ||
| 215 | }; | ||
| 216 | #define GEN8_CTX_ADDRESSING_MODE_SHIFT 3 | ||
| 217 | #define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\ | ||
| 218 | LEGACY_64B_CONTEXT :\ | ||
| 219 | LEGACY_32B_CONTEXT) | ||
| 220 | enum { | ||
| 221 | FAULT_AND_HANG = 0, | 211 | FAULT_AND_HANG = 0, |
| 222 | FAULT_AND_HALT, /* Debug only */ | 212 | FAULT_AND_HALT, /* Debug only */ |
| 223 | FAULT_AND_STREAM, | 213 | FAULT_AND_STREAM, |
| @@ -238,7 +228,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx, | |||
| 238 | 228 | ||
| 239 | /** | 229 | /** |
| 240 | * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists | 230 | * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists |
| 241 | * @dev: DRM device. | 231 | * @dev_priv: i915 device private |
| 242 | * @enable_execlists: value of i915.enable_execlists module parameter. | 232 | * @enable_execlists: value of i915.enable_execlists module parameter. |
| 243 | * | 233 | * |
| 244 | * Only certain platforms support Execlists (the prerequisites being | 234 | * Only certain platforms support Execlists (the prerequisites being |
| @@ -281,8 +271,6 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine) | |||
| 281 | (engine->id == VCS || engine->id == VCS2); | 271 | (engine->id == VCS || engine->id == VCS2); |
| 282 | 272 | ||
| 283 | engine->ctx_desc_template = GEN8_CTX_VALID; | 273 | engine->ctx_desc_template = GEN8_CTX_VALID; |
| 284 | engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev_priv) << | ||
| 285 | GEN8_CTX_ADDRESSING_MODE_SHIFT; | ||
| 286 | if (IS_GEN8(dev_priv)) | 274 | if (IS_GEN8(dev_priv)) |
| 287 | engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; | 275 | engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; |
| 288 | engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE; | 276 | engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE; |
| @@ -325,7 +313,8 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx, | |||
| 325 | 313 | ||
| 326 | BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH)); | 314 | BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH)); |
| 327 | 315 | ||
| 328 | desc = engine->ctx_desc_template; /* bits 0-11 */ | 316 | desc = ctx->desc_template; /* bits 3-4 */ |
| 317 | desc |= engine->ctx_desc_template; /* bits 0-11 */ | ||
| 329 | desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE; | 318 | desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE; |
| 330 | /* bits 12-31 */ | 319 | /* bits 12-31 */ |
| 331 | desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ | 320 | desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ |
| @@ -415,6 +404,20 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0, | |||
| 415 | spin_unlock_irq(&dev_priv->uncore.lock); | 404 | spin_unlock_irq(&dev_priv->uncore.lock); |
| 416 | } | 405 | } |
| 417 | 406 | ||
| 407 | static inline void execlists_context_status_change( | ||
| 408 | struct drm_i915_gem_request *rq, | ||
| 409 | unsigned long status) | ||
| 410 | { | ||
| 411 | /* | ||
| 412 | * Only used when GVT-g is enabled for now. When GVT-g is disabled, | ||
| 413 | * the compiler should eliminate this function as dead code. | ||
| 414 | */ | ||
| 415 | if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) | ||
| 416 | return; | ||
| 417 | |||
| 418 | atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq); | ||
| 419 | } | ||
| 420 | |||
| 418 | static void execlists_context_unqueue(struct intel_engine_cs *engine) | 421 | static void execlists_context_unqueue(struct intel_engine_cs *engine) |
| 419 | { | 422 | { |
| 420 | struct drm_i915_gem_request *req0 = NULL, *req1 = NULL; | 423 | struct drm_i915_gem_request *req0 = NULL, *req1 = NULL; |
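A hedged sketch of how a GVT-side consumer might subscribe to these schedule-in/out notifications; the callback and its registration site are assumptions for illustration only:

    #include <linux/notifier.h>

    /* Hypothetical listener for context schedule-in/out events. */
    static int example_ctx_status_cb(struct notifier_block *nb,
                                     unsigned long action, void *data)
    {
            struct drm_i915_gem_request *rq = data;

            if (WARN_ON(!rq))
                    return NOTIFY_DONE;

            switch (action) {
            case INTEL_CONTEXT_SCHEDULE_IN:
                    /* set up shadow state for rq->ctx here */
                    break;
            case INTEL_CONTEXT_SCHEDULE_OUT:
                    /* save or tear down shadow state here */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
            .notifier_call = example_ctx_status_cb,
    };

    /* Registered against a context's chain, e.g.:
     * atomic_notifier_chain_register(&ctx->status_notifier, &example_nb);
     */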
| @@ -441,6 +444,20 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) | |||
| 441 | i915_gem_request_unreference(req0); | 444 | i915_gem_request_unreference(req0); |
| 442 | req0 = cursor; | 445 | req0 = cursor; |
| 443 | } else { | 446 | } else { |
| 447 | if (IS_ENABLED(CONFIG_DRM_I915_GVT)) { | ||
| 448 | /* | ||
| 449 | * If the (merged) req0 context requires single | ||
| 450 | * submission, stop picking. | ||
| 451 | */ | ||
| 452 | if (req0->ctx->execlists_force_single_submission) | ||
| 453 | break; | ||
| 454 | /* | ||
| 455 | * If the req0 context doesn't require single submission | ||
| 456 | * but the next request's context does, stop picking. | ||
| 457 | */ | ||
| 458 | if (cursor->ctx->execlists_force_single_submission) | ||
| 459 | break; | ||
| 460 | } | ||
| 444 | req1 = cursor; | 461 | req1 = cursor; |
| 445 | WARN_ON(req1->elsp_submitted); | 462 | WARN_ON(req1->elsp_submitted); |
| 446 | break; | 463 | break; |
| @@ -450,6 +467,12 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) | |||
| 450 | if (unlikely(!req0)) | 467 | if (unlikely(!req0)) |
| 451 | return; | 468 | return; |
| 452 | 469 | ||
| 470 | execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN); | ||
| 471 | |||
| 472 | if (req1) | ||
| 473 | execlists_context_status_change(req1, | ||
| 474 | INTEL_CONTEXT_SCHEDULE_IN); | ||
| 475 | |||
| 453 | if (req0->elsp_submitted & engine->idle_lite_restore_wa) { | 476 | if (req0->elsp_submitted & engine->idle_lite_restore_wa) { |
| 454 | /* | 477 | /* |
| 455 | * WaIdleLiteRestore: make sure we never cause a lite restore | 478 | * WaIdleLiteRestore: make sure we never cause a lite restore |
| @@ -488,6 +511,8 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id) | |||
| 488 | if (--head_req->elsp_submitted > 0) | 511 | if (--head_req->elsp_submitted > 0) |
| 489 | return 0; | 512 | return 0; |
| 490 | 513 | ||
| 514 | execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT); | ||
| 515 | |||
| 491 | list_del(&head_req->execlist_link); | 516 | list_del(&head_req->execlist_link); |
| 492 | i915_gem_request_unreference(head_req); | 517 | i915_gem_request_unreference(head_req); |
| 493 | 518 | ||
| @@ -516,7 +541,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, | |||
| 516 | 541 | ||
| 517 | /** | 542 | /** |
| 518 | * intel_lrc_irq_handler() - handle Context Switch interrupts | 543 | * intel_lrc_irq_handler() - handle Context Switch interrupts |
| 519 | * @engine: Engine Command Streamer to handle. | 544 | * @data: the engine, packed into an unsigned long by the tasklet |
| 520 | * | 545 | * |
| 521 | * Check the unread Context Status Buffers and manage the submission of new | 546 | * Check the unread Context Status Buffers and manage the submission of new |
| 522 | * contexts to the ELSP accordingly. | 547 | * contexts to the ELSP accordingly. |
| @@ -786,15 +811,9 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) | |||
| 786 | 811 | ||
| 787 | /** | 812 | /** |
| 788 | * execlists_submission() - submit a batchbuffer for execution, Execlists style | 813 | * execlists_submission() - submit a batchbuffer for execution, Execlists style |
| 789 | * @dev: DRM device. | 814 | * @params: execbuffer call parameters. |
| 790 | * @file: DRM file. | ||
| 791 | * @ring: Engine Command Streamer to submit to. | ||
| 792 | * @ctx: Context to employ for this submission. | ||
| 793 | * @args: execbuffer call arguments. | 815 | * @args: execbuffer call arguments. |
| 794 | * @vmas: list of vmas. | 816 | * @vmas: list of vmas. |
| 795 | * @batch_obj: the batchbuffer to submit. | ||
| 796 | * @exec_start: batchbuffer start virtual address pointer. | ||
| 797 | * @dispatch_flags: translated execbuffer call flags. | ||
| 798 | * | 817 | * |
| 799 | * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts | 818 | * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts |
| 800 | * away the submission details of the execbuffer ioctl call. | 819 | * away the submission details of the execbuffer ioctl call. |
| @@ -1081,12 +1100,13 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, | |||
| 1081 | uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); | 1100 | uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); |
| 1082 | 1101 | ||
| 1083 | /* | 1102 | /* |
| 1084 | * WaDisableLSQCROPERFforOCL:skl | 1103 | * WaDisableLSQCROPERFforOCL:skl,kbl |
| 1085 | * This WA is implemented in skl_init_clock_gating() but since | 1104 | * This WA is implemented in skl_init_clock_gating() but since |
| 1086 | * this batch updates GEN8_L3SQCREG4 with default value we need to | 1105 | * this batch updates GEN8_L3SQCREG4 with default value we need to |
| 1087 | * set this bit here to retain the WA during flush. | 1106 | * set this bit here to retain the WA during flush. |
| 1088 | */ | 1107 | */ |
| 1089 | if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0)) | 1108 | if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0) || |
| 1109 | IS_KBL_REVID(engine->i915, 0, KBL_REVID_E0)) | ||
| 1090 | l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; | 1110 | l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; |
| 1091 | 1111 | ||
| 1092 | wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | | 1112 | wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | |
| @@ -1138,7 +1158,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx, | |||
| 1138 | /** | 1158 | /** |
| 1139 | * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA | 1159 | * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA |
| 1140 | * | 1160 | * |
| 1141 | * @ring: only applicable for RCS | 1161 | * @engine: only applicable for RCS |
| 1142 | * @wa_ctx: structure representing wa_ctx | 1162 | * @wa_ctx: structure representing wa_ctx |
| 1143 | * offset: specifies start of the batch, should be cache-aligned. This is updated | 1163 | * offset: specifies start of the batch, should be cache-aligned. This is updated |
| 1144 | * with the offset value received as input. | 1164 | * with the offset value received as input. |
| @@ -1212,7 +1232,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, | |||
| 1212 | /** | 1232 | /** |
| 1213 | * gen8_init_perctx_bb() - initialize per ctx batch with WA | 1233 | * gen8_init_perctx_bb() - initialize per ctx batch with WA |
| 1214 | * | 1234 | * |
| 1215 | * @ring: only applicable for RCS | 1235 | * @engine: only applicable for RCS |
| 1216 | * @wa_ctx: structure representing wa_ctx | 1236 | * @wa_ctx: structure representing wa_ctx |
| 1217 | * offset: specifies start of the batch, should be cache-aligned. | 1237 | * offset: specifies start of the batch, should be cache-aligned. |
| 1218 | * size: size of the batch in DWORDS but HW expects in terms of cachelines | 1238 | * size: size of the batch in DWORDS but HW expects in terms of cachelines |
| @@ -1260,6 +1280,22 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, | |||
| 1260 | return ret; | 1280 | return ret; |
| 1261 | index = ret; | 1281 | index = ret; |
| 1262 | 1282 | ||
| 1283 | /* WaClearSlmSpaceAtContextSwitch:kbl */ | ||
| 1284 | /* Actual scratch location is at 128 bytes offset */ | ||
| 1285 | if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) { | ||
| 1286 | uint32_t scratch_addr | ||
| 1287 | = engine->scratch.gtt_offset + 2*CACHELINE_BYTES; | ||
| 1288 | |||
| 1289 | wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); | ||
| 1290 | wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | | ||
| 1291 | PIPE_CONTROL_GLOBAL_GTT_IVB | | ||
| 1292 | PIPE_CONTROL_CS_STALL | | ||
| 1293 | PIPE_CONTROL_QW_WRITE)); | ||
| 1294 | wa_ctx_emit(batch, index, scratch_addr); | ||
| 1295 | wa_ctx_emit(batch, index, 0); | ||
| 1296 | wa_ctx_emit(batch, index, 0); | ||
| 1297 | wa_ctx_emit(batch, index, 0); | ||
| 1298 | } | ||
| 1263 | /* Pad to end of cacheline */ | 1299 | /* Pad to end of cacheline */ |
| 1264 | while (index % CACHELINE_DWORDS) | 1300 | while (index % CACHELINE_DWORDS) |
| 1265 | wa_ctx_emit(batch, index, MI_NOOP); | 1301 | wa_ctx_emit(batch, index, MI_NOOP); |
| @@ -1657,9 +1693,10 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, | |||
| 1657 | struct intel_ringbuffer *ringbuf = request->ringbuf; | 1693 | struct intel_ringbuffer *ringbuf = request->ringbuf; |
| 1658 | struct intel_engine_cs *engine = ringbuf->engine; | 1694 | struct intel_engine_cs *engine = ringbuf->engine; |
| 1659 | u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; | 1695 | u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; |
| 1660 | bool vf_flush_wa = false; | 1696 | bool vf_flush_wa = false, dc_flush_wa = false; |
| 1661 | u32 flags = 0; | 1697 | u32 flags = 0; |
| 1662 | int ret; | 1698 | int ret; |
| 1699 | int len; | ||
| 1663 | 1700 | ||
| 1664 | flags |= PIPE_CONTROL_CS_STALL; | 1701 | flags |= PIPE_CONTROL_CS_STALL; |
| 1665 | 1702 | ||
| @@ -1686,9 +1723,21 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, | |||
| 1686 | */ | 1723 | */ |
| 1687 | if (IS_GEN9(request->i915)) | 1724 | if (IS_GEN9(request->i915)) |
| 1688 | vf_flush_wa = true; | 1725 | vf_flush_wa = true; |
| 1726 | |||
| 1727 | /* WaForGAMHang:kbl */ | ||
| 1728 | if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0)) | ||
| 1729 | dc_flush_wa = true; | ||
| 1689 | } | 1730 | } |
| 1690 | 1731 | ||
| 1691 | ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6); | 1732 | len = 6; |
| 1733 | |||
| 1734 | if (vf_flush_wa) | ||
| 1735 | len += 6; | ||
| 1736 | |||
| 1737 | if (dc_flush_wa) | ||
| 1738 | len += 12; | ||
| 1739 | |||
| 1740 | ret = intel_ring_begin(request, len); | ||
| 1692 | if (ret) | 1741 | if (ret) |
| 1693 | return ret; | 1742 | return ret; |
| 1694 | 1743 | ||
| @@ -1701,12 +1750,31 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, | |||
| 1701 | intel_logical_ring_emit(ringbuf, 0); | 1750 | intel_logical_ring_emit(ringbuf, 0); |
| 1702 | } | 1751 | } |
| 1703 | 1752 | ||
| 1753 | if (dc_flush_wa) { | ||
| 1754 | intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); | ||
| 1755 | intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE); | ||
| 1756 | intel_logical_ring_emit(ringbuf, 0); | ||
| 1757 | intel_logical_ring_emit(ringbuf, 0); | ||
| 1758 | intel_logical_ring_emit(ringbuf, 0); | ||
| 1759 | intel_logical_ring_emit(ringbuf, 0); | ||
| 1760 | } | ||
| 1761 | |||
| 1704 | intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); | 1762 | intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); |
| 1705 | intel_logical_ring_emit(ringbuf, flags); | 1763 | intel_logical_ring_emit(ringbuf, flags); |
| 1706 | intel_logical_ring_emit(ringbuf, scratch_addr); | 1764 | intel_logical_ring_emit(ringbuf, scratch_addr); |
| 1707 | intel_logical_ring_emit(ringbuf, 0); | 1765 | intel_logical_ring_emit(ringbuf, 0); |
| 1708 | intel_logical_ring_emit(ringbuf, 0); | 1766 | intel_logical_ring_emit(ringbuf, 0); |
| 1709 | intel_logical_ring_emit(ringbuf, 0); | 1767 | intel_logical_ring_emit(ringbuf, 0); |
| 1768 | |||
| 1769 | if (dc_flush_wa) { | ||
| 1770 | intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); | ||
| 1771 | intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL); | ||
| 1772 | intel_logical_ring_emit(ringbuf, 0); | ||
| 1773 | intel_logical_ring_emit(ringbuf, 0); | ||
| 1774 | intel_logical_ring_emit(ringbuf, 0); | ||
| 1775 | intel_logical_ring_emit(ringbuf, 0); | ||
| 1776 | } | ||
| 1777 | |||
| 1710 | intel_logical_ring_advance(ringbuf); | 1778 | intel_logical_ring_advance(ringbuf); |
| 1711 | 1779 | ||
| 1712 | return 0; | 1780 | return 0; |
| @@ -1860,7 +1928,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req) | |||
| 1860 | /** | 1928 | /** |
| 1861 | * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer | 1929 | * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer |
| 1862 | * | 1930 | * |
| 1863 | * @ring: Engine Command Streamer. | 1931 | * @engine: Engine Command Streamer. |
| 1864 | * | 1932 | * |
| 1865 | */ | 1933 | */ |
| 1866 | void intel_logical_ring_cleanup(struct intel_engine_cs *engine) | 1934 | void intel_logical_ring_cleanup(struct intel_engine_cs *engine) |
| @@ -2413,7 +2481,7 @@ populate_lr_context(struct i915_gem_context *ctx, | |||
| 2413 | 2481 | ||
| 2414 | /** | 2482 | /** |
| 2415 | * intel_lr_context_size() - return the size of the context for an engine | 2483 | * intel_lr_context_size() - return the size of the context for an engine |
| 2416 | * @ring: which engine to find the context size for | 2484 | * @engine: which engine to find the context size for |
| 2417 | * | 2485 | * |
| 2418 | * Each engine may require a different amount of space for a context image, | 2486 | * Each engine may require a different amount of space for a context image, |
| 2419 | * so when allocating (or copying) an image, this function can be used to | 2487 | * so when allocating (or copying) an image, this function can be used to |
| @@ -2484,7 +2552,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, | |||
| 2484 | return PTR_ERR(ctx_obj); | 2552 | return PTR_ERR(ctx_obj); |
| 2485 | } | 2553 | } |
| 2486 | 2554 | ||
| 2487 | ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE); | 2555 | ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size); |
| 2488 | if (IS_ERR(ringbuf)) { | 2556 | if (IS_ERR(ringbuf)) { |
| 2489 | ret = PTR_ERR(ringbuf); | 2557 | ret = PTR_ERR(ringbuf); |
| 2490 | goto error_deref_obj; | 2558 | goto error_deref_obj; |
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index a8db42a9c50f..2b8255c19dcc 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h | |||
| @@ -57,6 +57,11 @@ | |||
| 57 | #define GEN8_CSB_READ_PTR(csb_status) \ | 57 | #define GEN8_CSB_READ_PTR(csb_status) \ |
| 58 | (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8) | 58 | (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8) |
| 59 | 59 | ||
| 60 | enum { | ||
| 61 | INTEL_CONTEXT_SCHEDULE_IN = 0, | ||
| 62 | INTEL_CONTEXT_SCHEDULE_OUT, | ||
| 63 | }; | ||
| 64 | |||
| 60 | /* Logical Rings */ | 65 | /* Logical Rings */ |
| 61 | int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request); | 66 | int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request); |
| 62 | int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); | 67 | int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); |
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index b765c75f3fcd..3c1482b8f2f4 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c | |||
| @@ -156,6 +156,16 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv, | |||
| 156 | "Platform that should have a MOCS table does not.\n"); | 156 | "Platform that should have a MOCS table does not.\n"); |
| 157 | } | 157 | } |
| 158 | 158 | ||
| 159 | /* WaDisableSkipCaching:skl,bxt,kbl */ | ||
| 160 | if (IS_GEN9(dev_priv)) { | ||
| 161 | int i; | ||
| 162 | |||
| 163 | for (i = 0; i < table->size; i++) | ||
| 164 | if (WARN_ON(table->table[i].l3cc_value & | ||
| 165 | (L3_ESC(1) | L3_SCC(0x7)))) | ||
| 166 | return false; | ||
| 167 | } | ||
| 168 | |||
| 159 | return result; | 169 | return result; |
| 160 | } | 170 | } |
| 161 | 171 | ||
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 08274591db7e..658a75659657 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -55,14 +55,38 @@ | |||
| 55 | #define INTEL_RC6p_ENABLE (1<<1) | 55 | #define INTEL_RC6p_ENABLE (1<<1) |
| 56 | #define INTEL_RC6pp_ENABLE (1<<2) | 56 | #define INTEL_RC6pp_ENABLE (1<<2) |
| 57 | 57 | ||
| 58 | static void bxt_init_clock_gating(struct drm_device *dev) | 58 | static void gen9_init_clock_gating(struct drm_device *dev) |
| 59 | { | 59 | { |
| 60 | struct drm_i915_private *dev_priv = dev->dev_private; | 60 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 61 | 61 | ||
| 62 | /* See Bspec note for PSR2_CTL bit 31, Wa#828:bxt */ | 62 | /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */ |
| 63 | I915_WRITE(CHICKEN_PAR1_1, | 63 | I915_WRITE(CHICKEN_PAR1_1, |
| 64 | I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); | 64 | I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); |
| 65 | 65 | ||
| 66 | I915_WRITE(GEN8_CONFIG0, | ||
| 67 | I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES); | ||
| 68 | |||
| 69 | /* WaEnableChickenDCPR:skl,bxt,kbl */ | ||
| 70 | I915_WRITE(GEN8_CHICKEN_DCPR_1, | ||
| 71 | I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); | ||
| 72 | |||
| 73 | /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */ | ||
| 74 | /* WaFbcWakeMemOn:skl,bxt,kbl */ | ||
| 75 | I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | | ||
| 76 | DISP_FBC_WM_DIS | | ||
| 77 | DISP_FBC_MEMORY_WAKE); | ||
| 78 | |||
| 79 | /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */ | ||
| 80 | I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | | ||
| 81 | ILK_DPFC_DISABLE_DUMMY0); | ||
| 82 | } | ||
| 83 | |||
| 84 | static void bxt_init_clock_gating(struct drm_device *dev) | ||
| 85 | { | ||
| 86 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 87 | |||
| 88 | gen9_init_clock_gating(dev); | ||
| 89 | |||
| 66 | /* WaDisableSDEUnitClockGating:bxt */ | 90 | /* WaDisableSDEUnitClockGating:bxt */ |
| 67 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | 91 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | |
| 68 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); | 92 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); |
| @@ -6963,13 +6987,40 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, | |||
| 6963 | I915_WRITE(GEN7_MISCCPCTL, misccpctl); | 6987 | I915_WRITE(GEN7_MISCCPCTL, misccpctl); |
| 6964 | } | 6988 | } |
| 6965 | 6989 | ||
| 6990 | static void kabylake_init_clock_gating(struct drm_device *dev) | ||
| 6991 | { | ||
| 6992 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 6993 | |||
| 6994 | gen9_init_clock_gating(dev); | ||
| 6995 | |||
| 6996 | /* WaDisableSDEUnitClockGating:kbl */ | ||
| 6997 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) | ||
| 6998 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | ||
| 6999 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); | ||
| 7000 | |||
| 7001 | /* WaDisableGamClockGating:kbl */ | ||
| 7002 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) | ||
| 7003 | I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | | ||
| 7004 | GEN6_GAMUNIT_CLOCK_GATE_DISABLE); | ||
| 7005 | |||
| 7006 | /* WaFbcNukeOnHostModify:kbl */ | ||
| 7007 | I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | | ||
| 7008 | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); | ||
| 7009 | } | ||
| 7010 | |||
| 6966 | static void skylake_init_clock_gating(struct drm_device *dev) | 7011 | static void skylake_init_clock_gating(struct drm_device *dev) |
| 6967 | { | 7012 | { |
| 6968 | struct drm_i915_private *dev_priv = dev->dev_private; | 7013 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 6969 | 7014 | ||
| 6970 | /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,kbl */ | 7015 | gen9_init_clock_gating(dev); |
| 6971 | I915_WRITE(CHICKEN_PAR1_1, | 7016 | |
| 6972 | I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); | 7017 | /* WAC6entrylatency:skl */ |
| 7018 | I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | | ||
| 7019 | FBC_LLC_FULLY_OPEN); | ||
| 7020 | |||
| 7021 | /* WaFbcNukeOnHostModify:skl */ | ||
| 7022 | I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | | ||
| 7023 | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); | ||
| 6973 | } | 7024 | } |
| 6974 | 7025 | ||
| 6975 | static void broadwell_init_clock_gating(struct drm_device *dev) | 7026 | static void broadwell_init_clock_gating(struct drm_device *dev) |
| @@ -7016,6 +7067,10 @@ static void broadwell_init_clock_gating(struct drm_device *dev) | |||
| 7016 | */ | 7067 | */ |
| 7017 | I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); | 7068 | I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); |
| 7018 | 7069 | ||
| 7070 | /* WaKVMNotificationOnConfigChange:bdw */ | ||
| 7071 | I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1) | ||
| 7072 | | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT); | ||
| 7073 | |||
| 7019 | lpt_init_clock_gating(dev); | 7074 | lpt_init_clock_gating(dev); |
| 7020 | } | 7075 | } |
| 7021 | 7076 | ||
| @@ -7433,7 +7488,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) | |||
| 7433 | if (IS_SKYLAKE(dev_priv)) | 7488 | if (IS_SKYLAKE(dev_priv)) |
| 7434 | dev_priv->display.init_clock_gating = skylake_init_clock_gating; | 7489 | dev_priv->display.init_clock_gating = skylake_init_clock_gating; |
| 7435 | else if (IS_KABYLAKE(dev_priv)) | 7490 | else if (IS_KABYLAKE(dev_priv)) |
| 7436 | dev_priv->display.init_clock_gating = skylake_init_clock_gating; | 7491 | dev_priv->display.init_clock_gating = kabylake_init_clock_gating; |
| 7437 | else if (IS_BROXTON(dev_priv)) | 7492 | else if (IS_BROXTON(dev_priv)) |
| 7438 | dev_priv->display.init_clock_gating = bxt_init_clock_gating; | 7493 | dev_priv->display.init_clock_gating = bxt_init_clock_gating; |
| 7439 | else if (IS_BROADWELL(dev_priv)) | 7494 | else if (IS_BROADWELL(dev_priv)) |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 8d35a3978f9b..fedd27049814 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -908,24 +908,26 @@ static int chv_init_workarounds(struct intel_engine_cs *engine) | |||
| 908 | static int gen9_init_workarounds(struct intel_engine_cs *engine) | 908 | static int gen9_init_workarounds(struct intel_engine_cs *engine) |
| 909 | { | 909 | { |
| 910 | struct drm_i915_private *dev_priv = engine->i915; | 910 | struct drm_i915_private *dev_priv = engine->i915; |
| 911 | uint32_t tmp; | ||
| 912 | int ret; | 911 | int ret; |
| 913 | 912 | ||
| 914 | /* WaEnableLbsSlaRetryTimerDecrement:skl */ | 913 | /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */ |
| 914 | I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE)); | ||
| 915 | |||
| 916 | /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */ | ||
| 915 | I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | | 917 | I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | |
| 916 | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); | 918 | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); |
| 917 | 919 | ||
| 918 | /* WaDisableKillLogic:bxt,skl */ | 920 | /* WaDisableKillLogic:bxt,skl,kbl */ |
| 919 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | 921 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | |
| 920 | ECOCHK_DIS_TLB); | 922 | ECOCHK_DIS_TLB); |
| 921 | 923 | ||
| 922 | /* WaClearFlowControlGpgpuContextSave:skl,bxt */ | 924 | /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */ |
| 923 | /* WaDisablePartialInstShootdown:skl,bxt */ | 925 | /* WaDisablePartialInstShootdown:skl,bxt,kbl */ |
| 924 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, | 926 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, |
| 925 | FLOW_CONTROL_ENABLE | | 927 | FLOW_CONTROL_ENABLE | |
| 926 | PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); | 928 | PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); |
| 927 | 929 | ||
| 928 | /* Syncing dependencies between camera and graphics:skl,bxt */ | 930 | /* Syncing dependencies between camera and graphics:skl,bxt,kbl */ |
| 929 | WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, | 931 | WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, |
| 930 | GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); | 932 | GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); |
| 931 | 933 | ||
| @@ -947,18 +949,18 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) | |||
| 947 | */ | 949 | */ |
| 948 | } | 950 | } |
| 949 | 951 | ||
| 950 | /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */ | 952 | /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */ |
| 951 | /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */ | 953 | /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */ |
| 952 | WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, | 954 | WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, |
| 953 | GEN9_ENABLE_YV12_BUGFIX | | 955 | GEN9_ENABLE_YV12_BUGFIX | |
| 954 | GEN9_ENABLE_GPGPU_PREEMPTION); | 956 | GEN9_ENABLE_GPGPU_PREEMPTION); |
| 955 | 957 | ||
| 956 | /* Wa4x4STCOptimizationDisable:skl,bxt */ | 958 | /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */ |
| 957 | /* WaDisablePartialResolveInVc:skl,bxt */ | 959 | /* WaDisablePartialResolveInVc:skl,bxt,kbl */ |
| 958 | WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE | | 960 | WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE | |
| 959 | GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE)); | 961 | GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE)); |
| 960 | 962 | ||
| 961 | /* WaCcsTlbPrefetchDisable:skl,bxt */ | 963 | /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */ |
| 962 | WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, | 964 | WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, |
| 963 | GEN9_CCS_TLB_PREFETCH_ENABLE); | 965 | GEN9_CCS_TLB_PREFETCH_ENABLE); |
| 964 | 966 | ||
| @@ -968,31 +970,57 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) | |||
| 968 | WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, | 970 | WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, |
| 969 | PIXEL_MASK_CAMMING_DISABLE); | 971 | PIXEL_MASK_CAMMING_DISABLE); |
| 970 | 972 | ||
| 971 | /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ | 973 | /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */ |
| 972 | tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; | 974 | WA_SET_BIT_MASKED(HDC_CHICKEN0, |
| 973 | if (IS_SKL_REVID(dev_priv, SKL_REVID_F0, REVID_FOREVER) || | 975 | HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | |
| 974 | IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) | 976 | HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE); |
| 975 | tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; | 977 | |
| 976 | WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); | 978 | /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are |
| 979 | * both tied to WaForceContextSaveRestoreNonCoherent | ||
| 980 | * in some hsds for skl. We keep the tie for all gen9. The | ||
| 981 | * documentation is a bit hazy and so we want to get common behaviour, | ||
| 982 | * even though there is no clear evidence we would need both on kbl/bxt. | ||
| 983 | * This area has been a source of system hangs so we play it safe | ||
| 984 | * and mimic skl behaviour regardless of what bspec says. | ||
| 985 | * | ||
| 986 | * Use Force Non-Coherent whenever executing a 3D context. This | ||
| 987 | * is a workaround for a possible hang in the unlikely event | ||
| 988 | * a TLB invalidation occurs during a PSD flush. | ||
| 989 | */ | ||
| 977 | 990 | ||
| 978 | /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */ | 991 | /* WaForceEnableNonCoherent:skl,bxt,kbl */ |
| 979 | if (IS_SKYLAKE(dev_priv) || IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) | 992 | WA_SET_BIT_MASKED(HDC_CHICKEN0, |
| 993 | HDC_FORCE_NON_COHERENT); | ||
| 994 | |||
| 995 | /* WaDisableHDCInvalidation:skl,bxt,kbl */ | ||
| 996 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | ||
| 997 | BDW_DISABLE_HDC_INVALIDATION); | ||
| 998 | |||
| 999 | /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */ | ||
| 1000 | if (IS_SKYLAKE(dev_priv) || | ||
| 1001 | IS_KABYLAKE(dev_priv) || | ||
| 1002 | IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) | ||
| 980 | WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, | 1003 | WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, |
| 981 | GEN8_SAMPLER_POWER_BYPASS_DIS); | 1004 | GEN8_SAMPLER_POWER_BYPASS_DIS); |
| 982 | 1005 | ||
| 983 | /* WaDisableSTUnitPowerOptimization:skl,bxt */ | 1006 | /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */ |
| 984 | WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); | 1007 | WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); |
| 985 | 1008 | ||
| 986 | /* WaOCLCoherentLineFlush:skl,bxt */ | 1009 | /* WaOCLCoherentLineFlush:skl,bxt,kbl */ |
| 987 | I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) | | 1010 | I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) | |
| 988 | GEN8_LQSC_FLUSH_COHERENT_LINES)); | 1011 | GEN8_LQSC_FLUSH_COHERENT_LINES)); |
| 989 | 1012 | ||
| 990 | /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */ | 1013 | /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */ |
| 1014 | ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG); | ||
| 1015 | if (ret) | ||
| 1016 | return ret; | ||
| 1017 | |||
| 1018 | /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */ | ||
| 991 | ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); | 1019 | ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); |
| 992 | if (ret) | 1020 | if (ret) |
| 993 | return ret; | 1021 | return ret; |
| 994 | 1022 | ||
| 995 | /* WaAllowUMDToModifyHDCChicken1:skl,bxt */ | 1023 | /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */ |
| 996 | ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1); | 1024 | ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1); |
| 997 | if (ret) | 1025 | if (ret) |
| 998 | return ret; | 1026 | return ret; |
| @@ -1060,7 +1088,7 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) | |||
| 1060 | _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); | 1088 | _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); |
| 1061 | } | 1089 | } |
| 1062 | 1090 | ||
| 1063 | if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0)) { | 1091 | if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) { |
| 1064 | /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ | 1092 | /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ |
| 1065 | I915_WRITE(FF_SLICE_CS_CHICKEN2, | 1093 | I915_WRITE(FF_SLICE_CS_CHICKEN2, |
| 1066 | _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); | 1094 | _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); |
| @@ -1085,22 +1113,6 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) | |||
| 1085 | WA_SET_BIT_MASKED(HIZ_CHICKEN, | 1113 | WA_SET_BIT_MASKED(HIZ_CHICKEN, |
| 1086 | BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); | 1114 | BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); |
| 1087 | 1115 | ||
| 1088 | /* This is tied to WaForceContextSaveRestoreNonCoherent */ | ||
| 1089 | if (IS_SKL_REVID(dev_priv, 0, REVID_FOREVER)) { | ||
| 1090 | /* | ||
| 1091 | *Use Force Non-Coherent whenever executing a 3D context. This | ||
| 1092 | * is a workaround for a possible hang in the unlikely event | ||
| 1093 | * a TLB invalidation occurs during a PSD flush. | ||
| 1094 | */ | ||
| 1095 | /* WaForceEnableNonCoherent:skl */ | ||
| 1096 | WA_SET_BIT_MASKED(HDC_CHICKEN0, | ||
| 1097 | HDC_FORCE_NON_COHERENT); | ||
| 1098 | |||
| 1099 | /* WaDisableHDCInvalidation:skl */ | ||
| 1100 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | ||
| 1101 | BDW_DISABLE_HDC_INVALIDATION); | ||
| 1102 | } | ||
| 1103 | |||
| 1104 | /* WaBarrierPerformanceFixDisable:skl */ | 1116 | /* WaBarrierPerformanceFixDisable:skl */ |
| 1105 | if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0)) | 1117 | if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0)) |
| 1106 | WA_SET_BIT_MASKED(HDC_CHICKEN0, | 1118 | WA_SET_BIT_MASKED(HDC_CHICKEN0, |
| @@ -1113,6 +1125,9 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) | |||
| 1113 | GEN7_HALF_SLICE_CHICKEN1, | 1125 | GEN7_HALF_SLICE_CHICKEN1, |
| 1114 | GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); | 1126 | GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); |
| 1115 | 1127 | ||
| 1128 | /* WaDisableGafsUnitClkGating:skl */ | ||
| 1129 | WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | ||
| 1130 | |||
| 1116 | /* WaDisableLSQCROPERFforOCL:skl */ | 1131 | /* WaDisableLSQCROPERFforOCL:skl */ |
| 1117 | ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); | 1132 | ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); |
| 1118 | if (ret) | 1133 | if (ret) |
| @@ -1145,6 +1160,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) | |||
| 1145 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, | 1160 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, |
| 1146 | STALL_DOP_GATING_DISABLE); | 1161 | STALL_DOP_GATING_DISABLE); |
| 1147 | 1162 | ||
| 1163 | /* WaDisablePooledEuLoadBalancingFix:bxt */ | ||
| 1164 | if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) { | ||
| 1165 | WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2, | ||
| 1166 | GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); | ||
| 1167 | } | ||
| 1168 | |||
| 1148 | /* WaDisableSbeCacheDispatchPortSharing:bxt */ | 1169 | /* WaDisableSbeCacheDispatchPortSharing:bxt */ |
| 1149 | if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) { | 1170 | if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) { |
| 1150 | WA_SET_BIT_MASKED( | 1171 | WA_SET_BIT_MASKED( |
| @@ -1171,6 +1192,63 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) | |||
| 1171 | I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | | 1192 | I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | |
| 1172 | L3_HIGH_PRIO_CREDITS(2)); | 1193 | L3_HIGH_PRIO_CREDITS(2)); |
| 1173 | 1194 | ||
| 1195 | /* WaInsertDummyPushConstPs:bxt */ | ||
| 1196 | if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) | ||
| 1197 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | ||
| 1198 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); | ||
| 1199 | |||
| 1200 | return 0; | ||
| 1201 | } | ||
| 1202 | |||
| 1203 | static int kbl_init_workarounds(struct intel_engine_cs *engine) | ||
| 1204 | { | ||
| 1205 | struct drm_i915_private *dev_priv = engine->i915; | ||
| 1206 | int ret; | ||
| 1207 | |||
| 1208 | ret = gen9_init_workarounds(engine); | ||
| 1209 | if (ret) | ||
| 1210 | return ret; | ||
| 1211 | |||
| 1212 | /* WaEnableGapsTsvCreditFix:kbl */ | ||
| 1213 | I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | | ||
| 1214 | GEN9_GAPS_TSV_CREDIT_DISABLE)); | ||
| 1215 | |||
| 1216 | /* WaDisableDynamicCreditSharing:kbl */ | ||
| 1217 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) | ||
| 1218 | WA_SET_BIT(GAMT_CHKN_BIT_REG, | ||
| 1219 | GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); | ||
| 1220 | |||
| 1221 | /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */ | ||
| 1222 | if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0)) | ||
| 1223 | WA_SET_BIT_MASKED(HDC_CHICKEN0, | ||
| 1224 | HDC_FENCE_DEST_SLM_DISABLE); | ||
| 1225 | |||
| 1226 | /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes | ||
| 1227 | * involving this register should also be added to WA batch as required. | ||
| 1228 | */ | ||
| 1229 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) | ||
| 1230 | /* WaDisableLSQCROPERFforOCL:kbl */ | ||
| 1231 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | | ||
| 1232 | GEN8_LQSC_RO_PERF_DIS); | ||
| 1233 | |||
| 1234 | /* WaInsertDummyPushConstPs:kbl */ | ||
| 1235 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) | ||
| 1236 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | ||
| 1237 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); | ||
| 1238 | |||
| 1239 | /* WaDisableGafsUnitClkGating:kbl */ | ||
| 1240 | WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | ||
| 1241 | |||
| 1242 | /* WaDisableSbeCacheDispatchPortSharing:kbl */ | ||
| 1243 | WA_SET_BIT_MASKED( | ||
| 1244 | GEN7_HALF_SLICE_CHICKEN1, | ||
| 1245 | GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); | ||
| 1246 | |||
| 1247 | /* WaDisableLSQCROPERFforOCL:kbl */ | ||
| 1248 | ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); | ||
| 1249 | if (ret) | ||
| 1250 | return ret; | ||
| 1251 | |||
| 1174 | return 0; | 1252 | return 0; |
| 1175 | } | 1253 | } |
| 1176 | 1254 | ||
| @@ -1195,6 +1273,9 @@ int init_workarounds_ring(struct intel_engine_cs *engine) | |||
| 1195 | if (IS_BROXTON(dev_priv)) | 1273 | if (IS_BROXTON(dev_priv)) |
| 1196 | return bxt_init_workarounds(engine); | 1274 | return bxt_init_workarounds(engine); |
| 1197 | 1275 | ||
| 1276 | if (IS_KABYLAKE(dev_priv)) | ||
| 1277 | return kbl_init_workarounds(engine); | ||
| 1278 | |||
| 1198 | return 0; | 1279 | return 0; |
| 1199 | } | 1280 | } |
| 1200 | 1281 | ||
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index fe8faf30bda7..e856d49d6dc3 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
| @@ -65,6 +65,9 @@ | |||
| 65 | bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, | 65 | bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, |
| 66 | int power_well_id); | 66 | int power_well_id); |
| 67 | 67 | ||
| 68 | static struct i915_power_well * | ||
| 69 | lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id); | ||
| 70 | |||
| 68 | const char * | 71 | const char * |
| 69 | intel_display_power_domain_str(enum intel_display_power_domain domain) | 72 | intel_display_power_domain_str(enum intel_display_power_domain domain) |
| 70 | { | 73 | { |
| @@ -151,6 +154,23 @@ static void intel_power_well_disable(struct drm_i915_private *dev_priv, | |||
| 151 | power_well->ops->disable(dev_priv, power_well); | 154 | power_well->ops->disable(dev_priv, power_well); |
| 152 | } | 155 | } |
| 153 | 156 | ||
| 157 | static void intel_power_well_get(struct drm_i915_private *dev_priv, | ||
| 158 | struct i915_power_well *power_well) | ||
| 159 | { | ||
| 160 | if (!power_well->count++) | ||
| 161 | intel_power_well_enable(dev_priv, power_well); | ||
| 162 | } | ||
| 163 | |||
| 164 | static void intel_power_well_put(struct drm_i915_private *dev_priv, | ||
| 165 | struct i915_power_well *power_well) | ||
| 166 | { | ||
| 167 | WARN(!power_well->count, "Use count on power well %s is already zero", | ||
| 168 | power_well->name); | ||
| 169 | |||
| 170 | if (!--power_well->count) | ||
| 171 | intel_power_well_disable(dev_priv, power_well); | ||
| 172 | } | ||
| 173 | |||
| 154 | /* | 174 | /* |
| 155 | * We should only use the power well if we explicitly asked the hardware to | 175 | * We should only use the power well if we explicitly asked the hardware to |
| 156 | * enable it, so check if it's enabled and also check if we've requested it to | 176 | * enable it, so check if it's enabled and also check if we've requested it to |
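A minimal sketch of the refcount discipline the new get/put helpers establish; since they are file-local, this illustrative caller would live alongside them in intel_runtime_pm.c:

    /* Hypothetical caller pairing get/put around a hardware access. */
    static void example_access_power_well(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *well)
    {
            intel_power_well_get(dev_priv, well);   /* enables on 0 -> 1 */

            /* ... touch registers behind the power well ... */

            intel_power_well_put(dev_priv, well);   /* disables on 1 -> 0 */
    }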
| @@ -419,6 +439,16 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, | |||
| 419 | BIT(POWER_DOMAIN_MODESET) | \ | 439 | BIT(POWER_DOMAIN_MODESET) | \ |
| 420 | BIT(POWER_DOMAIN_AUX_A) | \ | 440 | BIT(POWER_DOMAIN_AUX_A) | \ |
| 421 | BIT(POWER_DOMAIN_INIT)) | 441 | BIT(POWER_DOMAIN_INIT)) |
| 442 | #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ | ||
| 443 | BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ | ||
| 444 | BIT(POWER_DOMAIN_AUX_A) | \ | ||
| 445 | BIT(POWER_DOMAIN_INIT)) | ||
| 446 | #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ | ||
| 447 | BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ | ||
| 448 | BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ | ||
| 449 | BIT(POWER_DOMAIN_AUX_B) | \ | ||
| 450 | BIT(POWER_DOMAIN_AUX_C) | \ | ||
| 451 | BIT(POWER_DOMAIN_INIT)) | ||
| 422 | 452 | ||
| 423 | static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) | 453 | static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) |
| 424 | { | 454 | { |
| @@ -800,6 +830,72 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv, | |||
| 800 | skl_set_power_well(dev_priv, power_well, false); | 830 | skl_set_power_well(dev_priv, power_well, false); |
| 801 | } | 831 | } |
| 802 | 832 | ||
| 833 | static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well) | ||
| 834 | { | ||
| 835 | enum skl_disp_power_wells power_well_id = power_well->data; | ||
| 836 | |||
| 837 | return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0; | ||
| 838 | } | ||
| 839 | |||
| 840 | static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, | ||
| 841 | struct i915_power_well *power_well) | ||
| 842 | { | ||
| 843 | enum skl_disp_power_wells power_well_id = power_well->data; | ||
| 844 | struct i915_power_well *cmn_a_well; | ||
| 845 | |||
| 846 | if (power_well_id == BXT_DPIO_CMN_BC) { | ||
| 847 | /* | ||
| 848 | * We need to copy the GRC calibration value from the eDP PHY, | ||
| 849 | * so make sure it's powered up. | ||
| 850 | */ | ||
| 851 | cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A); | ||
| 852 | intel_power_well_get(dev_priv, cmn_a_well); | ||
| 853 | } | ||
| 854 | |||
| 855 | bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well)); | ||
| 856 | |||
| 857 | if (power_well_id == BXT_DPIO_CMN_BC) | ||
| 858 | intel_power_well_put(dev_priv, cmn_a_well); | ||
| 859 | } | ||
| 860 | |||
| 861 | static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, | ||
| 862 | struct i915_power_well *power_well) | ||
| 863 | { | ||
| 864 | bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well)); | ||
| 865 | } | ||
| 866 | |||
| 867 | static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, | ||
| 868 | struct i915_power_well *power_well) | ||
| 869 | { | ||
| 870 | return bxt_ddi_phy_is_enabled(dev_priv, | ||
| 871 | bxt_power_well_to_phy(power_well)); | ||
| 872 | } | ||
| 873 | |||
| 874 | static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv, | ||
| 875 | struct i915_power_well *power_well) | ||
| 876 | { | ||
| 877 | if (power_well->count > 0) | ||
| 878 | bxt_dpio_cmn_power_well_enable(dev_priv, power_well); | ||
| 879 | else | ||
| 880 | bxt_dpio_cmn_power_well_disable(dev_priv, power_well); | ||
| 881 | } | ||
| 882 | |||
| 883 | |||
| 884 | static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) | ||
| 885 | { | ||
| 886 | struct i915_power_well *power_well; | ||
| 887 | |||
| 888 | power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A); | ||
| 889 | if (power_well->count > 0) | ||
| 890 | bxt_ddi_phy_verify_state(dev_priv, | ||
| 891 | bxt_power_well_to_phy(power_well)); | ||
| 892 | |||
| 893 | power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC); | ||
| 894 | if (power_well->count > 0) | ||
| 895 | bxt_ddi_phy_verify_state(dev_priv, | ||
| 896 | bxt_power_well_to_phy(power_well)); | ||
| 897 | } | ||
| 898 | |||
| 803 | static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, | 899 | static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, |
| 804 | struct i915_power_well *power_well) | 900 | struct i915_power_well *power_well) |
| 805 | { | 901 | { |
| @@ -826,7 +922,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, | |||
| 826 | gen9_assert_dbuf_enabled(dev_priv); | 922 | gen9_assert_dbuf_enabled(dev_priv); |
| 827 | 923 | ||
| 828 | if (IS_BROXTON(dev_priv)) | 924 | if (IS_BROXTON(dev_priv)) |
| 829 | broxton_ddi_phy_verify_state(dev_priv); | 925 | bxt_verify_ddi_phy_power_wells(dev_priv); |
| 830 | } | 926 | } |
| 831 | 927 | ||
| 832 | static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, | 928 | static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, |
| @@ -1518,10 +1614,8 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv, | |||
| 1518 | struct i915_power_well *power_well; | 1614 | struct i915_power_well *power_well; |
| 1519 | int i; | 1615 | int i; |
| 1520 | 1616 | ||
| 1521 | for_each_power_well(i, power_well, BIT(domain), power_domains) { | 1617 | for_each_power_well(i, power_well, BIT(domain), power_domains) |
| 1522 | if (!power_well->count++) | 1618 | intel_power_well_get(dev_priv, power_well); |
| 1523 | intel_power_well_enable(dev_priv, power_well); | ||
| 1524 | } | ||
| 1525 | 1619 | ||
| 1526 | power_domains->domain_use_count[domain]++; | 1620 | power_domains->domain_use_count[domain]++; |
| 1527 | } | 1621 | } |
| @@ -1615,14 +1709,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, | |||
| 1615 | intel_display_power_domain_str(domain)); | 1709 | intel_display_power_domain_str(domain)); |
| 1616 | power_domains->domain_use_count[domain]--; | 1710 | power_domains->domain_use_count[domain]--; |
| 1617 | 1711 | ||
| 1618 | for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { | 1712 | for_each_power_well_rev(i, power_well, BIT(domain), power_domains) |
| 1619 | WARN(!power_well->count, | 1713 | intel_power_well_put(dev_priv, power_well); |
| 1620 | "Use count on power well %s is already zero", | ||
| 1621 | power_well->name); | ||
| 1622 | |||
| 1623 | if (!--power_well->count) | ||
| 1624 | intel_power_well_disable(dev_priv, power_well); | ||
| 1625 | } | ||
| 1626 | 1714 | ||
| 1627 | mutex_unlock(&power_domains->lock); | 1715 | mutex_unlock(&power_domains->lock); |
| 1628 | 1716 | ||
| @@ -1793,6 +1881,13 @@ static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { | |||
| 1793 | .is_enabled = gen9_dc_off_power_well_enabled, | 1881 | .is_enabled = gen9_dc_off_power_well_enabled, |
| 1794 | }; | 1882 | }; |
| 1795 | 1883 | ||
| 1884 | static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { | ||
| 1885 | .sync_hw = bxt_dpio_cmn_power_well_sync_hw, | ||
| 1886 | .enable = bxt_dpio_cmn_power_well_enable, | ||
| 1887 | .disable = bxt_dpio_cmn_power_well_disable, | ||
| 1888 | .is_enabled = bxt_dpio_cmn_power_well_enabled, | ||
| 1889 | }; | ||
| 1890 | |||
| 1796 | static struct i915_power_well hsw_power_wells[] = { | 1891 | static struct i915_power_well hsw_power_wells[] = { |
| 1797 | { | 1892 | { |
| 1798 | .name = "always-on", | 1893 | .name = "always-on", |
| @@ -2029,6 +2124,18 @@ static struct i915_power_well bxt_power_wells[] = { | |||
| 2029 | .ops = &skl_power_well_ops, | 2124 | .ops = &skl_power_well_ops, |
| 2030 | .data = SKL_DISP_PW_2, | 2125 | .data = SKL_DISP_PW_2, |
| 2031 | }, | 2126 | }, |
| 2127 | { | ||
| 2128 | .name = "dpio-common-a", | ||
| 2129 | .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, | ||
| 2130 | .ops = &bxt_dpio_cmn_power_well_ops, | ||
| 2131 | .data = BXT_DPIO_CMN_A, | ||
| 2132 | }, | ||
| 2133 | { | ||
| 2134 | .name = "dpio-common-bc", | ||
| 2135 | .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, | ||
| 2136 | .ops = &bxt_dpio_cmn_power_well_ops, | ||
| 2137 | .data = BXT_DPIO_CMN_BC, | ||
| 2138 | }, | ||
| 2032 | }; | 2139 | }; |
| 2033 | 2140 | ||
| 2034 | static int | 2141 | static int |
| @@ -2294,14 +2401,10 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, | |||
| 2294 | 2401 | ||
| 2295 | mutex_unlock(&power_domains->lock); | 2402 | mutex_unlock(&power_domains->lock); |
| 2296 | 2403 | ||
| 2297 | broxton_init_cdclk(dev_priv); | 2404 | bxt_init_cdclk(dev_priv); |
| 2298 | 2405 | ||
| 2299 | gen9_dbuf_enable(dev_priv); | 2406 | gen9_dbuf_enable(dev_priv); |
| 2300 | 2407 | ||
| 2301 | broxton_ddi_phy_init(dev_priv); | ||
| 2302 | |||
| 2303 | broxton_ddi_phy_verify_state(dev_priv); | ||
| 2304 | |||
| 2305 | if (resume && dev_priv->csr.dmc_payload) | 2408 | if (resume && dev_priv->csr.dmc_payload) |
| 2306 | intel_csr_load_program(dev_priv); | 2409 | intel_csr_load_program(dev_priv); |
| 2307 | } | 2410 | } |
| @@ -2313,11 +2416,9 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv) | |||
| 2313 | 2416 | ||
| 2314 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); | 2417 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); |
| 2315 | 2418 | ||
| 2316 | broxton_ddi_phy_uninit(dev_priv); | ||
| 2317 | |||
| 2318 | gen9_dbuf_disable(dev_priv); | 2419 | gen9_dbuf_disable(dev_priv); |
| 2319 | 2420 | ||
| 2320 | broxton_uninit_cdclk(dev_priv); | 2421 | bxt_uninit_cdclk(dev_priv); |
| 2321 | 2422 | ||
| 2322 | /* The spec doesn't call for removing the reset handshake flag */ | 2423 | /* The spec doesn't call for removing the reset handshake flag */ |
| 2323 | 2424 | ||
| @@ -2448,6 +2549,7 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) | |||
| 2448 | /** | 2549 | /** |
| 2449 | * intel_power_domains_init_hw - initialize hardware power domain state | 2550 | * intel_power_domains_init_hw - initialize hardware power domain state |
| 2450 | * @dev_priv: i915 device instance | 2551 | * @dev_priv: i915 device instance |
| 2552 | * @resume: Called from resume code paths or not | ||
| 2451 | * | 2553 | * |
| 2452 | * This function initializes the hardware power domain state and enables all | 2554 | * This function initializes the hardware power domain state and enables all |
| 2453 | * power domains using intel_display_set_init_power(). | 2555 | * power domains using intel_display_set_init_power(). |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 324ccb06397d..fc654173c491 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
| @@ -166,6 +166,20 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work | |||
| 166 | 166 | ||
| 167 | trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end); | 167 | trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end); |
| 168 | 168 | ||
| 169 | /* We're still in the vblank-evade critical section, so this can't race. | ||
| 170 | * It would be slightly nicer to grab the vblank count and arm the event | ||
| 171 | * outside of the critical section, since the spinlock might spin for | ||
| 172 | * a while ... */ | ||
| 173 | if (crtc->base.state->event) { | ||
| 174 | WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0); | ||
| 175 | |||
| 176 | spin_lock(&crtc->base.dev->event_lock); | ||
| 177 | drm_crtc_arm_vblank_event(&crtc->base, crtc->base.state->event); | ||
| 178 | spin_unlock(&crtc->base.dev->event_lock); | ||
| 179 | |||
| 180 | crtc->base.state->event = NULL; | ||
| 181 | } | ||
| 182 | |||
| 169 | local_irq_enable(); | 183 | local_irq_enable(); |
| 170 | 184 | ||
| 171 | if (crtc->debug.start_vbl_count && | 185 | if (crtc->debug.start_vbl_count && |
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index 9e9bddaa58a5..f49edecd66a3 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h | |||
| @@ -13,6 +13,9 @@ void intel_gmch_remove(void); | |||
| 13 | bool intel_enable_gtt(void); | 13 | bool intel_enable_gtt(void); |
| 14 | 14 | ||
| 15 | void intel_gtt_chipset_flush(void); | 15 | void intel_gtt_chipset_flush(void); |
| 16 | void intel_gtt_insert_page(dma_addr_t addr, | ||
| 17 | unsigned int pg, | ||
| 18 | unsigned int flags); | ||
| 16 | void intel_gtt_insert_sg_entries(struct sg_table *st, | 19 | void intel_gtt_insert_sg_entries(struct sg_table *st, |
| 17 | unsigned int pg_start, | 20 | unsigned int pg_start, |
| 18 | unsigned int flags); | 21 | unsigned int flags); |
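A sketch of how the new single-page entry point might be used; the zero flags value is a placeholder for one of the AGP_USER_* caching types a real caller would pass:

    #include <drm/intel-gtt.h>

    /* Hypothetical mapping of one page into a given GTT slot. */
    static void example_map_one_page(dma_addr_t addr, unsigned int gtt_page)
    {
            intel_gtt_insert_page(addr, gtt_page, 0 /* caching flags */);
            intel_gtt_chipset_flush();
    }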
