author	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>	2017-04-28 03:53:36 -0400
committer	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>	2017-04-28 05:11:59 -0400
commit	63ffbcdadcf2b5dde2cd6db6715fc94e77cd43b6 (patch)
tree	fdb59ad7c9664bf324f71fbb053535de891e22e1
parent	a3662830e1e120e9950072a48d75a61ed921ad4a (diff)
drm/i915: Sanitize engine context sizes
Pre-calculate engine context size based on engine class and device
generation and store it in the engine instance.

v2:
- Squash and get rid of hw_context_size (Chris)

v3:
- Move after MMIO init for probing on Gen7 and 8 (Chris)
- Retained rounding (Tvrtko)

v4:
- Rebase for deferred legacy context allocation

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: intel-gvt-dev@lists.freedesktop.org
Acked-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
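The idea behind the patch, reduced to a minimal sketch: compute the context-image size once per engine class and device generation while the engine is set up, cache it in the engine structure, and have callers read the cached field instead of probing CXT_SIZE registers or calling intel_lr_context_size() each time. The sketch below is not part of the commit and uses stand-in types, with only the page counts taken from the patch (20/22 pages for render on Gen8/Gen9, 2 pages for other engines on Gen8+):

    /* Minimal, self-contained sketch of the caching pattern; stand-in types,
     * not the real i915 definitions. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    enum engine_class { RENDER_CLASS, VIDEO_DECODE_CLASS, COPY_ENGINE_CLASS };

    struct engine {
            enum engine_class class;
            uint32_t context_size;  /* cached once at setup, as in the patch */
    };

    /* Stand-in for __intel_engine_context_size(): size per class/generation. */
    static uint32_t context_size_for(enum engine_class class, int gen)
    {
            if (class == RENDER_CLASS)
                    return (gen >= 9 ? 22u : 20u) * PAGE_SIZE;
            return gen >= 8 ? 2u * PAGE_SIZE : 0u;
    }

    static void engine_setup(struct engine *e, enum engine_class class, int gen)
    {
            e->class = class;
            /* Pre-calculate and store; callers no longer re-derive the size. */
            e->context_size = context_size_for(class, gen);
    }

    int main(void)
    {
            struct engine rcs;

            engine_setup(&rcs, RENDER_CLASS, 9);
            /* A caller (e.g. context allocation) just reads the cached field. */
            printf("render context size: %u bytes\n", (unsigned)rcs.context_size);
            return 0;
    }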
-rw-r--r--	drivers/gpu/drm/i915/gvt/scheduler.c	6
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	15
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	3
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_context.c	56
-rw-r--r--	drivers/gpu/drm/i915/i915_guc_submission.c	3
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h	10
-rw-r--r--	drivers/gpu/drm/i915/intel_engine_cs.c	90
-rw-r--r--	drivers/gpu/drm/i915/intel_lrc.c	54
-rw-r--r--	drivers/gpu/drm/i915/intel_lrc.h	2
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	4
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h	7
11 files changed, 112 insertions(+), 138 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index a77db2332e68..ac538dcfff61 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -69,8 +69,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
 		      workload->ctx_desc.lrca);
 
-	context_page_num = intel_lr_context_size(
-			gvt->dev_priv->engine[ring_id]);
+	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
 
 	context_page_num = context_page_num >> PAGE_SHIFT;
 
@@ -333,8 +332,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
 		      workload->ctx_desc.lrca);
 
-	context_page_num = intel_lr_context_size(
-			gvt->dev_priv->engine[ring_id]);
+	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
 
 	context_page_num = context_page_num >> PAGE_SHIFT;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c7d68e789642..2d3c426465d3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -835,10 +835,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	intel_uc_init_early(dev_priv);
 	i915_memcpy_init_early(dev_priv);
 
-	ret = intel_engines_init_early(dev_priv);
-	if (ret)
-		return ret;
-
 	ret = i915_workqueues_init(dev_priv);
 	if (ret < 0)
 		goto err_engines;
@@ -948,14 +944,21 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 
 	ret = i915_mmio_setup(dev_priv);
 	if (ret < 0)
-		goto put_bridge;
+		goto err_bridge;
 
 	intel_uncore_init(dev_priv);
+
+	ret = intel_engines_init_mmio(dev_priv);
+	if (ret)
+		goto err_uncore;
+
 	i915_gem_init_mmio(dev_priv);
 
 	return 0;
 
-put_bridge:
+err_uncore:
+	intel_uncore_fini(dev_priv);
+err_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d1f7c48e4ae3..e68edf1305cc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2359,7 +2359,6 @@ struct drm_i915_private {
 	 */
 	struct mutex av_mutex;
 
-	uint32_t hw_context_size;
 	struct list_head context_list;
 
 	u32 fdi_rx_config;
@@ -3023,7 +3022,7 @@ extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
 
-int intel_engines_init_early(struct drm_i915_private *dev_priv);
+int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
 int intel_engines_init(struct drm_i915_private *dev_priv);
 
 /* intel_hotplug.c */
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d46a69d3d390..31a73c39239f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -92,33 +92,6 @@
 
 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
 
-static int get_context_size(struct drm_i915_private *dev_priv)
-{
-	int ret;
-	u32 reg;
-
-	switch (INTEL_GEN(dev_priv)) {
-	case 6:
-		reg = I915_READ(CXT_SIZE);
-		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
-		break;
-	case 7:
-		reg = I915_READ(GEN7_CXT_SIZE);
-		if (IS_HASWELL(dev_priv))
-			ret = HSW_CXT_TOTAL_SIZE;
-		else
-			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
-		break;
-	case 8:
-		ret = GEN8_CXT_TOTAL_SIZE;
-		break;
-	default:
-		BUG();
-	}
-
-	return ret;
-}
-
 void i915_gem_context_free(struct kref *ctx_ref)
 {
 	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
@@ -384,21 +357,6 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
 	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
 	ida_init(&dev_priv->context_hw_ida);
 
-	if (i915.enable_execlists) {
-		/* NB: intentionally left blank. We will allocate our own
-		 * backing objects as we need them, thank you very much */
-		dev_priv->hw_context_size = 0;
-	} else if (HAS_HW_CONTEXTS(dev_priv)) {
-		dev_priv->hw_context_size =
-			round_up(get_context_size(dev_priv),
-				 I915_GTT_PAGE_SIZE);
-		if (dev_priv->hw_context_size > (1<<20)) {
-			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
-					 dev_priv->hw_context_size);
-			dev_priv->hw_context_size = 0;
-		}
-	}
-
 	ctx = i915_gem_create_context(dev_priv, NULL);
 	if (IS_ERR(ctx)) {
 		DRM_ERROR("Failed to create default global context (error %ld)\n",
@@ -418,8 +376,8 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
 	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
 
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
-			 i915.enable_execlists ? "LR" :
-			 dev_priv->hw_context_size ? "HW" : "fake");
+			 dev_priv->engine[RCS]->context_size ? "logical" :
+			 "fake");
 	return 0;
 }
 
@@ -882,11 +840,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
-static bool contexts_enabled(struct drm_device *dev)
-{
-	return i915.enable_execlists || to_i915(dev)->hw_context_size;
-}
-
 static bool client_is_banned(struct drm_i915_file_private *file_priv)
 {
 	return file_priv->context_bans > I915_MAX_CLIENT_CONTEXT_BANS;
@@ -895,12 +848,13 @@ static bool client_is_banned(struct drm_i915_file_private *file_priv)
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_context_create *args = data;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct i915_gem_context *ctx;
 	int ret;
 
-	if (!contexts_enabled(dev))
+	if (!dev_priv->engine[RCS]->context_size)
 		return -ENODEV;
 
 	if (args->pad != 0)
@@ -918,7 +872,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	ctx = i915_gem_create_context(to_i915(dev), file_priv);
+	ctx = i915_gem_create_context(dev_priv, file_priv);
 	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 4cc97bf1bdac..7e85b5ab8ae2 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -1051,8 +1051,7 @@ static int guc_ads_create(struct intel_guc *guc)
 		dev_priv->engine[RCS]->status_page.ggtt_offset;
 
 	for_each_engine(engine, dev_priv, id)
-		blob->ads.eng_state_size[engine->guc_id] =
-			intel_lr_context_size(engine);
+		blob->ads.eng_state_size[engine->guc_id] = engine->context_size;
 
 	base = guc_ggtt_offset(vma);
 	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4c72adae368b..ee8170cda93e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3370,16 +3370,6 @@ enum skl_disp_power_wells {
 #define GEN7_CXT_VFSTATE_SIZE(ctx_reg)	(((ctx_reg) >> 0) & 0x3f)
 #define GEN7_CXT_TOTAL_SIZE(ctx_reg)	(GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
 					 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
-/* Haswell does have the CXT_SIZE register however it does not appear to be
- * valid. Now, docs explain in dwords what is in the context object. The full
- * size is 70720 bytes, however, the power context and execlist context will
- * never be saved (power context is stored elsewhere, and execlists don't work
- * on HSW) - so the final size, including the extra state required for the
- * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
- */
-#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
-/* Same as Haswell, but 72064 bytes now. */
-#define GEN8_CXT_TOTAL_SIZE		(18 * PAGE_SIZE)
 
 enum {
 	INTEL_ADVANCED_CONTEXT = 0,
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 82a274b336c5..6d3d83876da9 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -26,6 +26,22 @@
 #include "intel_ringbuffer.h"
 #include "intel_lrc.h"
 
+/* Haswell does have the CXT_SIZE register however it does not appear to be
+ * valid. Now, docs explain in dwords what is in the context object. The full
+ * size is 70720 bytes, however, the power context and execlist context will
+ * never be saved (power context is stored elsewhere, and execlists don't work
+ * on HSW) - so the final size, including the extra state required for the
+ * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
+ */
+#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
+/* Same as Haswell, but 72064 bytes now. */
+#define GEN8_CXT_TOTAL_SIZE		(18 * PAGE_SIZE)
+
+#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
+#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
+
+#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)
+
 struct engine_class_info {
 	const char *name;
 	int (*init_legacy)(struct intel_engine_cs *engine);
@@ -107,6 +123,69 @@ static const struct engine_info intel_engines[] = {
 	},
 };
 
+/**
+ * __intel_engine_context_size() - return the size of the context for an engine
+ * @dev_priv: i915 device private
+ * @class: engine class
+ *
+ * Each engine class may require a different amount of space for a context
+ * image.
+ *
+ * Return: size (in bytes) of an engine class specific context image
+ *
+ * Note: this size includes the HWSP, which is part of the context image
+ * in LRC mode, but does not include the "shared data page" used with
+ * GuC submission. The caller should account for this if using the GuC.
+ */
+static u32
+__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
+{
+	u32 cxt_size;
+
+	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
+
+	switch (class) {
+	case RENDER_CLASS:
+		switch (INTEL_GEN(dev_priv)) {
+		default:
+			MISSING_CASE(INTEL_GEN(dev_priv));
+		case 9:
+			return GEN9_LR_CONTEXT_RENDER_SIZE;
+		case 8:
+			return i915.enable_execlists ?
+			       GEN8_LR_CONTEXT_RENDER_SIZE :
+			       GEN8_CXT_TOTAL_SIZE;
+		case 7:
+			if (IS_HASWELL(dev_priv))
+				return HSW_CXT_TOTAL_SIZE;
+
+			cxt_size = I915_READ(GEN7_CXT_SIZE);
+			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
+					PAGE_SIZE);
+		case 6:
+			cxt_size = I915_READ(CXT_SIZE);
+			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
+					PAGE_SIZE);
+		case 5:
+		case 4:
+		case 3:
+		case 2:
+		/* For the special day when i810 gets merged. */
+		case 1:
+			return 0;
+		}
+		break;
+	default:
+		MISSING_CASE(class);
+	case VIDEO_DECODE_CLASS:
+	case VIDEO_ENHANCEMENT_CLASS:
+	case COPY_ENGINE_CLASS:
+		if (INTEL_GEN(dev_priv) < 8)
+			return 0;
+		return GEN8_LR_CONTEXT_OTHER_SIZE;
+	}
+}
+
 static int
 intel_engine_setup(struct drm_i915_private *dev_priv,
 		   enum intel_engine_id id)
@@ -135,6 +214,11 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	engine->class = info->class;
 	engine->instance = info->instance;
 
+	engine->context_size = __intel_engine_context_size(dev_priv,
+							   engine->class);
+	if (WARN_ON(engine->context_size > BIT(20)))
+		engine->context_size = 0;
+
 	/* Nothing to do here, execute in order of dependencies */
 	engine->schedule = NULL;
 
@@ -145,12 +229,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 }
 
 /**
- * intel_engines_init_early() - allocate the Engine Command Streamers
+ * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
  * @dev_priv: i915 device private
  *
  * Return: non-zero if the initialization failed.
  */
-int intel_engines_init_early(struct drm_i915_private *dev_priv)
+int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
 {
 	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
 	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
@@ -200,7 +284,7 @@ cleanup:
 }
 
 /**
- * intel_engines_init() - allocate, populate and init the Engine Command Streamers
+ * intel_engines_init() - init the Engine Command Streamers
  * @dev_priv: i915 device private
  *
  * Return: non-zero if the initialization failed.
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 5ec064a56a7d..0909549ad320 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -138,10 +138,6 @@
 #include "i915_drv.h"
 #include "intel_mocs.h"
 
-#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
-#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
-#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
-
 #define RING_EXECLIST_QFULL		(1 << 0x2)
 #define RING_EXECLIST1_VALID		(1 << 0x3)
 #define RING_EXECLIST0_VALID		(1 << 0x4)
@@ -1918,53 +1914,6 @@ populate_lr_context(struct i915_gem_context *ctx,
 	return 0;
 }
 
-/**
- * intel_lr_context_size() - return the size of the context for an engine
- * @engine: which engine to find the context size for
- *
- * Each engine may require a different amount of space for a context image,
- * so when allocating (or copying) an image, this function can be used to
- * find the right size for the specific engine.
- *
- * Return: size (in bytes) of an engine-specific context image
- *
- * Note: this size includes the HWSP, which is part of the context image
- * in LRC mode, but does not include the "shared data page" used with
- * GuC submission. The caller should account for this if using the GuC.
- */
-uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	WARN_ON(INTEL_GEN(dev_priv) < 8);
-
-	switch (engine->class) {
-	case RENDER_CLASS:
-		switch (INTEL_GEN(dev_priv)) {
-		default:
-			MISSING_CASE(INTEL_GEN(dev_priv));
-		case 9:
-			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
-			break;
-		case 8:
-			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
-			break;
-		}
-		break;
-
-	default:
-		MISSING_CASE(engine->class);
-	case VIDEO_DECODE_CLASS:
-	case VIDEO_ENHANCEMENT_CLASS:
-	case COPY_ENGINE_CLASS:
-		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
-		break;
-	}
-
-	return ret;
-}
-
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 					    struct intel_engine_cs *engine)
 {
@@ -1977,8 +1926,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
 	WARN_ON(ce->state);
 
-	context_size = round_up(intel_lr_context_size(engine),
-				I915_GTT_PAGE_SIZE);
+	context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
 
 	/* One extra page as the sharing data between driver and GuC */
 	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index e8015e7bf4e9..52b3a1fd4059 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -78,8 +78,6 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
 struct drm_i915_private;
 struct i915_gem_context;
 
-uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
-
 void intel_lr_context_resume(struct drm_i915_private *dev_priv);
 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
 				     struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 61f612454ce7..29b5afac7856 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1444,7 +1444,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 
-	obj = i915_gem_object_create(i915, i915->hw_context_size);
+	obj = i915_gem_object_create(i915, engine->context_size);
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
@@ -1487,7 +1487,7 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
 		return 0;
 	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
 
-	if (engine->id == RCS && !ce->state && engine->i915->hw_context_size) {
+	if (!ce->state && engine->context_size) {
 		struct i915_vma *vma;
 
 		vma = alloc_context_vma(engine);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2506bbe26fa0..02d741ef99ad 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -196,13 +196,14 @@ struct intel_engine_cs {
 	enum intel_engine_id id;
 	unsigned int uabi_id;
 	unsigned int hw_id;
+	unsigned int guc_id;
 
 	u8 class;
 	u8 instance;
-
-	unsigned int guc_id;
-	u32 mmio_base;
+	u32 context_size;
+	u32 mmio_base;
 	unsigned int irq_shift;
+
 	struct intel_ring *buffer;
 	struct intel_timeline *timeline;
 