Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.h')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h	79
1 file changed, 72 insertions, 7 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2863d5a65187..f867aa6c31fc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -166,7 +166,6 @@ struct i915_ctx_workarounds {
 };
 
 struct drm_i915_gem_request;
-struct intel_render_state;
 
 /*
  * Engine IDs definitions.
@@ -195,9 +194,9 @@ struct i915_priolist {
  */
struct intel_engine_execlists {
 	/**
-	 * @irq_tasklet: softirq tasklet for bottom handler
+	 * @tasklet: softirq tasklet for bottom handler
 	 */
-	struct tasklet_struct irq_tasklet;
+	struct tasklet_struct tasklet;
 
 	/**
 	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
@@ -290,11 +289,14 @@ struct intel_engine_execlists {
 struct intel_engine_cs {
 	struct drm_i915_private *i915;
 	char name[INTEL_ENGINE_CS_MAX_NAME];
+
 	enum intel_engine_id id;
-	unsigned int uabi_id;
 	unsigned int hw_id;
 	unsigned int guc_id;
 
+	u8 uabi_id;
+	u8 uabi_class;
+
 	u8 class;
 	u8 instance;
 	u32 context_size;
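
Note: the engine now tracks the identifiers exposed to userspace (uabi_id, uabi_class) separately from the hardware class/instance numbering. Purely as an illustration of what uabi_id is for, a hypothetical lookup helper (not part of this patch; lookup_user_engine is an invented name) could resolve a legacy execbuf ring selector such as I915_EXEC_RENDER against it:

/*
 * Illustrative sketch only -- not added by this patch. Resolve a legacy
 * execbuf ring selector (I915_EXEC_RENDER, I915_EXEC_BSD, ...) to an
 * engine by comparing it against the new uabi_id field.
 * Assumes i915_drv.h for for_each_engine().
 */
static struct intel_engine_cs *
lookup_user_engine(struct drm_i915_private *i915, unsigned int user_ring_id)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		if (engine->uabi_id == user_ring_id)
			return engine;

	return NULL;
}
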
@@ -304,7 +306,7 @@ struct intel_engine_cs {
 	struct intel_ring *buffer;
 	struct intel_timeline *timeline;
 
-	struct intel_render_state *render_state;
+	struct drm_i915_gem_object *default_state;
 
 	atomic_t irq_count;
 	unsigned long irq_posted;
@@ -340,9 +342,9 @@ struct intel_engine_cs {
 		struct timer_list hangcheck; /* detect missed interrupts */
 
 		unsigned int hangcheck_interrupts;
+		unsigned int irq_enabled;
 
 		bool irq_armed : 1;
-		bool irq_enabled : 1;
 		I915_SELFTEST_DECLARE(bool mock : 1);
 	} breadcrumbs;
 
@@ -366,6 +368,9 @@ struct intel_engine_cs {
 	void		(*reset_hw)(struct intel_engine_cs *engine,
 				    struct drm_i915_gem_request *req);
 
+	void		(*park)(struct intel_engine_cs *engine);
+	void		(*unpark)(struct intel_engine_cs *engine);
+
 	void		(*set_default_submission)(struct intel_engine_cs *engine);
 
 	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
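
Note: the new park/unpark vfuncs give each submission backend a hook for when the engines go idle and when new work arrives. A minimal sketch of how the intel_engines_park() entry point declared later in this patch might drive the hook; the body is illustrative, not the actual implementation:

/* Illustrative only: walk the engines and let each backend park itself. */
void intel_engines_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		if (engine->park)
			engine->park(engine);
	}
}
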
@@ -555,6 +560,12 @@ execlists_is_active(const struct intel_engine_execlists *execlists,
 	return test_bit(bit, (unsigned long *)&execlists->active);
 }
 
+void
+execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
+
+void
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
+
 static inline unsigned int
 execlists_num_ports(const struct intel_engine_execlists * const execlists)
 {
@@ -624,6 +635,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
  */
 #define I915_GEM_HWS_INDEX		0x30
 #define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+#define I915_GEM_HWS_PREEMPT_INDEX	0x32
+#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 #define I915_GEM_HWS_SCRATCH_INDEX	0x40
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
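
Note: I915_GEM_HWS_PREEMPT_INDEX reserves a dword in the hardware status page for signalling preemption completion. A hedged sketch of polling that slot with the existing intel_read_status_page() helper; the token value and the example_preempt_done() name are invented for illustration:

/* Illustrative only: the completion token here is hypothetical. */
#define EXAMPLE_PREEMPT_DONE_TOKEN 0x1

static inline bool example_preempt_done(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
	       EXAMPLE_PREEMPT_DONE_TOKEN;
}
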
@@ -648,6 +661,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
+int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
 u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
 				   unsigned int n);
 
@@ -776,6 +790,11 @@ static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
 	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
 }
 
+static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
+{
+	return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
+}
+
 /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
 int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
 
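
Note: intel_hws_preempt_done_address() is the GGTT-address counterpart of the define above, mirroring intel_hws_seqno_address(). One plausible use, assuming the command streamer is asked to write a token into that slot when preemption completes (gen8_emit_ggtt_write() is added further down in this patch; the token value and example_emit_preempt_done() are invented):

/* Illustrative only: emit a CS write of a made-up token into the
 * preempt slot of the HWS, to be polled later from the CPU.
 */
static u32 *example_emit_preempt_done(struct intel_engine_cs *engine, u32 *cs)
{
	return gen8_emit_ggtt_write(cs, 0x1 /* made-up token value */,
				    intel_hws_preempt_done_address(engine));
}
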
@@ -846,6 +865,9 @@ unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
 #define ENGINE_WAKEUP_WAITER BIT(0)
 #define ENGINE_WAKEUP_ASLEEP BIT(1)
 
+void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
+void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
+
 void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
 
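
Note: together with the breadcrumbs change above (irq_enabled grows from a single bit into an unsigned int), these declarations suggest reference-counted user-interrupt enabling: pinning keeps the interrupt on even when no waiter is queued. A sketch of the expected shape; enable_user_interrupt() stands in for whatever intel_breadcrumbs.c uses internally, and the real body may differ:

/* Sketch: keep the user interrupt enabled while at least one pin is held. */
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	if (!b->irq_enabled++)		/* first pin turns the irq on */
		enable_user_interrupt(engine);	/* assumed backend helper */
	spin_unlock_irq(&b->irq_lock);
}
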
@@ -864,11 +886,54 @@ static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
 	return batch + 6;
 }
 
+static inline u32 *
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
+{
+	/* We're using qword write, offset should be aligned to 8 bytes. */
+	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+	/* w/a for post sync ops following a GPGPU operation we
+	 * need a prior CS_STALL, which is emitted by the flush
+	 * following the batch.
+	 */
+	*cs++ = GFX_OP_PIPE_CONTROL(6);
+	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+		PIPE_CONTROL_QW_WRITE;
+	*cs++ = gtt_offset;
+	*cs++ = 0;
+	*cs++ = value;
+	/* We're thrashing one dword of HWS. */
+	*cs++ = 0;
+
+	return cs;
+}
+
+static inline u32 *
+gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
+{
+	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+	GEM_BUG_ON(gtt_offset & (1 << 5));
+	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
+	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
+	*cs++ = 0;
+	*cs++ = value;
+
+	return cs;
+}
+
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
 
-void intel_engines_mark_idle(struct drm_i915_private *i915);
+bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
+
+void intel_engines_park(struct drm_i915_private *i915);
+void intel_engines_unpark(struct drm_i915_private *i915);
+
 void intel_engines_reset_default_submission(struct drm_i915_private *i915);
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
 
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
 
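
Note: the two emitters cover the render engine (PIPE_CONTROL with a qword post-sync write) and the other engines (MI_FLUSH_DW with a dword store). Roughly how a breadcrumb could be emitted with them, paraphrased from the helpers in this header rather than quoted from intel_lrc.c (example_emit_breadcrumb is an invented name):

/* Illustrative: write the request's global seqno into the status page
 * slot that intel_engine_get_seqno() reads back, then raise the user
 * interrupt so waiters are woken.
 */
static u32 *example_emit_breadcrumb(struct drm_i915_gem_request *rq, u32 *cs)
{
	cs = gen8_emit_ggtt_write(cs, rq->global_seqno,
				  intel_hws_seqno_address(rq->engine));
	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;
	return cs;
}
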