author		Chris Wilson <chris@chris-wilson.co.uk>	2010-11-02 06:38:58 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-11-02 06:48:48 -0400
commit		5588978882b5f4b81169bd7f9bc941e3a12ee8ba
tree		01c9af11069c0402e483369728bc9a879ce0a86e /drivers/gpu/drm/i915/intel_ringbuffer.c
parent		80dbf4b72b0bcac71fc683914293555edb7bc7ee
drm/i915: SNB BLT workaround
On some steppings of the SNB CPU, the first command to be parsed in the
BLT command streamer should be MI_BATCH_BUFFER_START, otherwise the GPU
may hang.
(cherry picked from commit 8d19215be8254f4f75e9c5a0d28345947b0382db)
Conflicts:
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
Signed-off-by: Zou Nan hai <nanhai.zou@intel.com>
Cc: stable@kernel.org
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
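
The mechanism of the fix, in brief: each BLT ring keeps a pinned 4KiB
"dummy batch" whose first two dwords are MI_BATCH_BUFFER_END and MI_NOOP,
and every command sequence emitted to the ring is prefixed with a
batchbuffer start that jumps through it, so the parser always sees
MI_BATCH_BUFFER_START first. A minimal sketch of the two halves, using
the emit helpers from this file (a summary of the patch below, not a
drop-in replacement; workaround_bo stands for the object returned by
to_blt_workaround() in the diff):

	/* at ring init: write the dummy batch into the pinned page */
	ptr[0] = MI_BATCH_BUFFER_END;
	ptr[1] = MI_NOOP;

	/* on every emission: jump through the dummy batch first */
	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
	intel_ring_emit(dev, ring, workaround_bo->gtt_offset);
	/* ...the caller's own commands follow here... */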
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	116
1 file changed, 113 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae2..7c1f3ff2f788 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -654,6 +654,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
 	i915_gem_object_unpin(ring->gem_object);
 	drm_gem_object_unreference(ring->gem_object);
 	ring->gem_object = NULL;
+
+	if (ring->cleanup)
+		ring->cleanup(ring);
+
 	cleanup_status_page(dev, ring);
 }
 
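The cleanup hook tested above is new; its declaration lands in
intel_ringbuffer.h (one of the conflict files noted in the commit
message), which is filtered out of this view. Judging from the usage in
this file, the struct presumably gains members along these lines (a
sketch, not the actual header hunk):

	/* presumed additions to struct intel_ring_buffer */
	void	(*cleanup)(struct intel_ring_buffer *ring);
	void	*private;	/* per-ring driver data; the BLT workaround bo here */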
@@ -854,19 +858,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
 	/* do nothing */
 }
 
+
+/* Workaround for some stepping of SNB,
+ * each time when BLT engine ring tail moved,
+ * the first command in the ring to be parsed
+ * should be MI_BATCH_BUFFER_START
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+	(IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+	return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+			 struct intel_ring_buffer *ring)
+{
+	if (NEED_BLT_WORKAROUND(dev)) {
+		struct drm_i915_gem_object *obj;
+		u32 __iomem *ptr;
+		int ret;
+
+		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+		if (obj == NULL)
+			return -ENOMEM;
+
+		ret = i915_gem_object_pin(&obj->base, 4096);
+		if (ret) {
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ptr = kmap(obj->pages[0]);
+		iowrite32(MI_BATCH_BUFFER_END, ptr);
+		iowrite32(MI_NOOP, ptr+1);
+		kunmap(obj->pages[0]);
+
+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+		if (ret) {
+			i915_gem_object_unpin(&obj->base);
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ring->private = obj;
+	}
+
+	return init_ring_common(dev, ring);
+}
+
+static void blt_ring_begin(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   int num_dwords)
+{
+	if (ring->private) {
+		intel_ring_begin(dev, ring, num_dwords+2);
+		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+	} else
+		intel_ring_begin(dev, ring, 4);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   u32 invalidate_domains,
+			   u32 flush_domains)
+{
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_FLUSH_DW);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+		     struct intel_ring_buffer *ring,
+		     u32 flush_domains)
+{
+	u32 seqno = i915_gem_get_seqno(dev);
+
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(dev, ring,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(dev, ring, seqno);
+	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+	intel_ring_advance(dev, ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+	return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+	if (!ring->private)
+		return;
+
+	i915_gem_object_unpin(ring->private);
+	drm_gem_object_unreference(ring->private);
+	ring->private = NULL;
+}
+
 static const struct intel_ring_buffer gen6_blt_ring = {
 	.name			= "blt ring",
 	.id			= RING_BLT,
 	.mmio_base		= BLT_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
-	.init			= init_ring_common,
+	.init			= blt_ring_init,
 	.write_tail		= ring_write_tail,
-	.flush			= gen6_ring_flush,
-	.add_request		= ring_add_request,
+	.flush			= blt_ring_flush,
+	.add_request		= blt_ring_add_request,
 	.get_seqno		= ring_status_page_get_seqno,
 	.user_irq_get		= blt_ring_get_user_irq,
 	.user_irq_put		= blt_ring_put_user_irq,
 	.dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
+	.cleanup		= blt_ring_cleanup,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
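
For illustration, once blt_ring_init() has allocated the workaround
object, a single blt_ring_flush() emits six dwords rather than four;
reading the values straight off the code above (the GTT offset varies
at runtime):

	MI_BATCH_BUFFER_START			/* prepended by blt_ring_begin() */
	to_blt_workaround(ring)->gtt_offset	/* dummy batch: BB_END, NOOP */
	MI_FLUSH_DW
	0
	0
	0

On the affected steppings (PCI revision < 8) this costs two extra dwords
per emission; on later steppings ring->private stays NULL and the ring
behaves exactly as before.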