author     Chris Wilson <chris@chris-wilson.co.uk>  2011-01-04 12:35:21 -0500
committer  Chris Wilson <chris@chris-wilson.co.uk>  2011-01-11 15:43:56 -0500
commit     0f46832fab779a9a3314ce5e833155fe4cf18f6c (patch)
tree       59abd23496ab8f2baf3656359aa4e7bd4d7b2b00 /drivers/gpu/drm
parent     b72f3acb71646de073abdc070fe1108866c96634 (diff)
drm/i915: Mask USER interrupts on gen6 (until required)
Otherwise we may consume 20% of the CPU just handling IRQs whilst
rendering. Ouch.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
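In outline: gen6 previously unmasked the USER interrupt for all three rings once at IRQ postinstall, so every completed batch raised an interrupt whether or not anyone was waiting on it. The patch instead gives each ring a ref-counted interrupt mask: the first irq_get unmasks the ring's USER interrupt in its per-ring IMR and in the shared GTIMR, and the last irq_put masks it again. A condensed, illustrative sketch of the pattern follows; the sketch_* names are placeholders, and the real helpers (gen6_ring_get_irq()/gen6_ring_put_irq() in the diff below) additionally check dev->irq_enabled and hold dev_priv->irq_lock around the register writes:

/* Illustrative sketch only -- condensed from gen6_ring_get_irq() and
 * gen6_ring_put_irq() below; locking and the irq_enabled check elided. */
static bool sketch_ring_get_irq(struct intel_ring_buffer *ring,
				u32 gflag, u32 rflag)
{
	if (atomic_inc_return(&ring->irq_refcount) == 1) {
		/* First waiter: unmask in the ring's own IMR ... */
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		/* ... and in the shared GT interrupt mask register. */
		ironlake_enable_irq(ring->dev->dev_private, gflag);
	}
	return true;
}

static void sketch_ring_put_irq(struct intel_ring_buffer *ring,
				u32 gflag, u32 rflag)
{
	if (atomic_dec_and_test(&ring->irq_refcount)) {
		/* Last waiter gone: mask again so idle rings stay silent. */
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(ring->dev->dev_private, gflag);
	}
}

Here rflag is the bit in the ring's own IMR (e.g. GEN6_RENDER_USER_INTERRUPT) and gflag the corresponding bit in the shared GTIMR (e.g. GT_USER_INTERRUPT); with both masked while nobody waits, an unwaited-on GPU no longer raises an interrupt per batch, which is where the 20% of CPU time went.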
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          |   6
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          |  45
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h          |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  | 120
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  |   3
5 files changed, 113 insertions(+), 62 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 972e08e4e054..1f77d8c6c6a2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1007,12 +1007,6 @@ extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
 extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
-extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
-					 u32 mask);
-extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
-					  u32 mask);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0dadc025b77b..826873a23db0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -64,26 +64,6 @@
 #define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)
 
-void
-ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->gt_irq_mask & mask) != 0) {
-		dev_priv->gt_irq_mask &= ~mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
-	}
-}
-
-void
-ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->gt_irq_mask & mask) != mask) {
-		dev_priv->gt_irq_mask |= mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
-	}
-}
-
 /* For display hotplug interrupt */
 static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -105,26 +85,6 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 	}
 }
 
-void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->irq_mask & mask) != 0) {
-		dev_priv->irq_mask &= ~mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-}
-
-void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->irq_mask & mask) != mask) {
-		dev_priv->irq_mask |= mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-}
-
 static inline u32
 i915_pipestat(int pipe)
 {
@@ -1673,11 +1633,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	if (IS_GEN6(dev)) {
-		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT);
-		I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT);
-		I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
-	}
 
 	if (IS_GEN6(dev))
 		render_irqs =
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ecfb0023f60d..b0ab4247ce48 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -288,6 +288,7 @@
 #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
 #define RING_ACTHD(base)	((base)+0x74)
 #define RING_NOPID(base)	((base)+0x94)
+#define RING_IMR(base)		((base)+0xa8)
 #define TAIL_ADDR		0x001FFFF8
 #define HEAD_WRAP_COUNT		0xFFE00000
 #define HEAD_WRAP_ONE		0x00200000
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index aa8f6abf16f2..3bff7fb72341 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -485,6 +485,38 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
 	return pc->cpu_page[0];
 }
 
+static void
+ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask &= ~mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask |= mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask &= ~mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
+static void
+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask |= mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
 static bool
 render_ring_get_irq(struct intel_ring_buffer *ring)
 {
@@ -499,8 +531,8 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	if (HAS_PCH_SPLIT(dev))
-		ironlake_enable_graphics_irq(dev_priv,
-					     GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
+		ironlake_enable_irq(dev_priv,
+				    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
 	else
 		i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -520,9 +552,9 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	if (HAS_PCH_SPLIT(dev))
-		ironlake_disable_graphics_irq(dev_priv,
-					      GT_USER_INTERRUPT |
-					      GT_PIPE_NOTIFY);
+		ironlake_disable_irq(dev_priv,
+				     GT_USER_INTERRUPT |
+				     GT_PIPE_NOTIFY);
 	else
 		i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -596,7 +628,7 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 		unsigned long irqflags;
 
 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		ironlake_enable_graphics_irq(dev_priv, flag);
+		ironlake_enable_irq(dev_priv, flag);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
 
@@ -613,7 +645,46 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 		unsigned long irqflags;
 
 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		ironlake_disable_graphics_irq(dev_priv, flag);
+		ironlake_disable_irq(dev_priv, flag);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
+}
+
+static bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+{
+	struct drm_device *dev = ring->dev;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	if (atomic_inc_return(&ring->irq_refcount) == 1) {
+		drm_i915_private_t *dev_priv = dev->dev_private;
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		ring->irq_mask &= ~rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_enable_irq(dev_priv, gflag);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
+
+	return true;
+}
+
+static void
+gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+{
+	struct drm_device *dev = ring->dev;
+
+	if (atomic_dec_and_test(&ring->irq_refcount)) {
+		drm_i915_private_t *dev_priv = dev->dev_private;
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		ring->irq_mask |= rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_disable_irq(dev_priv, gflag);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
 }
@@ -757,6 +828,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->gpu_write_list);
+	ring->irq_mask = ~0;
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
@@ -1030,15 +1102,35 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 }
 
 static bool
+gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_get_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static void
+gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_put_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static bool
 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+	return gen6_ring_get_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
 }
 
 static void
 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+	return gen6_ring_put_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
 }
 
 /* ring buffer for Video Codec for Gen6+ */
@@ -1062,13 +1154,17 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 static bool
 blt_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
+	return gen6_ring_get_irq(ring,
+				 GT_BLT_USER_INTERRUPT,
+				 GEN6_BLITTER_USER_INTERRUPT);
 }
 
 static void
 blt_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
+	gen6_ring_put_irq(ring,
+			  GT_BLT_USER_INTERRUPT,
+			  GEN6_BLITTER_USER_INTERRUPT);
 }
 
 
@@ -1192,6 +1288,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	*ring = render_ring;
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
+		ring->irq_get = gen6_render_ring_get_irq;
+		ring->irq_put = gen6_render_ring_put_irq;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->get_seqno = pc_render_get_seqno;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 5969c2ed1028..634f6f84cb57 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -28,6 +28,8 @@ struct intel_hw_status_page {
 #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
 #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
 
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR(ring->mmio_base), val)
+
 #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base))
 #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base))
 #define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base))
@@ -52,6 +54,7 @@ struct intel_ring_buffer {
 	int		effective_size;
 	struct intel_hw_status_page status_page;
 
+	u32		irq_mask;
 	u32		irq_seqno;		/* last seq seem at irq time */
 	u32		waiting_seqno;
 	u32		sync_seqno[I915_NUM_RINGS-1];