author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/gpu/drm/i915/intel_ringbuffer.h
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c

Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.h')
 -rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h | 236
 1 file changed, 154 insertions(+), 82 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 525e7d3edda8..39ac2b634ae5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,69 +1,80 @@
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_
 
+enum {
+	RCS = 0x0,
+	VCS,
+	BCS,
+	I915_NUM_RINGS,
+};
+
 struct intel_hw_status_page {
-	void *page_addr;
+	u32 __iomem *page_addr;
 	unsigned int gfx_addr;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 };
 
-struct drm_i915_gem_execbuffer2;
+#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+
+#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+
+#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+
+#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+
+#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+
+#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
+
 struct intel_ring_buffer {
 	const char *name;
-	struct ring_regs {
-		u32 ctl;
-		u32 head;
-		u32 tail;
-		u32 start;
-	} regs;
-	unsigned int ring_flag;
-	unsigned long size;
-	unsigned int alignment;
-	void *virtual_start;
+	enum intel_ring_id {
+		RING_RENDER = 0x1,
+		RING_BSD = 0x2,
+		RING_BLT = 0x4,
+	} id;
+	u32 mmio_base;
+	void __iomem *virtual_start;
 	struct drm_device *dev;
-	struct drm_gem_object *gem_object;
+	struct drm_i915_gem_object *obj;
 
-	unsigned int head;
-	unsigned int tail;
-	unsigned int space;
+	u32 head;
+	u32 tail;
+	int space;
+	int size;
+	int effective_size;
 	struct intel_hw_status_page status_page;
 
-	u32 irq_gem_seqno; /* last seq seem at irq time */
-	u32 waiting_gem_seqno;
-	int user_irq_refcount;
-	void (*user_irq_get)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
-	void (*user_irq_put)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
-	void (*setup_status_page)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
+	spinlock_t irq_lock;
+	u32 irq_refcount;
+	u32 irq_mask;
+	u32 irq_seqno; /* last seq seem at irq time */
+	u32 trace_irq_seqno;
+	u32 waiting_seqno;
+	u32 sync_seqno[I915_NUM_RINGS-1];
+	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
+	void (*irq_put)(struct intel_ring_buffer *ring);
 
-	int (*init)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
+	int (*init)(struct intel_ring_buffer *ring);
 
-	unsigned int (*get_head)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
-	unsigned int (*get_tail)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
-	unsigned int (*get_active_head)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
-	void (*advance_ring)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
-	void (*flush)(struct drm_device *dev,
-			struct intel_ring_buffer *ring,
-			u32 invalidate_domains,
-			u32 flush_domains);
-	u32 (*add_request)(struct drm_device *dev,
-			struct intel_ring_buffer *ring,
-			struct drm_file *file_priv,
-			u32 flush_domains);
-	u32 (*get_gem_seqno)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
-	int (*dispatch_gem_execbuffer)(struct drm_device *dev,
-			struct intel_ring_buffer *ring,
-			struct drm_i915_gem_execbuffer2 *exec,
-			struct drm_clip_rect *cliprects,
-			uint64_t exec_offset);
+	void (*write_tail)(struct intel_ring_buffer *ring,
+			   u32 value);
+	int __must_check (*flush)(struct intel_ring_buffer *ring,
+				  u32 invalidate_domains,
+				  u32 flush_domains);
+	int (*add_request)(struct intel_ring_buffer *ring,
+			   u32 *seqno);
+	u32 (*get_seqno)(struct intel_ring_buffer *ring);
+	int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+				   u32 offset, u32 length);
+	void (*cleanup)(struct intel_ring_buffer *ring);
 
 	/**
 	 * List of objects currently involved in rendering from the
@@ -83,49 +94,110 @@ struct intel_ring_buffer {
 	 */
 	struct list_head request_list;
 
+	/**
+	 * List of objects currently pending a GPU write flush.
+	 *
+	 * All elements on this list will belong to either the
+	 * active_list or flushing_list, last_rendering_seqno can
+	 * be used to differentiate between the two elements.
+	 */
+	struct list_head gpu_write_list;
+
+	/**
+	 * Do we have some not yet emitted requests outstanding?
+	 */
+	u32 outstanding_lazy_request;
+
 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;
+
+	void *private;
 };
 
 static inline u32
+intel_ring_sync_index(struct intel_ring_buffer *ring,
+		      struct intel_ring_buffer *other)
+{
+	int idx;
+
+	/*
+	 * cs -> 0 = vcs, 1 = bcs
+	 * vcs -> 0 = bcs, 1 = cs,
+	 * bcs -> 0 = cs, 1 = vcs.
+	 */
+
+	idx = (other - ring) - 1;
+	if (idx < 0)
+		idx += I915_NUM_RINGS;
+
+	return idx;
+}
+
+static inline u32
 intel_read_status_page(struct intel_ring_buffer *ring,
 		       int reg)
+{
+	return ioread32(ring->status_page.page_addr + reg);
+}
+
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ *
+ * The area from dword 0x20 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_GEM_HWS_INDEX	0x20
+#define I915_BREADCRUMB_INDEX	0x21
+
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+
+int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
+static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
 {
-	u32 *regs = ring->status_page.page_addr;
-	return regs[reg];
+	return intel_wait_ring_buffer(ring, ring->size - 8);
 }
 
-int intel_init_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
-void intel_cleanup_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
-int intel_wait_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring, int n);
-int intel_wrap_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
-void intel_ring_begin(struct drm_device *dev,
-		struct intel_ring_buffer *ring, int n);
-
-static inline void intel_ring_emit(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		unsigned int data)
+int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+
+static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+				   u32 data)
 {
-	unsigned int *virt = ring->virtual_start + ring->tail;
-	*virt = data;
+	iowrite32(data, ring->virtual_start + ring->tail);
 	ring->tail += 4;
 }
 
-void intel_fill_struct(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		void *data,
-		unsigned int len);
-void intel_ring_advance(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
+void intel_ring_advance(struct intel_ring_buffer *ring);
+
+u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_sync(struct intel_ring_buffer *ring,
+		    struct intel_ring_buffer *to,
+		    u32 seqno);
 
-u32 intel_ring_get_seqno(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
+int intel_init_render_ring_buffer(struct drm_device *dev);
+int intel_init_bsd_ring_buffer(struct drm_device *dev);
+int intel_init_blt_ring_buffer(struct drm_device *dev);
+
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+
+static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+{
+	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
+		ring->trace_irq_seqno = seqno;
+}
 
-extern struct intel_ring_buffer render_ring;
-extern struct intel_ring_buffer bsd_ring;
+/* DRI warts */
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
 
 #endif /* _INTEL_RINGBUFFER_H_ */
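
A note on the intel_ring_sync_index() helper added in the second hunk: the semaphore slot is just the cyclic distance from ring to other, minus one, so each ring addresses its two peers at slots 0 and 1. The standalone sketch below (plain userspace C, not driver code; it substitutes enum indices for the real pointer subtraction over the contiguous dev_priv->ring[] array) reproduces the mapping stated in the function's comment.

#include <stdio.h>

/* Mirrors the enum added at the top of the header. */
enum { RCS, VCS, BCS, I915_NUM_RINGS };

/* Same arithmetic as intel_ring_sync_index(), on array indices
 * instead of pointers into the ring array. */
static int sync_index(int ring, int other)
{
	int idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;
	return idx;
}

int main(void)
{
	/* Expected output: "0 1" on every line, matching the comment:
	 * cs -> 0 = vcs, 1 = bcs; vcs -> 0 = bcs, 1 = cs; bcs -> 0 = cs, 1 = vcs. */
	printf("%d %d\n", sync_index(RCS, VCS), sync_index(RCS, BCS));
	printf("%d %d\n", sync_index(VCS, BCS), sync_index(VCS, RCS));
	printf("%d %d\n", sync_index(BCS, RCS), sync_index(BCS, VCS));
	return 0;
}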
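Likewise, the reworked begin/emit/advance interface drops the drm_device argument and makes intel_ring_begin() failable (__must_check). A minimal caller sketch under the usual i915 conventions of this era; the wrapper function itself is hypothetical, and MI_NOOP is assumed to come from i915_reg.h:

/* Hypothetical example, not driver code: reserve space for two dwords,
 * emit them, then publish the new tail to the hardware. */
static int emit_two_noops(struct intel_ring_buffer *ring)
{
	int ret;

	/* Reserve ring space for two dwords; may wait, and can fail. */
	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);	/* each emit bumps ring->tail by 4 */
	intel_ring_emit(ring, MI_NOOP);

	/* Publishes the updated tail through the ring's write_tail() hook. */
	intel_ring_advance(ring);
	return 0;
}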