author    Eric Anholt <eric@anholt.net>  2009-10-02 15:07:42 -0400
committer Eric Anholt <eric@anholt.net>  2009-10-02 15:07:42 -0400
commit    8d23adf7610f2c000c08abe3941c493952d21a61 (patch)
tree      5a793a37ec9d71493d2f7ea70e13aee3137cc911 /drivers/gpu
parent    4781f20f29926ec68715f5cc930273a79fc0a9eb (diff)
parent    9d34e5db07303c9609053e2e651aa6d1fc74e923 (diff)
Merge commit 'ickle/for-anholt' into drm-intel-next
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c   |  1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h   |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c   | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c   | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 49
5 files changed, 46 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 45d507ebd3ff..92aeb918e0c0 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1468,6 +1468,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->user_irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
 	dev_priv->user_irq_refcount = 0;
+	dev_priv->trace_irq_seqno = 0;
 
 	ret = drm_vblank_init(dev, I915_NUM_PIPE);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b24b2d145b75..6035d3dae851 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -202,6 +202,7 @@ typedef struct drm_i915_private {
 	spinlock_t user_irq_lock;
 	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
 	int user_irq_refcount;
+	u32 trace_irq_seqno;
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 irq_mask_reg;
 	u32 pipestat[2];
@@ -665,6 +666,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
 extern int i915_irq_wait(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 void i915_user_irq_get(struct drm_device *dev);
+void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
 void i915_user_irq_put(struct drm_device *dev);
 extern void i915_enable_interrupt (struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 40727d4c2919..abfc27b0c2ea 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1770,7 +1770,7 @@ i915_gem_retire_requests(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 
-	if (!dev_priv->hw_status_page)
+	if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
 		return;
 
 	seqno = i915_get_gem_seqno(dev);
@@ -1794,6 +1794,12 @@ i915_gem_retire_requests(struct drm_device *dev)
 		} else
 			break;
 	}
+
+	if (unlikely (dev_priv->trace_irq_seqno &&
+		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+		i915_user_irq_put(dev);
+		dev_priv->trace_irq_seqno = 0;
+	}
 }
 
 void
@@ -3352,7 +3358,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 	exec_len = (uint32_t) exec->batch_len;
 
-	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);
+	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
 
 	count = nbox ? nbox : 1;
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4dfeec7cdd42..c3ceffa46ea0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -725,6 +725,16 @@ void i915_user_irq_put(struct drm_device *dev)
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
+void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	if (dev_priv->trace_irq_seqno == 0)
+		i915_user_irq_get(dev);
+
+	dev_priv->trace_irq_seqno = seqno;
+}
+
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 5567a40816f3..01840d9bc38f 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -158,16 +158,17 @@ TRACE_EVENT(i915_gem_request_submit,
 	    TP_ARGS(dev, seqno),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_device *, dev)
+			     __field(u32, dev)
 			     __field(u32, seqno)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev;
+			   __entry->dev = dev->primary->index;
 			   __entry->seqno = seqno;
+			   i915_trace_irq_get(dev, seqno);
 			   ),
 
-	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
 );
 
 TRACE_EVENT(i915_gem_request_flush,
@@ -178,20 +179,20 @@ TRACE_EVENT(i915_gem_request_flush,
 	    TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_device *, dev)
+			     __field(u32, dev)
 			     __field(u32, seqno)
 			     __field(u32, flush_domains)
 			     __field(u32, invalidate_domains)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev;
+			   __entry->dev = dev->primary->index;
 			   __entry->seqno = seqno;
 			   __entry->flush_domains = flush_domains;
 			   __entry->invalidate_domains = invalidate_domains;
 			   ),
 
-	    TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x",
+	    TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x",
 		      __entry->dev, __entry->seqno,
 		      __entry->flush_domains, __entry->invalidate_domains)
 );
@@ -204,16 +205,16 @@ TRACE_EVENT(i915_gem_request_complete,
 	    TP_ARGS(dev, seqno),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_device *, dev)
+			     __field(u32, dev)
 			     __field(u32, seqno)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev;
+			   __entry->dev = dev->primary->index;
 			   __entry->seqno = seqno;
 			   ),
 
-	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
 );
 
 TRACE_EVENT(i915_gem_request_retire,
@@ -223,16 +224,16 @@ TRACE_EVENT(i915_gem_request_retire,
 	    TP_ARGS(dev, seqno),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_device *, dev)
+			     __field(u32, dev)
 			     __field(u32, seqno)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev;
+			   __entry->dev = dev->primary->index;
 			   __entry->seqno = seqno;
 			   ),
 
-	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
 );
 
 TRACE_EVENT(i915_gem_request_wait_begin,
@@ -242,16 +243,16 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 	    TP_ARGS(dev, seqno),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_device *, dev)
+			     __field(u32, dev)
 			     __field(u32, seqno)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev;
+			   __entry->dev = dev->primary->index;
 			   __entry->seqno = seqno;
 			   ),
 
-	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
 );
 
 TRACE_EVENT(i915_gem_request_wait_end,
@@ -261,16 +262,16 @@ TRACE_EVENT(i915_gem_request_wait_end,
 	    TP_ARGS(dev, seqno),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_device *, dev)
+			     __field(u32, dev)
 			     __field(u32, seqno)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev;
+			   __entry->dev = dev->primary->index;
 			   __entry->seqno = seqno;
 			   ),
 
-	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
 );
 
 TRACE_EVENT(i915_ring_wait_begin,
@@ -280,14 +281,14 @@ TRACE_EVENT(i915_ring_wait_begin,
 	    TP_ARGS(dev),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_device *, dev)
+			     __field(u32, dev)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev;
+			   __entry->dev = dev->primary->index;
 			   ),
 
-	    TP_printk("dev=%p", __entry->dev)
+	    TP_printk("dev=%u", __entry->dev)
 );
 
 TRACE_EVENT(i915_ring_wait_end,
@@ -297,14 +298,14 @@ TRACE_EVENT(i915_ring_wait_end,
 	    TP_ARGS(dev),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_device *, dev)
+			     __field(u32, dev)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev;
+			   __entry->dev = dev->primary->index;
 			   ),
 
-	    TP_printk("dev=%p", __entry->dev)
+	    TP_printk("dev=%u", __entry->dev)
 );
 
 #endif /* _I915_TRACE_H_ */
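
Note (not part of the patch): a small standalone C model of the lifecycle the hunks above introduce. The i915_gem_request_submit tracepoint arms a seqno via i915_trace_irq_get(), which takes one user-IRQ reference, and i915_gem_retire_requests() drops that reference and disarms once the traced seqno has completed. Everything below is illustrative only; it approximates the intended behaviour in userspace rather than reproducing the driver code.

/* Standalone sketch of the "arm a seqno, hold one IRQ reference, release
 * on retirement" pattern; names mirror the driver but this is a model. */
#include <stdint.h>
#include <stdio.h>

static int user_irq_refcount;       /* stands in for dev_priv->user_irq_refcount */
static uint32_t trace_irq_seqno;    /* stands in for dev_priv->trace_irq_seqno */

static void user_irq_get(void) { user_irq_refcount++; }
static void user_irq_put(void) { user_irq_refcount--; }

/* Wrap-safe seqno comparison in the style of i915_seqno_passed(). */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

/* Modeled on i915_trace_irq_get(): the first armed seqno holds the IRQ,
 * and the most recently submitted seqno is always the one remembered. */
static void trace_irq_get(uint32_t seqno)
{
	if (trace_irq_seqno == 0)
		user_irq_get();
	trace_irq_seqno = seqno;
}

/* Models the intent of the release added to i915_gem_retire_requests():
 * once the completed hardware seqno reaches the armed value, drop the
 * reference and disarm so the interrupt is not kept on while idle. */
static void retire_requests(uint32_t hw_seqno)
{
	if (trace_irq_seqno && seqno_passed(hw_seqno, trace_irq_seqno)) {
		user_irq_put();
		trace_irq_seqno = 0;
	}
}

int main(void)
{
	trace_irq_get(42);     /* submit: arm seqno 42, refcount goes to 1 */
	retire_requests(41);   /* not completed yet, reference still held */
	retire_requests(43);   /* completed: reference dropped, disarmed */
	printf("refcount=%d armed=%u\n", user_irq_refcount,
	       (unsigned) trace_irq_seqno);
	return 0;
}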