author     Chris Wilson <chris@chris-wilson.co.uk>  2011-02-03 06:57:46 -0500
committer  Chris Wilson <chris@chris-wilson.co.uk>  2011-02-07 09:59:18 -0500
commit     db53a302611c06bde01851f61fa0675a84ca018c (patch)
tree       c1504cf7929af3372a3d96c3a87ee754ceb1eff9 /drivers/gpu/drm/i915
parent     d9bc7e9f32716901c617e1f0fb6ce0f74f172686 (diff)
drm/i915: Refine tracepoints
A lot of minor tweaks to fix the tracepoints, improve the output
formatting for ftrace, and generally make the tracepoints useful again.
It is a start, and enough to begin identifying performance issues and
gaps in our coverage.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
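
All of the events touched below follow the kernel's standard TRACE_EVENT() pattern: TP_PROTO()/TP_ARGS() declare the call signature, TP_STRUCT__entry() and TP_fast_assign() capture the arguments into the trace record, and TP_printk() formats the record for the ftrace output this patch cleans up. As a reading aid, here is a minimal sketch of the pattern; it is simplified, and the field layout chosen here is an assumption — the authoritative definitions are in the i915_trace.h hunks below.

	/* Minimal sketch of the TRACE_EVENT() pattern used throughout this
	 * patch; simplified, not the verbatim i915 definition. */
	TRACE_EVENT(i915_gem_request_add,
		    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
		    TP_ARGS(ring, seqno),

		    TP_STRUCT__entry(
				     __field(u32, ring)  /* stored in the trace record */
				     __field(u32, seqno)
				     ),

		    TP_fast_assign(
				   __entry->ring = ring->id; /* runs at the trace site */
				   __entry->seqno = seqno;
				   ),

		    TP_printk("ring=%u, seqno=%u", /* ftrace output format */
			      __entry->ring, __entry->seqno)
	);

	/* The macro expands into a trace_i915_gem_request_add() stub that
	 * the driver calls, e.g. from i915_add_request() in the i915_gem.c
	 * hunks below:
	 *	trace_i915_gem_request_add(ring, seqno);
	 */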
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c              1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             52
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            174
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c       45
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c        5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  58
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c             12
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h          301
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c        13
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     21
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h      7
11 files changed, 330 insertions, 359 deletions
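
Two refactors recur through every hunk that follows. First, functions that already take a struct intel_ring_buffer * drop their redundant struct drm_device * argument, since the ring carries a back-pointer to its device; second, the trace-IRQ bookkeeping moves from the device-private structure into the ring itself. A rough sketch of the resulting shape — the field layout is heavily trimmed and the to_i915() helper is illustrative only, not something this patch adds; see the i915_drv.h and i915_gem.c hunks for the real code:

	/* Rough sketch of the refactor; the real structs carry many more
	 * fields (see i915_drv.h and intel_ringbuffer.h). */
	struct intel_ring_buffer {
		struct drm_device *dev;	/* back-pointer makes a dev argument redundant */
		u32 trace_irq_seqno;	/* was drm_i915_private.trace_irq_seqno */
		/* ... */
	};

	/* Old: i915_wait_request(dev, seqno, interruptible, ring)
	 * New: callers pass only the ring; the device is derived from it. */
	static inline drm_i915_private_t *to_i915(struct intel_ring_buffer *ring)
	{
		/* to_i915() is hypothetical, shown only to illustrate the idiom */
		return ring->dev->dev_private;
	}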
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 126e1747fb0c..c79efbc15c5e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -2004,7 +2004,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	dev_priv->trace_irq_seqno = 0;
 
 	ret = drm_vblank_init(dev, I915_NUM_PIPE);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fb5979774c09..bdfda0b8c604 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -76,10 +76,7 @@ enum plane {
 #define DRIVER_PATCHLEVEL	0
 
 #define WATCH_COHERENCY	0
-#define WATCH_EXEC	0
-#define WATCH_RELOC	0
 #define WATCH_LISTS	0
-#define WATCH_PWRITE	0
 
 #define I915_GEM_PHYS_CURSOR_0 1
 #define I915_GEM_PHYS_CURSOR_1 2
@@ -289,7 +286,6 @@ typedef struct drm_i915_private {
 	int page_flipping;
 
 	atomic_t irq_received;
-	u32 trace_irq_seqno;
 
 	/* protects the irq masks */
 	spinlock_t irq_lock;
@@ -1001,7 +997,6 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
-void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
 
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i915_driver_irq_preinstall(struct drm_device * dev);
@@ -1095,8 +1090,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct drm_device *dev,
-				     struct intel_ring_buffer *ring,
+int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
 				     uint32_t invalidate_domains,
 				     uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
@@ -1127,10 +1121,9 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 }
 
 static inline u32
-i915_gem_next_request_seqno(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	return ring->outstanding_lazy_request = dev_priv->next_seqno;
 }
 
@@ -1155,14 +1148,12 @@ void i915_gem_do_init(struct drm_device *dev,
 		      unsigned long end);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct drm_device *dev,
-				  struct drm_file *file_priv,
-				  struct drm_i915_gem_request *request,
-				  struct intel_ring_buffer *ring);
-int __must_check i915_do_wait_request(struct drm_device *dev,
-				      uint32_t seqno,
-				      bool interruptible,
-				      struct intel_ring_buffer *ring);
+int __must_check i915_add_request(struct intel_ring_buffer *ring,
+				  struct drm_file *file,
+				  struct drm_i915_gem_request *request);
+int __must_check i915_wait_request(struct intel_ring_buffer *ring,
+				   uint32_t seqno,
+				   bool interruptible);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -1311,7 +1302,7 @@ extern void intel_display_print_error_state(struct seq_file *m,
 #define __i915_read(x, y) \
 static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 	u##x val = read##y(dev_priv->regs + reg); \
-	trace_i915_reg_rw('R', reg, val, sizeof(val)); \
+	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
 	return val; \
 }
 __i915_read(8, b)
@@ -1322,7 +1313,7 @@ __i915_read(64, q)
 
 #define __i915_write(x, y) \
 static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
-	trace_i915_reg_rw('W', reg, val, sizeof(val)); \
+	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
 	write##y(val, dev_priv->regs + reg); \
 }
 __i915_write(8, b)
@@ -1371,25 +1362,4 @@ static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
 	return val;
 }
 
-static inline void
-i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
-{
-	/* Trace down the write operation before the real write */
-	trace_i915_reg_rw('W', reg, val, len);
-	switch (len) {
-	case 8:
-		writeq(val, dev_priv->regs + reg);
-		break;
-	case 4:
-		writel(val, dev_priv->regs + reg);
-		break;
-	case 2:
-		writew(val, dev_priv->regs + reg);
-		break;
-	case 1:
-		writeb(val, dev_priv->regs + reg);
-		break;
-	}
-}
-
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a41c0e716805..f0f8c6ff684f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -518,6 +518,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	trace_i915_gem_object_pread(obj, args->offset, args->size);
+
 	ret = i915_gem_object_set_cpu_read_domain_range(obj,
 							args->offset,
 							args->size);
@@ -959,6 +961,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
 	 * it would end up going through the fenced access, and we'll get
 	 * different detiling behavior between reading and writing.
@@ -1175,6 +1179,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (ret)
 		goto out;
 
+	trace_i915_gem_object_fault(obj, page_offset, true, write);
+
 	/* Now bind it into the GTT if needed */
 	if (!obj->map_and_fenceable) {
 		ret = i915_gem_object_unbind(obj);
@@ -1668,9 +1674,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 }
 
 static void
-i915_gem_process_flushing_list(struct drm_device *dev,
-			       uint32_t flush_domains,
-			       struct intel_ring_buffer *ring)
+i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
+			       uint32_t flush_domains)
 {
 	struct drm_i915_gem_object *obj, *next;
 
@@ -1683,7 +1688,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 			obj->base.write_domain = 0;
 			list_del_init(&obj->gpu_write_list);
 			i915_gem_object_move_to_active(obj, ring,
-						       i915_gem_next_request_seqno(dev, ring));
+						       i915_gem_next_request_seqno(ring));
 
 			trace_i915_gem_object_change_domain(obj,
 							    obj->base.read_domains,
@@ -1693,27 +1698,22 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 }
 
 int
-i915_add_request(struct drm_device *dev,
+i915_add_request(struct intel_ring_buffer *ring,
 		 struct drm_file *file,
-		 struct drm_i915_gem_request *request,
-		 struct intel_ring_buffer *ring)
+		 struct drm_i915_gem_request *request)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_file_private *file_priv = NULL;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	uint32_t seqno;
 	int was_empty;
 	int ret;
 
 	BUG_ON(request == NULL);
 
-	if (file != NULL)
-		file_priv = file->driver_priv;
-
 	ret = ring->add_request(ring, &seqno);
 	if (ret)
 		return ret;
 
-	ring->outstanding_lazy_request = false;
+	trace_i915_gem_request_add(ring, seqno);
 
 	request->seqno = seqno;
 	request->ring = ring;
@@ -1721,7 +1721,9 @@ i915_add_request(struct drm_device *dev,
 	was_empty = list_empty(&ring->request_list);
 	list_add_tail(&request->list, &ring->request_list);
 
-	if (file_priv) {
+	if (file) {
+		struct drm_i915_file_private *file_priv = file->driver_priv;
+
 		spin_lock(&file_priv->mm.lock);
 		request->file_priv = file_priv;
 		list_add_tail(&request->client_list,
@@ -1729,6 +1731,8 @@ i915_add_request(struct drm_device *dev,
 		spin_unlock(&file_priv->mm.lock);
 	}
 
+	ring->outstanding_lazy_request = false;
+
 	if (!dev_priv->mm.suspended) {
 		mod_timer(&dev_priv->hangcheck_timer,
 			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1845,18 +1849,15 @@ void i915_gem_reset(struct drm_device *dev)
  * This function clears the request list as sequence numbers are passed.
  */
 static void
-i915_gem_retire_requests_ring(struct drm_device *dev,
-			      struct intel_ring_buffer *ring)
+i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 	int i;
 
-	if (!ring->status_page.page_addr ||
-	    list_empty(&ring->request_list))
+	if (list_empty(&ring->request_list))
 		return;
 
-	WARN_ON(i915_verify_lists(dev));
+	WARN_ON(i915_verify_lists(ring->dev));
 
 	seqno = ring->get_seqno(ring);
 
@@ -1874,7 +1875,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 		if (!i915_seqno_passed(seqno, request->seqno))
 			break;
 
-		trace_i915_gem_request_retire(dev, request->seqno);
+		trace_i915_gem_request_retire(ring, request->seqno);
 
 		list_del(&request->list);
 		i915_gem_request_remove_from_client(request);
@@ -1900,13 +1901,13 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 		i915_gem_object_move_to_inactive(obj);
 	}
 
-	if (unlikely (dev_priv->trace_irq_seqno &&
-		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+	if (unlikely(ring->trace_irq_seqno &&
+		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
 		ring->irq_put(ring);
-		dev_priv->trace_irq_seqno = 0;
+		ring->trace_irq_seqno = 0;
 	}
 
-	WARN_ON(i915_verify_lists(dev));
+	WARN_ON(i915_verify_lists(ring->dev));
 }
 
 void
@@ -1930,7 +1931,7 @@ i915_gem_retire_requests(struct drm_device *dev)
 	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++)
-		i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
+		i915_gem_retire_requests_ring(&dev_priv->ring[i]);
 }
 
 static void
@@ -1964,11 +1965,11 @@ i915_gem_retire_work_handler(struct work_struct *work)
 		struct drm_i915_gem_request *request;
 		int ret;
 
-		ret = i915_gem_flush_ring(dev, ring, 0,
-					  I915_GEM_GPU_DOMAINS);
+		ret = i915_gem_flush_ring(ring,
+					  0, I915_GEM_GPU_DOMAINS);
 		request = kzalloc(sizeof(*request), GFP_KERNEL);
 		if (ret || request == NULL ||
-		    i915_add_request(dev, NULL, request, ring))
+		    i915_add_request(ring, NULL, request))
 			kfree(request);
 	}
 
@@ -1981,11 +1982,16 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	mutex_unlock(&dev->struct_mutex);
 }
 
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
 int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
-		     bool interruptible, struct intel_ring_buffer *ring)
+i915_wait_request(struct intel_ring_buffer *ring,
+		  uint32_t seqno,
+		  bool interruptible)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	u32 ier;
 	int ret = 0;
 
@@ -2011,7 +2017,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 		if (request == NULL)
 			return -ENOMEM;
 
-		ret = i915_add_request(dev, NULL, request, ring);
+		ret = i915_add_request(ring, NULL, request);
 		if (ret) {
 			kfree(request);
 			return ret;
@@ -2021,18 +2027,18 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	}
 
 	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
-		if (HAS_PCH_SPLIT(dev))
+		if (HAS_PCH_SPLIT(ring->dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
 			ier = I915_READ(IER);
 		if (!ier) {
 			DRM_ERROR("something (likely vbetool) disabled "
 				  "interrupts, re-enabling\n");
-			i915_driver_irq_preinstall(dev);
-			i915_driver_irq_postinstall(dev);
+			i915_driver_irq_preinstall(ring->dev);
+			i915_driver_irq_postinstall(ring->dev);
 		}
 
-		trace_i915_gem_request_wait_begin(dev, seqno);
+		trace_i915_gem_request_wait_begin(ring, seqno);
 
 		ring->waiting_seqno = seqno;
 		if (ring->irq_get(ring)) {
@@ -2052,7 +2058,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 			ret = -EBUSY;
 		ring->waiting_seqno = 0;
 
-		trace_i915_gem_request_wait_end(dev, seqno);
+		trace_i915_gem_request_wait_end(ring, seqno);
 	}
 	if (atomic_read(&dev_priv->mm.wedged))
 		ret = -EAGAIN;
@@ -2068,23 +2074,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	 * a separate wait queue to handle that.
 	 */
 	if (ret == 0)
-		i915_gem_retire_requests_ring(dev, ring);
+		i915_gem_retire_requests_ring(ring);
 
 	return ret;
 }
 
 /**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno,
-		  struct intel_ring_buffer *ring)
-{
-	return i915_do_wait_request(dev, seqno, 1, ring);
-}
-
-/**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
  */
@@ -2092,7 +2087,6 @@ int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool interruptible)
 {
-	struct drm_device *dev = obj->base.dev;
 	int ret;
 
 	/* This function only exists to support waiting for existing rendering,
@@ -2104,10 +2098,9 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 	 * it.
 	 */
 	if (obj->active) {
-		ret = i915_do_wait_request(dev,
-					   obj->last_rendering_seqno,
-					   interruptible,
-					   obj->ring);
+		ret = i915_wait_request(obj->ring,
+					obj->last_rendering_seqno,
+					interruptible);
 		if (ret)
 			return ret;
 	}
@@ -2157,6 +2150,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	if (ret == -ERESTARTSYS)
 		return ret;
 
+	trace_i915_gem_object_unbind(obj);
+
 	i915_gem_gtt_unbind_object(obj);
 	i915_gem_object_put_pages_gtt(obj);
 
@@ -2172,29 +2167,27 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	if (i915_gem_object_is_purgeable(obj))
 		i915_gem_object_truncate(obj);
 
-	trace_i915_gem_object_unbind(obj);
-
 	return ret;
 }
 
 int
-i915_gem_flush_ring(struct drm_device *dev,
-		    struct intel_ring_buffer *ring,
+i915_gem_flush_ring(struct intel_ring_buffer *ring,
 		    uint32_t invalidate_domains,
 		    uint32_t flush_domains)
 {
 	int ret;
 
+	trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
+
 	ret = ring->flush(ring, invalidate_domains, flush_domains);
 	if (ret)
 		return ret;
 
-	i915_gem_process_flushing_list(dev, flush_domains, ring);
+	i915_gem_process_flushing_list(ring, flush_domains);
 	return 0;
 }
 
-static int i915_ring_idle(struct drm_device *dev,
-			  struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
 	int ret;
 
@@ -2202,15 +2195,15 @@ static int i915_ring_idle(struct drm_device *dev,
 		return 0;
 
 	if (!list_empty(&ring->gpu_write_list)) {
-		ret = i915_gem_flush_ring(dev, ring,
+		ret = i915_gem_flush_ring(ring,
 					  I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
 
-	return i915_wait_request(dev,
-				 i915_gem_next_request_seqno(dev, ring),
-				 ring);
+	return i915_wait_request(ring,
				 i915_gem_next_request_seqno(ring),
				 true);
 }
 
 int
@@ -2227,7 +2220,7 @@ i915_gpu_idle(struct drm_device *dev)
 
 	/* Flush everything onto the inactive list. */
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+		ret = i915_ring_idle(&dev_priv->ring[i]);
 		if (ret)
 			return ret;
 	}
@@ -2418,8 +2411,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 
 	if (obj->fenced_gpu_access) {
 		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-			ret = i915_gem_flush_ring(obj->base.dev,
-						  obj->last_fenced_ring,
+			ret = i915_gem_flush_ring(obj->last_fenced_ring,
 						  0, obj->base.write_domain);
 			if (ret)
 				return ret;
@@ -2431,10 +2423,10 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 	if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
 		if (!ring_passed_seqno(obj->last_fenced_ring,
 				       obj->last_fenced_seqno)) {
-			ret = i915_do_wait_request(obj->base.dev,
-						   obj->last_fenced_seqno,
-						   interruptible,
-						   obj->last_fenced_ring);
+			ret = i915_wait_request(obj->last_fenced_ring,
+						obj->last_fenced_seqno,
+						interruptible);
+
 			if (ret)
 				return ret;
 		}
@@ -2560,10 +2552,9 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 		if (reg->setup_seqno) {
 			if (!ring_passed_seqno(obj->last_fenced_ring,
 					       reg->setup_seqno)) {
-				ret = i915_do_wait_request(obj->base.dev,
-							   reg->setup_seqno,
-							   interruptible,
-							   obj->last_fenced_ring);
+				ret = i915_wait_request(obj->last_fenced_ring,
+							reg->setup_seqno,
+							interruptible);
 				if (ret)
 					return ret;
 			}
@@ -2580,7 +2571,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 	} else if (obj->tiling_changed) {
 		if (obj->fenced_gpu_access) {
 			if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-				ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+				ret = i915_gem_flush_ring(obj->ring,
 							  0, obj->base.write_domain);
 				if (ret)
 					return ret;
@@ -2597,7 +2588,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 		if (obj->tiling_changed) {
 			if (pipelined) {
 				reg->setup_seqno =
-					i915_gem_next_request_seqno(dev, pipelined);
+					i915_gem_next_request_seqno(pipelined);
 				obj->last_fenced_seqno = reg->setup_seqno;
 				obj->last_fenced_ring = pipelined;
 			}
@@ -2637,7 +2628,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 		old->fence_reg = I915_FENCE_REG_NONE;
 		old->last_fenced_ring = pipelined;
 		old->last_fenced_seqno =
-			pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+			pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
 
 		drm_gem_object_unreference(&old->base);
 	} else if (obj->last_fenced_seqno == 0)
@@ -2649,7 +2640,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 	obj->last_fenced_ring = pipelined;
 
 	reg->setup_seqno =
-		pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+		pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
 	obj->last_fenced_seqno = reg->setup_seqno;
 
 update:
@@ -2846,7 +2837,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	obj->map_and_fenceable = mappable && fenceable;
 
-	trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
+	trace_i915_gem_object_bind(obj, map_and_fenceable);
 	return 0;
 }
 
@@ -2869,13 +2860,11 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 static int
 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-
 	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
 	/* Queue the GPU write cache flushing we need. */
-	return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
+	return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -3024,8 +3013,7 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
 		return 0;
 
 	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-		ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
-					  0, obj->base.write_domain);
+		ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
 		if (ret)
 			return ret;
 	}
@@ -3442,7 +3430,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		 * flush earlier is beneficial.
 		 */
 		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-			ret = i915_gem_flush_ring(dev, obj->ring,
+			ret = i915_gem_flush_ring(obj->ring,
 						  0, obj->base.write_domain);
 		} else if (obj->ring->outstanding_lazy_request ==
 			   obj->last_rendering_seqno) {
@@ -3453,9 +3441,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 			 */
 			request = kzalloc(sizeof(*request), GFP_KERNEL);
 			if (request)
-				ret = i915_add_request(dev,
-						       NULL, request,
-						       obj->ring);
+				ret = i915_add_request(obj->ring, NULL,request);
 			else
 				ret = -ENOMEM;
 		}
@@ -3465,7 +3451,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		 * are actually unmasked, and our working set ends up being
 		 * larger than required.
 		 */
-		i915_gem_retire_requests_ring(dev, obj->ring);
+		i915_gem_retire_requests_ring(obj->ring);
 
 		args->busy = obj->active;
 	}
@@ -3595,6 +3581,8 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
 	kfree(obj->page_cpu_valid);
 	kfree(obj->bit_17);
 	kfree(obj);
+
+	trace_i915_gem_object_destroy(obj);
 }
 
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
@@ -3602,8 +3590,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	struct drm_device *dev = obj->base.dev;
 
-	trace_i915_gem_object_destroy(obj);
-
 	while (obj->pin_count > 0)
 		i915_gem_object_unpin(obj);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 29d014c48ca2..8da1899bd24f 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -134,51 +134,6 @@ i915_verify_lists(struct drm_device *dev)
 }
 #endif /* WATCH_INACTIVE */
 
-
-#if WATCH_EXEC | WATCH_PWRITE
-static void
-i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
-		   uint32_t bias, uint32_t mark)
-{
-	uint32_t *mem = kmap_atomic(page, KM_USER0);
-	int i;
-	for (i = start; i < end; i += 4)
-		DRM_INFO("%08x: %08x%s\n",
-			 (int) (bias + i), mem[i / 4],
-			 (bias + i == mark) ? " ********" : "");
-	kunmap_atomic(mem, KM_USER0);
-	/* give syslog time to catch up */
-	msleep(1);
-}
-
-void
-i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
-		     const char *where, uint32_t mark)
-{
-	int page;
-
-	DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
-	for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
-		int page_len, chunk, chunk_len;
-
-		page_len = len - page * PAGE_SIZE;
-		if (page_len > PAGE_SIZE)
-			page_len = PAGE_SIZE;
-
-		for (chunk = 0; chunk < page_len; chunk += 128) {
-			chunk_len = page_len - chunk;
-			if (chunk_len > 128)
-				chunk_len = 128;
-			i915_gem_dump_page(obj->pages[page],
-					   chunk, chunk + chunk_len,
-					   obj->gtt_offset +
-					   page * PAGE_SIZE,
-					   mark);
-		}
-	}
-}
-#endif
-
 #if WATCH_COHERENCY
 void
 i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 3d39005540aa..da05a2692a75 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -30,6 +30,7 @@
 #include "drm.h"
 #include "i915_drv.h"
 #include "i915_drm.h"
+#include "i915_trace.h"
 
 static bool
 mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
@@ -63,6 +64,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 		return 0;
 	}
 
+	trace_i915_gem_evict(dev, min_size, alignment, mappable);
+
 	/*
 	 * The goal is to evict objects and amalgamate space in LRU order.
 	 * The oldest idle objects reside on the inactive list, which is in
@@ -189,6 +192,8 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 	if (lists_empty)
 		return -ENOSPC;
 
+	trace_i915_gem_evict_everything(dev, purgeable_only);
+
 	/* Flush everything (on to the inactive lists) and evict */
 	ret = i915_gpu_idle(dev);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b0a0238c36d1..84fa24e6cca8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -282,21 +282,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 
 	target_offset = to_intel_bo(target_obj)->gtt_offset;
 
-#if WATCH_RELOC
-	DRM_INFO("%s: obj %p offset %08x target %d "
-		 "read %08x write %08x gtt %08x "
-		 "presumed %08x delta %08x\n",
-		 __func__,
-		 obj,
-		 (int) reloc->offset,
-		 (int) reloc->target_handle,
-		 (int) reloc->read_domains,
-		 (int) reloc->write_domain,
-		 (int) target_offset,
-		 (int) reloc->presumed_offset,
-		 reloc->delta);
-#endif
-
 	/* The target buffer should have appeared before us in the
 	 * exec_object list, so it should have a GTT space bound by now.
 	 */
@@ -747,8 +732,7 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
 	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
 		for (i = 0; i < I915_NUM_RINGS; i++)
 			if (flush_rings & (1 << i)) {
-				ret = i915_gem_flush_ring(dev,
-							  &dev_priv->ring[i],
+				ret = i915_gem_flush_ring(&dev_priv->ring[i],
 							  invalidate_domains,
 							  flush_domains);
 				if (ret)
@@ -787,7 +771,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 	if (request == NULL)
 		return -ENOMEM;
 
-	ret = i915_add_request(obj->base.dev, NULL, request, from);
+	ret = i915_add_request(from, NULL, request);
 	if (ret) {
 		kfree(request);
 		return ret;
@@ -815,12 +799,6 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
 
 	if (cd.invalidate_domains | cd.flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			 __func__,
-			 cd.invalidate_domains,
-			 cd.flush_domains);
-#endif
 		ret = i915_gem_execbuffer_flush(ring->dev,
 						cd.invalidate_domains,
 						cd.flush_domains,
@@ -924,6 +902,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 	struct drm_i915_gem_object *obj;
 
 	list_for_each_entry(obj, objects, exec_list) {
+		u32 old_read = obj->base.read_domains;
+		u32 old_write = obj->base.write_domain;
+
+
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->base.write_domain = obj->base.pending_write_domain;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
@@ -937,9 +919,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 			intel_mark_busy(ring->dev, obj);
 		}
 
-		trace_i915_gem_object_change_domain(obj,
-						    obj->base.read_domains,
-						    obj->base.write_domain);
+		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
 
@@ -961,14 +941,14 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 	if (INTEL_INFO(dev)->gen >= 4)
 		invalidate |= I915_GEM_DOMAIN_SAMPLER;
 	if (ring->flush(ring, invalidate, 0)) {
-		i915_gem_next_request_seqno(dev, ring);
+		i915_gem_next_request_seqno(ring);
 		return;
 	}
 
 	/* Add a breadcrumb for the completion of the batch buffer */
 	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL || i915_add_request(dev, file, request, ring)) {
-		i915_gem_next_request_seqno(dev, ring);
+	if (request == NULL || i915_add_request(ring, file, request)) {
+		i915_gem_next_request_seqno(ring);
 		kfree(request);
 	}
 }
@@ -998,10 +978,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-#if WATCH_EXEC
-	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
 	switch (args->flags & I915_EXEC_RING_MASK) {
 	case I915_EXEC_DEFAULT:
 	case I915_EXEC_RENDER:
@@ -1172,7 +1148,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto err;
 
-	seqno = i915_gem_next_request_seqno(dev, ring);
+	seqno = i915_gem_next_request_seqno(ring);
 	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
 		if (seqno < ring->sync_seqno[i]) {
 			/* The GPU can not handle its semaphore value wrapping,
@@ -1187,6 +1163,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		}
 	}
 
+	trace_i915_gem_ring_dispatch(ring, seqno);
+
 	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
 	exec_len = args->batch_len;
 	if (cliprects) {
@@ -1243,11 +1221,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
 	int ret, i;
 
-#if WATCH_EXEC
-	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
 	if (args->buffer_count < 1) {
 		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
 		return -EINVAL;
@@ -1328,11 +1301,6 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
 	int ret;
 
-#if WATCH_EXEC
-	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
 	if (args->buffer_count < 1) {
 		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 95304472f0d0..15d6269027e7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -365,7 +365,7 @@ static void notify_ring(struct drm_device *dev,
 		return;
 
 	seqno = ring->get_seqno(ring);
-	trace_i915_gem_request_complete(dev, seqno);
+	trace_i915_gem_request_complete(ring, seqno);
 
 	ring->irq_seqno = seqno;
 	wake_up_all(&ring->irq_queue);
@@ -1273,16 +1273,6 @@ static int i915_emit_irq(struct drm_device * dev)
 	return dev_priv->counter;
 }
 
-void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
-	if (dev_priv->trace_irq_seqno == 0 &&
-	    ring->irq_get(ring))
-		dev_priv->trace_irq_seqno = seqno;
-}
-
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
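
The i915_trace_irq_get() helper removed above is not lost: with trace_irq_seqno now living in the ring (see the i915_drv.h and i915_gem.c hunks), its replacement becomes a per-ring inline. The intel_ringbuffer.h hunk is not included in this excerpt, so the following is only a sketch of what the equivalent per-ring helper would look like, inferred from the retire logic above rather than quoted from the patch:

	/* Sketch of the per-ring replacement for i915_trace_irq_get(); the
	 * actual change lands in intel_ringbuffer.h, outside this excerpt. */
	static inline void i915_trace_irq_get(struct intel_ring_buffer *ring,
					      u32 seqno)
	{
		/* Hold the user interrupt until seqno retires; the retire
		 * path above drops it and clears ring->trace_irq_seqno. */
		if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
			ring->trace_irq_seqno = seqno;
	}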
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 7f0fc3ed61aa..d623fefbfaca 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #include <drm/drmP.h> | 8 | #include <drm/drmP.h> |
9 | #include "i915_drv.h" | 9 | #include "i915_drv.h" |
10 | #include "intel_ringbuffer.h" | ||
10 | 11 | ||
11 | #undef TRACE_SYSTEM | 12 | #undef TRACE_SYSTEM |
12 | #define TRACE_SYSTEM i915 | 13 | #define TRACE_SYSTEM i915 |
@@ -16,9 +17,7 @@ | |||
16 | /* object tracking */ | 17 | /* object tracking */ |
17 | 18 | ||
18 | TRACE_EVENT(i915_gem_object_create, | 19 | TRACE_EVENT(i915_gem_object_create, |
19 | |||
20 | TP_PROTO(struct drm_i915_gem_object *obj), | 20 | TP_PROTO(struct drm_i915_gem_object *obj), |
21 | |||
22 | TP_ARGS(obj), | 21 | TP_ARGS(obj), |
23 | 22 | ||
24 | TP_STRUCT__entry( | 23 | TP_STRUCT__entry( |
@@ -35,33 +34,51 @@ TRACE_EVENT(i915_gem_object_create, | |||
35 | ); | 34 | ); |
36 | 35 | ||
37 | TRACE_EVENT(i915_gem_object_bind, | 36 | TRACE_EVENT(i915_gem_object_bind, |
38 | 37 | TP_PROTO(struct drm_i915_gem_object *obj, bool mappable), | |
39 | TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable), | 38 | TP_ARGS(obj, mappable), |
40 | |||
41 | TP_ARGS(obj, gtt_offset, mappable), | ||
42 | 39 | ||
43 | TP_STRUCT__entry( | 40 | TP_STRUCT__entry( |
44 | __field(struct drm_i915_gem_object *, obj) | 41 | __field(struct drm_i915_gem_object *, obj) |
45 | __field(u32, gtt_offset) | 42 | __field(u32, offset) |
43 | __field(u32, size) | ||
46 | __field(bool, mappable) | 44 | __field(bool, mappable) |
47 | ), | 45 | ), |
48 | 46 | ||
49 | TP_fast_assign( | 47 | TP_fast_assign( |
50 | __entry->obj = obj; | 48 | __entry->obj = obj; |
51 | __entry->gtt_offset = gtt_offset; | 49 | __entry->offset = obj->gtt_space->start; |
50 | __entry->size = obj->gtt_space->size; | ||
52 | __entry->mappable = mappable; | 51 | __entry->mappable = mappable; |
53 | ), | 52 | ), |
54 | 53 | ||
55 | TP_printk("obj=%p, gtt_offset=%08x%s", | 54 | TP_printk("obj=%p, offset=%08x size=%x%s", |
56 | __entry->obj, __entry->gtt_offset, | 55 | __entry->obj, __entry->offset, __entry->size, |
57 | __entry->mappable ? ", mappable" : "") | 56 | __entry->mappable ? ", mappable" : "") |
58 | ); | 57 | ); |
59 | 58 | ||
60 | TRACE_EVENT(i915_gem_object_change_domain, | 59 | TRACE_EVENT(i915_gem_object_unbind, |
60 | TP_PROTO(struct drm_i915_gem_object *obj), | ||
61 | TP_ARGS(obj), | ||
62 | |||
63 | TP_STRUCT__entry( | ||
64 | __field(struct drm_i915_gem_object *, obj) | ||
65 | __field(u32, offset) | ||
66 | __field(u32, size) | ||
67 | ), | ||
61 | 68 | ||
62 | TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), | 69 | TP_fast_assign( |
70 | __entry->obj = obj; | ||
71 | __entry->offset = obj->gtt_space->start; | ||
72 | __entry->size = obj->gtt_space->size; | ||
73 | ), | ||
63 | 74 | ||
64 | TP_ARGS(obj, old_read_domains, old_write_domain), | 75 | TP_printk("obj=%p, offset=%08x size=%x", |
76 | __entry->obj, __entry->offset, __entry->size) | ||
77 | ); | ||
78 | |||
79 | TRACE_EVENT(i915_gem_object_change_domain, | ||
80 | TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write), | ||
81 | TP_ARGS(obj, old_read, old_write), | ||
65 | 82 | ||
66 | TP_STRUCT__entry( | 83 | TP_STRUCT__entry( |
67 | __field(struct drm_i915_gem_object *, obj) | 84 | __field(struct drm_i915_gem_object *, obj) |
@@ -71,177 +88,264 @@ TRACE_EVENT(i915_gem_object_change_domain, | |||
71 | 88 | ||
72 | TP_fast_assign( | 89 | TP_fast_assign( |
73 | __entry->obj = obj; | 90 | __entry->obj = obj; |
74 | __entry->read_domains = obj->base.read_domains | (old_read_domains << 16); | 91 | __entry->read_domains = obj->base.read_domains | (old_read << 16); |
75 | __entry->write_domain = obj->base.write_domain | (old_write_domain << 16); | 92 | __entry->write_domain = obj->base.write_domain | (old_write << 16); |
76 | ), | 93 | ), |
77 | 94 | ||
78 | TP_printk("obj=%p, read=%04x, write=%04x", | 95 | TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x", |
79 | __entry->obj, | 96 | __entry->obj, |
80 | __entry->read_domains, __entry->write_domain) | 97 | __entry->read_domains >> 16, |
98 | __entry->read_domains & 0xffff, | ||
99 | __entry->write_domain >> 16, | ||
100 | __entry->write_domain & 0xffff) | ||
81 | ); | 101 | ); |
82 | 102 | ||
83 | DECLARE_EVENT_CLASS(i915_gem_object, | 103 | TRACE_EVENT(i915_gem_object_pwrite, |
104 | TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len), | ||
105 | TP_ARGS(obj, offset, len), | ||
84 | 106 | ||
85 | TP_PROTO(struct drm_i915_gem_object *obj), | 107 | TP_STRUCT__entry( |
108 | __field(struct drm_i915_gem_object *, obj) | ||
109 | __field(u32, offset) | ||
110 | __field(u32, len) | ||
111 | ), | ||
86 | 112 | ||
87 | TP_ARGS(obj), | 113 | TP_fast_assign( |
114 | __entry->obj = obj; | ||
115 | __entry->offset = offset; | ||
116 | __entry->len = len; | ||
117 | ), | ||
118 | |||
119 | TP_printk("obj=%p, offset=%u, len=%u", | ||
120 | __entry->obj, __entry->offset, __entry->len) | ||
121 | ); | ||
122 | |||
123 | TRACE_EVENT(i915_gem_object_pread, | ||
124 | TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len), | ||
125 | TP_ARGS(obj, offset, len), | ||
88 | 126 | ||
89 | TP_STRUCT__entry( | 127 | TP_STRUCT__entry( |
90 | __field(struct drm_i915_gem_object *, obj) | 128 | __field(struct drm_i915_gem_object *, obj) |
129 | __field(u32, offset) | ||
130 | __field(u32, len) | ||
91 | ), | 131 | ), |
92 | 132 | ||
93 | TP_fast_assign( | 133 | TP_fast_assign( |
94 | __entry->obj = obj; | 134 | __entry->obj = obj; |
135 | __entry->offset = offset; | ||
136 | __entry->len = len; | ||
95 | ), | 137 | ), |
96 | 138 | ||
97 | TP_printk("obj=%p", __entry->obj) | 139 | TP_printk("obj=%p, offset=%u, len=%u", |
140 | __entry->obj, __entry->offset, __entry->len) | ||
98 | ); | 141 | ); |
99 | 142 | ||
100 | DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, | 143 | TRACE_EVENT(i915_gem_object_fault, |
144 | TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write), | ||
145 | TP_ARGS(obj, index, gtt, write), | ||
146 | |||
147 | TP_STRUCT__entry( | ||
148 | __field(struct drm_i915_gem_object *, obj) | ||
149 | __field(u32, index) | ||
150 | __field(bool, gtt) | ||
151 | __field(bool, write) | ||
152 | ), | ||
153 | |||
154 | TP_fast_assign( | ||
155 | __entry->obj = obj; | ||
156 | __entry->index = index; | ||
157 | __entry->gtt = gtt; | ||
158 | __entry->write = write; | ||
159 | ), | ||
101 | 160 | ||
161 | TP_printk("obj=%p, %s index=%u %s", | ||
162 | __entry->obj, | ||
163 | __entry->gtt ? "GTT" : "CPU", | ||
164 | __entry->index, | ||
165 | __entry->write ? ", writable" : "") | ||
166 | ); | ||
167 | |||
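A sketch of how the fault event would be raised from the GTT fault handler (page_offset and the vmf flags test are assumptions modelled on i915_gem_fault() of this era; gtt is true because this is the GTT path):

	/* sketch: in i915_gem_fault(), once the faulting page index is known */
	trace_i915_gem_object_fault(obj, page_offset, true,
				    vmf->flags & FAULT_FLAG_WRITE);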
168 | DECLARE_EVENT_CLASS(i915_gem_object, | ||
102 | TP_PROTO(struct drm_i915_gem_object *obj), | 169 | TP_PROTO(struct drm_i915_gem_object *obj), |
170 | TP_ARGS(obj), | ||
103 | 171 | ||
104 | TP_ARGS(obj) | 172 | TP_STRUCT__entry( |
173 | __field(struct drm_i915_gem_object *, obj) | ||
174 | ), | ||
175 | |||
176 | TP_fast_assign( | ||
177 | __entry->obj = obj; | ||
178 | ), | ||
179 | |||
180 | TP_printk("obj=%p", __entry->obj) | ||
105 | ); | 181 | ); |
106 | 182 | ||
107 | DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, | 183 | DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, |
184 | TP_PROTO(struct drm_i915_gem_object *obj), | ||
185 | TP_ARGS(obj) | ||
186 | ); | ||
108 | 187 | ||
188 | DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, | ||
109 | TP_PROTO(struct drm_i915_gem_object *obj), | 189 | TP_PROTO(struct drm_i915_gem_object *obj), |
110 | |||
111 | TP_ARGS(obj) | 190 | TP_ARGS(obj) |
112 | ); | 191 | ); |
113 | 192 | ||
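DECLARE_EVENT_CLASS factors the entry layout, assignment and format string out once; each DEFINE_EVENT then only restates the prototype, so clflush and destroy share all of the machinery above. Adding another single-object event later costs only a prototype restatement, e.g. (hypothetical event name):

	DEFINE_EVENT(i915_gem_object, i915_gem_object_purge,	/* hypothetical */
		TP_PROTO(struct drm_i915_gem_object *obj),
		TP_ARGS(obj)
	);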
114 | DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, | 193 | TRACE_EVENT(i915_gem_evict, |
194 | TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable), | ||
195 | TP_ARGS(dev, size, align, mappable), | ||
115 | 196 | ||
116 | TP_PROTO(struct drm_i915_gem_object *obj), | 197 | TP_STRUCT__entry( |
198 | __field(u32, dev) | ||
199 | __field(u32, size) | ||
200 | __field(u32, align) | ||
201 | __field(bool, mappable) | ||
202 | ), | ||
117 | 203 | ||
118 | TP_ARGS(obj) | 204 | TP_fast_assign( |
205 | __entry->dev = dev->primary->index; | ||
206 | __entry->size = size; | ||
207 | __entry->align = align; | ||
208 | __entry->mappable = mappable; | ||
209 | ), | ||
210 | |||
211 | TP_printk("dev=%d, size=%d, align=%d %s", | ||
212 | __entry->dev, __entry->size, __entry->align, | ||
213 | __entry->mappable ? ", mappable" : "") | ||
119 | ); | 214 | ); |
120 | 215 | ||
121 | /* batch tracing */ | 216 | TRACE_EVENT(i915_gem_evict_everything, |
217 | TP_PROTO(struct drm_device *dev, bool purgeable), | ||
218 | TP_ARGS(dev, purgeable), | ||
122 | 219 | ||
123 | TRACE_EVENT(i915_gem_request_submit, | 220 | TP_STRUCT__entry( |
221 | __field(u32, dev) | ||
222 | __field(bool, purgeable) | ||
223 | ), | ||
224 | |||
225 | TP_fast_assign( | ||
226 | __entry->dev = dev->primary->index; | ||
227 | __entry->purgeable = purgeable; | ||
228 | ), | ||
124 | 229 | ||
125 | TP_PROTO(struct drm_device *dev, u32 seqno), | 230 | TP_printk("dev=%d%s", |
231 | __entry->dev, | ||
232 | __entry->purgeable ? ", purgeable only" : "") | ||
233 | ); | ||
126 | 234 | ||
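Both eviction events carry the search parameters rather than any particular object, so they mark the start of an eviction pass, not individual unbinds. Presumably i915_gem_evict.c raises them at the head of its two entry points, roughly (parameter names assumed):

	/* sketch: at the top of i915_gem_evict_something() */
	trace_i915_gem_evict(dev, min_size, alignment, mappable);

	/* sketch: at the top of i915_gem_evict_everything() */
	trace_i915_gem_evict_everything(dev, purgeable_only);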
127 | TP_ARGS(dev, seqno), | 235 | TRACE_EVENT(i915_gem_ring_dispatch, |
236 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | ||
237 | TP_ARGS(ring, seqno), | ||
128 | 238 | ||
129 | TP_STRUCT__entry( | 239 | TP_STRUCT__entry( |
130 | __field(u32, dev) | 240 | __field(u32, dev) |
241 | __field(u32, ring) | ||
131 | __field(u32, seqno) | 242 | __field(u32, seqno) |
132 | ), | 243 | ), |
133 | 244 | ||
134 | TP_fast_assign( | 245 | TP_fast_assign( |
135 | __entry->dev = dev->primary->index; | 246 | __entry->dev = ring->dev->primary->index; |
247 | __entry->ring = ring->id; | ||
136 | __entry->seqno = seqno; | 248 | __entry->seqno = seqno; |
137 | i915_trace_irq_get(dev, seqno); | 249 | i915_trace_irq_get(ring, seqno); |
138 | ), | 250 | ), |
139 | 251 | ||
140 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) | 252 | TP_printk("dev=%u, ring=%u, seqno=%u", |
253 | __entry->dev, __entry->ring, __entry->seqno) | ||
141 | ); | 254 | ); |
142 | 255 | ||
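Note that this TP_fast_assign has a deliberate side effect: i915_trace_irq_get() (added to intel_ringbuffer.h further down) takes an interrupt reference the first time a dispatch is traced, so completion interrupts are only kept enabled while the tracepoint is live. The call site is presumably the tail of the execbuffer path, roughly:

	/* sketch: in i915_gem_do_execbuffer(), once the batch is on the ring */
	trace_i915_gem_ring_dispatch(ring, seqno);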
143 | TRACE_EVENT(i915_gem_request_flush, | 256 | TRACE_EVENT(i915_gem_ring_flush, |
144 | 257 | TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush), | |
145 | TP_PROTO(struct drm_device *dev, u32 seqno, | 258 | TP_ARGS(ring, invalidate, flush), |
146 | u32 flush_domains, u32 invalidate_domains), | ||
147 | |||
148 | TP_ARGS(dev, seqno, flush_domains, invalidate_domains), | ||
149 | 259 | ||
150 | TP_STRUCT__entry( | 260 | TP_STRUCT__entry( |
151 | __field(u32, dev) | 261 | __field(u32, dev) |
152 | __field(u32, seqno) | 262 | __field(u32, ring) |
153 | __field(u32, flush_domains) | 263 | __field(u32, invalidate) |
154 | __field(u32, invalidate_domains) | 264 | __field(u32, flush) |
155 | ), | 265 | ), |
156 | 266 | ||
157 | TP_fast_assign( | 267 | TP_fast_assign( |
158 | __entry->dev = dev->primary->index; | 268 | __entry->dev = ring->dev->primary->index; |
159 | __entry->seqno = seqno; | 269 | __entry->ring = ring->id; |
160 | __entry->flush_domains = flush_domains; | 270 | __entry->invalidate = invalidate; |
161 | __entry->invalidate_domains = invalidate_domains; | 271 | __entry->flush = flush; |
162 | ), | 272 | ), |
163 | 273 | ||
164 | TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x", | 274 | TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x", |
165 | __entry->dev, __entry->seqno, | 275 | __entry->dev, __entry->ring, |
166 | __entry->flush_domains, __entry->invalidate_domains) | 276 | __entry->invalidate, __entry->flush) |
167 | ); | 277 | ); |
168 | 278 | ||
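The old request_flush event keyed on a seqno that had not yet been assigned; the replacement records the invalidate/flush domain masks against the ring itself. Its caller is presumably the common flush helper, along these lines (helper name assumed):

	/* sketch: in i915_gem_flush_ring() */
	trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);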
169 | DECLARE_EVENT_CLASS(i915_gem_request, | 279 | DECLARE_EVENT_CLASS(i915_gem_request, |
170 | 280 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
171 | TP_PROTO(struct drm_device *dev, u32 seqno), | 281 | TP_ARGS(ring, seqno), |
172 | |||
173 | TP_ARGS(dev, seqno), | ||
174 | 282 | ||
175 | TP_STRUCT__entry( | 283 | TP_STRUCT__entry( |
176 | __field(u32, dev) | 284 | __field(u32, dev) |
285 | __field(u32, ring) | ||
177 | __field(u32, seqno) | 286 | __field(u32, seqno) |
178 | ), | 287 | ), |
179 | 288 | ||
180 | TP_fast_assign( | 289 | TP_fast_assign( |
181 | __entry->dev = dev->primary->index; | 290 | __entry->dev = ring->dev->primary->index; |
291 | __entry->ring = ring->id; | ||
182 | __entry->seqno = seqno; | 292 | __entry->seqno = seqno; |
183 | ), | 293 | ), |
184 | 294 | ||
185 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) | 295 | TP_printk("dev=%u, ring=%u, seqno=%u", |
296 | __entry->dev, __entry->ring, __entry->seqno) | ||
186 | ); | 297 | ); |
187 | 298 | ||
188 | DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, | 299 | DEFINE_EVENT(i915_gem_request, i915_gem_request_add, |
189 | 300 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
190 | TP_PROTO(struct drm_device *dev, u32 seqno), | 301 | TP_ARGS(ring, seqno) |
302 | ); | ||
191 | 303 | ||
192 | TP_ARGS(dev, seqno) | 304 | DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, |
305 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | ||
306 | TP_ARGS(ring, seqno) | ||
193 | ); | 307 | ); |
194 | 308 | ||
195 | DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, | 309 | DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, |
196 | 310 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
197 | TP_PROTO(struct drm_device *dev, u32 seqno), | 311 | TP_ARGS(ring, seqno) |
198 | |||
199 | TP_ARGS(dev, seqno) | ||
200 | ); | 312 | ); |
201 | 313 | ||
202 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin, | 314 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin, |
203 | 315 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
204 | TP_PROTO(struct drm_device *dev, u32 seqno), | 316 | TP_ARGS(ring, seqno) |
205 | |||
206 | TP_ARGS(dev, seqno) | ||
207 | ); | 317 | ); |
208 | 318 | ||
209 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, | 319 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, |
210 | 320 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
211 | TP_PROTO(struct drm_device *dev, u32 seqno), | 321 | TP_ARGS(ring, seqno) |
212 | |||
213 | TP_ARGS(dev, seqno) | ||
214 | ); | 322 | ); |
215 | 323 | ||
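With the class reworked to take the ring, every request event now identifies dev, ring and seqno uniformly. The complete event pairs naturally with the interrupt path; a sketch of how it would hook into the ring interrupt notifier (the notify_ring() name and get_seqno hook match this era's i915_irq.c, but the body here is an assumption):

	/* sketch of notify_ring() in i915_irq.c */
	static void notify_ring(struct drm_device *dev,
				struct intel_ring_buffer *ring)
	{
		u32 seqno = ring->get_seqno(ring);

		trace_i915_gem_request_complete(ring, seqno);
		/* ... wake waiters, feed the hangcheck timer ... */
	}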
216 | DECLARE_EVENT_CLASS(i915_ring, | 324 | DECLARE_EVENT_CLASS(i915_ring, |
217 | 325 | TP_PROTO(struct intel_ring_buffer *ring), | |
218 | TP_PROTO(struct drm_device *dev), | 326 | TP_ARGS(ring), |
219 | |||
220 | TP_ARGS(dev), | ||
221 | 327 | ||
222 | TP_STRUCT__entry( | 328 | TP_STRUCT__entry( |
223 | __field(u32, dev) | 329 | __field(u32, dev) |
330 | __field(u32, ring) | ||
224 | ), | 331 | ), |
225 | 332 | ||
226 | TP_fast_assign( | 333 | TP_fast_assign( |
227 | __entry->dev = dev->primary->index; | 334 | __entry->dev = ring->dev->primary->index; |
335 | __entry->ring = ring->id; | ||
228 | ), | 336 | ), |
229 | 337 | ||
230 | TP_printk("dev=%u", __entry->dev) | 338 | TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring) |
231 | ); | 339 | ); |
232 | 340 | ||
233 | DEFINE_EVENT(i915_ring, i915_ring_wait_begin, | 341 | DEFINE_EVENT(i915_ring, i915_ring_wait_begin, |
234 | 342 | TP_PROTO(struct intel_ring_buffer *ring), | |
235 | TP_PROTO(struct drm_device *dev), | 343 | TP_ARGS(ring) |
236 | |||
237 | TP_ARGS(dev) | ||
238 | ); | 344 | ); |
239 | 345 | ||
240 | DEFINE_EVENT(i915_ring, i915_ring_wait_end, | 346 | DEFINE_EVENT(i915_ring, i915_ring_wait_end, |
241 | 347 | TP_PROTO(struct intel_ring_buffer *ring), | |
242 | TP_PROTO(struct drm_device *dev), | 348 | TP_ARGS(ring) |
243 | |||
244 | TP_ARGS(dev) | ||
245 | ); | 349 | ); |
246 | 350 | ||
247 | TRACE_EVENT(i915_flip_request, | 351 | TRACE_EVENT(i915_flip_request, |
@@ -281,26 +385,29 @@ TRACE_EVENT(i915_flip_complete, | |||
281 | ); | 385 | ); |
282 | 386 | ||
283 | TRACE_EVENT(i915_reg_rw, | 387 | TRACE_EVENT(i915_reg_rw, |
284 | TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len), | 388 | TP_PROTO(bool write, u32 reg, u64 val, int len), |
285 | 389 | ||
286 | TP_ARGS(cmd, reg, val, len), | 390 | TP_ARGS(write, reg, val, len), |
287 | 391 | ||
288 | TP_STRUCT__entry( | 392 | TP_STRUCT__entry( |
289 | __field(int, cmd) | 393 | __field(u64, val) |
290 | __field(uint32_t, reg) | 394 | __field(u32, reg) |
291 | __field(uint64_t, val) | 395 | __field(u16, write) |
292 | __field(int, len) | 396 | __field(u16, len) |
293 | ), | 397 | ), |
294 | 398 | ||
295 | TP_fast_assign( | 399 | TP_fast_assign( |
296 | __entry->cmd = cmd; | 400 | __entry->val = (u64)val; |
297 | __entry->reg = reg; | 401 | __entry->reg = reg; |
298 | __entry->val = (uint64_t)val; | 402 | __entry->write = write; |
299 | __entry->len = len; | 403 | __entry->len = len; |
300 | ), | 404 | ), |
301 | 405 | ||
302 | TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d", | 406 | TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)", |
303 | __entry->cmd, __entry->reg, __entry->val, __entry->len) | 407 | __entry->write ? "write" : "read", |
408 | __entry->reg, __entry->len, | ||
409 | (u32)(__entry->val & 0xffffffff), | ||
410 | (u32)(__entry->val >> 32)) | ||
304 | ); | 411 | ); |
305 | 412 | ||
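Two details are worth noting in the reworked reg_rw event: the u64 value field is declared first so the entry packs to 16 bytes without padding, and the value is printed as two u32 halves so 64-bit accessors fit the same event. The callers are presumably the register accessor macros in i915_drv.h, roughly:

	/* sketch: instrumentation in the I915_READ/I915_WRITE family */
	trace_i915_reg_rw(false, reg, val, sizeof(val));	/* after a read   */
	trace_i915_reg_rw(true,  reg, val, sizeof(val));	/* before a write */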
306 | #endif /* _I915_TRACE_H_ */ | 413 | #endif /* _I915_TRACE_H_ */ |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 3fbb98b948d6..d2fdfd589c85 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -221,16 +221,15 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, | |||
221 | int ret; | 221 | int ret; |
222 | 222 | ||
223 | BUG_ON(overlay->last_flip_req); | 223 | BUG_ON(overlay->last_flip_req); |
224 | ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); | 224 | ret = i915_add_request(LP_RING(dev_priv), NULL, request); |
225 | if (ret) { | 225 | if (ret) { |
226 | kfree(request); | 226 | kfree(request); |
227 | return ret; | 227 | return ret; |
228 | } | 228 | } |
229 | overlay->last_flip_req = request->seqno; | 229 | overlay->last_flip_req = request->seqno; |
230 | overlay->flip_tail = tail; | 230 | overlay->flip_tail = tail; |
231 | ret = i915_do_wait_request(dev, | 231 | ret = i915_wait_request(LP_RING(dev_priv), |
232 | overlay->last_flip_req, true, | 232 | overlay->last_flip_req, true); |
233 | LP_RING(dev_priv)); | ||
234 | if (ret) | 233 | if (ret) |
235 | return ret; | 234 | return ret; |
236 | 235 | ||
@@ -364,7 +363,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, | |||
364 | OUT_RING(flip_addr); | 363 | OUT_RING(flip_addr); |
365 | ADVANCE_LP_RING(); | 364 | ADVANCE_LP_RING(); |
366 | 365 | ||
367 | ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); | 366 | ret = i915_add_request(LP_RING(dev_priv), NULL, request); |
368 | if (ret) { | 367 | if (ret) { |
369 | kfree(request); | 368 | kfree(request); |
370 | return ret; | 369 | return ret; |
@@ -453,8 +452,8 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, | |||
453 | if (overlay->last_flip_req == 0) | 452 | if (overlay->last_flip_req == 0) |
454 | return 0; | 453 | return 0; |
455 | 454 | ||
456 | ret = i915_do_wait_request(dev, overlay->last_flip_req, | 455 | ret = i915_wait_request(LP_RING(dev_priv), |
457 | interruptible, LP_RING(dev_priv)); | 456 | overlay->last_flip_req, interruptible); |
458 | if (ret) | 457 | if (ret) |
459 | return ret; | 458 | return ret; |
460 | 459 | ||
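The overlay hunks track an API reshuffle rather than a tracepoint change: requests are per-ring, so i915_add_request() and the renamed i915_wait_request() now take the ring as their first argument instead of the drm_device. Judging from the call sites above, the new prototypes read roughly:

	int i915_add_request(struct intel_ring_buffer *ring,
			     struct drm_file *file,
			     struct drm_i915_gem_request *request);
	int i915_wait_request(struct intel_ring_buffer *ring,
			      u32 seqno, bool interruptible);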
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 235d9c4b40ae..ec7175e0dcd8 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -62,18 +62,9 @@ render_ring_flush(struct intel_ring_buffer *ring, | |||
62 | u32 flush_domains) | 62 | u32 flush_domains) |
63 | { | 63 | { |
64 | struct drm_device *dev = ring->dev; | 64 | struct drm_device *dev = ring->dev; |
65 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
66 | u32 cmd; | 65 | u32 cmd; |
67 | int ret; | 66 | int ret; |
68 | 67 | ||
69 | #if WATCH_EXEC | ||
70 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, | ||
71 | invalidate_domains, flush_domains); | ||
72 | #endif | ||
73 | |||
74 | trace_i915_gem_request_flush(dev, dev_priv->next_seqno, | ||
75 | invalidate_domains, flush_domains); | ||
76 | |||
77 | if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { | 68 | if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { |
78 | /* | 69 | /* |
79 | * read/write caches: | 70 | * read/write caches: |
@@ -122,9 +113,6 @@ render_ring_flush(struct intel_ring_buffer *ring, | |||
122 | (IS_G4X(dev) || IS_GEN5(dev))) | 113 | (IS_G4X(dev) || IS_GEN5(dev))) |
123 | cmd |= MI_INVALIDATE_ISP; | 114 | cmd |= MI_INVALIDATE_ISP; |
124 | 115 | ||
125 | #if WATCH_EXEC | ||
126 | DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); | ||
127 | #endif | ||
128 | ret = intel_ring_begin(ring, 2); | 116 | ret = intel_ring_begin(ring, 2); |
129 | if (ret) | 117 | if (ret) |
130 | return ret; | 118 | return ret; |
@@ -714,11 +702,8 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | |||
714 | u32 offset, u32 len) | 702 | u32 offset, u32 len) |
715 | { | 703 | { |
716 | struct drm_device *dev = ring->dev; | 704 | struct drm_device *dev = ring->dev; |
717 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
718 | int ret; | 705 | int ret; |
719 | 706 | ||
720 | trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); | ||
721 | |||
722 | if (IS_I830(dev) || IS_845G(dev)) { | 707 | if (IS_I830(dev) || IS_845G(dev)) { |
723 | ret = intel_ring_begin(ring, 4); | 708 | ret = intel_ring_begin(ring, 4); |
724 | if (ret) | 709 | if (ret) |
@@ -953,13 +938,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) | |||
953 | return 0; | 938 | return 0; |
954 | } | 939 | } |
955 | 940 | ||
956 | trace_i915_ring_wait_begin (dev); | 941 | trace_i915_ring_wait_begin(ring); |
957 | end = jiffies + 3 * HZ; | 942 | end = jiffies + 3 * HZ; |
958 | do { | 943 | do { |
959 | ring->head = I915_READ_HEAD(ring); | 944 | ring->head = I915_READ_HEAD(ring); |
960 | ring->space = ring_space(ring); | 945 | ring->space = ring_space(ring); |
961 | if (ring->space >= n) { | 946 | if (ring->space >= n) { |
962 | trace_i915_ring_wait_end(dev); | 947 | trace_i915_ring_wait_end(ring); |
963 | return 0; | 948 | return 0; |
964 | } | 949 | } |
965 | 950 | ||
@@ -973,7 +958,7 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) | |||
973 | if (atomic_read(&dev_priv->mm.wedged)) | 958 | if (atomic_read(&dev_priv->mm.wedged)) |
974 | return -EAGAIN; | 959 | return -EAGAIN; |
975 | } while (!time_after(jiffies, end)); | 960 | } while (!time_after(jiffies, end)); |
976 | trace_i915_ring_wait_end (dev); | 961 | trace_i915_ring_wait_end(ring); |
977 | return -EBUSY; | 962 | return -EBUSY; |
978 | } | 963 | } |
979 | 964 | ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 5e14b09f67ce..bd6a5fbfa929 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -58,6 +58,7 @@ struct intel_ring_buffer { | |||
58 | u32 irq_refcount; | 58 | u32 irq_refcount; |
59 | u32 irq_mask; | 59 | u32 irq_mask; |
60 | u32 irq_seqno; /* last seq seen at irq time */ | 60 | u32 irq_seqno; /* last seq seen at irq time */
61 | u32 trace_irq_seqno; | ||
61 | u32 waiting_seqno; | 62 | u32 waiting_seqno; |
62 | u32 sync_seqno[I915_NUM_RINGS-1]; | 63 | u32 sync_seqno[I915_NUM_RINGS-1]; |
63 | bool __must_check (*irq_get)(struct intel_ring_buffer *ring); | 64 | bool __must_check (*irq_get)(struct intel_ring_buffer *ring); |
@@ -186,6 +187,12 @@ int intel_init_blt_ring_buffer(struct drm_device *dev); | |||
186 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); | 187 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); |
187 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); | 188 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); |
188 | 189 | ||
190 | static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) | ||
191 | { | ||
192 | if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) | ||
193 | ring->trace_irq_seqno = seqno; | ||
194 | } | ||
195 | |||
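i915_trace_irq_get() takes the interrupt reference at most once (the trace_irq_seqno == 0 guard) and remembers which seqno it is waiting on; the matching irq_put() has to happen once that seqno has passed, presumably from the ring interrupt handler along these lines (a sketch; the exact placement in i915_irq.c is an assumption):

	/* sketch: in the ring interrupt handler, after reading the current seqno */
	if (ring->trace_irq_seqno &&
	    i915_seqno_passed(seqno, ring->trace_irq_seqno)) {
		ring->irq_put(ring);
		ring->trace_irq_seqno = 0;
	}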
189 | /* DRI warts */ | 196 | /* DRI warts */ |
190 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); | 197 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); |
191 | 198 | ||