-rw-r--r--  drivers/gpu/drm/i915/Makefile              1
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c            8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          119
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c            9
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h        315
-rw-r--r--  drivers/gpu/drm/i915/i915_trace_points.c  11
6 files changed, 447 insertions, 16 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5269dfa5f620..fa7b9be096bc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  i915_gem.o \
 	  i915_gem_debug.o \
 	  i915_gem_tiling.o \
+	  i915_trace_points.o \
 	  intel_display.o \
 	  intel_crt.o \
 	  intel_lvds.o \
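
The only Makefile change is a new object file, i915_trace_points.o, which exists purely to instantiate the tracepoints: the kernel's tracing machinery requires CREATE_TRACE_POINTS to be defined in exactly one compilation unit before the trace header is included, turning the TRACE_EVENT() declarations into real event definitions, while every other file includes i915_trace.h plainly and gets only the inline trace_*() call sites. A minimal sketch of that pattern (it is exactly what the new i915_trace_points.c at the end of this patch does):

/* One-definition pattern for tracepoints: define CREATE_TRACE_POINTS in
 * a single object file, then include the trace header.
 */
#include "i915_drv.h"		/* types referenced by the event definitions */

#define CREATE_TRACE_POINTS	/* expand TRACE_EVENT() into definitions ... */
#include "i915_trace.h"		/* ... in this one object file only */
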
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 59826c5b8760..ae7ec0390024 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -33,6 +33,7 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include "i915_drm.h" 34#include "i915_drm.h"
35#include "i915_drv.h" 35#include "i915_drv.h"
36#include "i915_trace.h"
36 37
37/* Really want an OS-independent resettable timer. Would like to have 38/* Really want an OS-independent resettable timer. Would like to have
38 * this loop run for (eg) 3 sec, but have the timer reset every time 39 * this loop run for (eg) 3 sec, but have the timer reset every time
@@ -49,14 +50,18 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 	int i;
 
+	trace_i915_ring_wait_begin (dev);
+
 	for (i = 0; i < 100000; i++) {
 		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 		acthd = I915_READ(acthd_reg);
 		ring->space = ring->head - (ring->tail + 8);
 		if (ring->space < 0)
 			ring->space += ring->Size;
-		if (ring->space >= n)
+		if (ring->space >= n) {
+			trace_i915_ring_wait_end (dev);
 			return 0;
+		}
 
 		if (dev->primary->master) {
 			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -76,6 +81,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 
 	}
 
+	trace_i915_ring_wait_end (dev);
 	return -EBUSY;
 }
 
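
These hunks bracket the ring-space busy-wait with a begin/end pair, and both ways out of the function emit the end event — the early return once space frees up and the -EBUSY timeout — so paired events always delimit the stall. Condensed from the hunks (the head/space recomputation is elided):

trace_i915_ring_wait_begin(dev);
for (i = 0; i < 100000; i++) {
	/* ... re-read PRB0_HEAD and recompute ring->space ... */
	if (ring->space >= n) {
		trace_i915_ring_wait_end(dev);	/* success path */
		return 0;
	}
}
trace_i915_ring_wait_end(dev);			/* timeout path, still paired */
return -EBUSY;
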
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dea9ac069851..67e2cd5636ec 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,6 +29,7 @@
29#include "drm.h" 29#include "drm.h"
30#include "i915_drm.h" 30#include "i915_drm.h"
31#include "i915_drv.h" 31#include "i915_drv.h"
32#include "i915_trace.h"
32#include "intel_drv.h" 33#include "intel_drv.h"
33#include <linux/swap.h> 34#include <linux/swap.h>
34#include <linux/pci.h> 35#include <linux/pci.h>
@@ -1618,8 +1619,14 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 
 		if ((obj->write_domain & flush_domains) ==
 		    obj->write_domain) {
+			uint32_t old_write_domain = obj->write_domain;
+
 			obj->write_domain = 0;
 			i915_gem_object_move_to_active(obj, seqno);
+
+			trace_i915_gem_object_change_domain(obj,
+							    obj->read_domains,
+							    old_write_domain);
 		}
 	}
 
@@ -1667,6 +1674,8 @@ i915_gem_retire_request(struct drm_device *dev,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
+	trace_i915_gem_request_retire(dev, request->seqno);
+
 	/* Move any buffers on the active list that are no longer referenced
 	 * by the ringbuffer to the flushing/inactive lists as appropriate.
 	 */
@@ -1810,6 +1819,8 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 		i915_driver_irq_postinstall(dev);
 	}
 
+	trace_i915_gem_request_wait_begin(dev, seqno);
+
 	dev_priv->mm.waiting_gem_seqno = seqno;
 	i915_user_irq_get(dev);
 	ret = wait_event_interruptible(dev_priv->irq_queue,
@@ -1818,6 +1829,8 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 			   atomic_read(&dev_priv->mm.wedged));
 		i915_user_irq_put(dev);
 		dev_priv->mm.waiting_gem_seqno = 0;
+
+		trace_i915_gem_request_wait_end(dev, seqno);
 	}
 	if (atomic_read(&dev_priv->mm.wedged))
 		ret = -EIO;
@@ -1850,6 +1863,8 @@ i915_gem_flush(struct drm_device *dev,
 	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
 		  invalidate_domains, flush_domains);
 #endif
+	trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
+				     invalidate_domains, flush_domains);
 
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		drm_agp_chipset_flush(dev);
@@ -2003,6 +2018,8 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	if (!list_empty(&obj_priv->list))
 		list_del_init(&obj_priv->list);
 
+	trace_i915_gem_object_unbind(obj);
+
 	return 0;
 }
 
@@ -2452,6 +2469,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 	else
 		i830_write_fence_reg(reg);
 
+	trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+
 	return 0;
 }
 
@@ -2650,6 +2669,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
 	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
+	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+
 	return 0;
 }
 
@@ -2665,6 +2686,8 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 	if (obj_priv->pages == NULL)
 		return;
 
+	trace_i915_gem_object_clflush(obj);
+
 	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
 }
 
@@ -2674,21 +2697,29 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	uint32_t seqno;
+	uint32_t old_write_domain;
 
 	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
 		return;
 
 	/* Queue the GPU write cache flushing we need. */
+	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
 	seqno = i915_add_request(dev, NULL, obj->write_domain);
 	obj->write_domain = 0;
 	i915_gem_object_move_to_active(obj, seqno);
+
+	trace_i915_gem_object_change_domain(obj,
+					    obj->read_domains,
+					    old_write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
 static void
 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
 {
+	uint32_t old_write_domain;
+
 	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
 		return;
 
@@ -2696,7 +2727,12 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
 	 * to it immediately go to main memory as far as we know, so there's
 	 * no chipset flush.  It also doesn't land in render cache.
 	 */
+	old_write_domain = obj->write_domain;
 	obj->write_domain = 0;
+
+	trace_i915_gem_object_change_domain(obj,
+					    obj->read_domains,
+					    old_write_domain);
 }
 
 /** Flushes the CPU write domain for the object if it's dirty. */
@@ -2704,13 +2740,19 @@ static void
 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	uint32_t old_write_domain;
 
 	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
 		return;
 
 	i915_gem_clflush_object(obj);
 	drm_agp_chipset_flush(dev);
+	old_write_domain = obj->write_domain;
 	obj->write_domain = 0;
+
+	trace_i915_gem_object_change_domain(obj,
+					    obj->read_domains,
+					    old_write_domain);
 }
 
 /**
@@ -2723,6 +2765,7 @@ int
 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
@@ -2735,6 +2778,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 	if (ret != 0)
 		return ret;
 
+	old_write_domain = obj->write_domain;
+	old_read_domains = obj->read_domains;
+
 	/* If we're writing through the GTT domain, then CPU and GPU caches
 	 * will need to be invalidated at next use.
 	 */
@@ -2753,6 +2799,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 		obj_priv->dirty = 1;
 	}
 
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+
 	return 0;
 }
 
2758 2808
@@ -2765,6 +2815,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 static int
 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 {
+	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
 	i915_gem_object_flush_gpu_write_domain(obj);
@@ -2780,6 +2831,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	 */
 	i915_gem_object_set_to_full_cpu_read_domain(obj);
 
+	old_write_domain = obj->write_domain;
+	old_read_domains = obj->read_domains;
+
 	/* Flush the CPU cache if it's still invalid. */
 	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
 		i915_gem_clflush_object(obj);
@@ -2800,6 +2854,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 		obj->write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+
 	return 0;
 }
 
@@ -2921,6 +2979,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
+	uint32_t old_read_domains;
 
 	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
 	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
@@ -2967,6 +3026,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 		i915_gem_clflush_object(obj);
 	}
 
+	old_read_domains = obj->read_domains;
+
 	/* The actual obj->write_domain will be updated with
 	 * pending_write_domain after we emit the accumulated flush for all
 	 * of our domain changes in execbuffers (which clears objects'
@@ -2985,6 +3046,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 		 obj->read_domains, obj->write_domain,
 		 dev->invalidate_domains, dev->flush_domains);
 #endif
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    obj->write_domain);
 }
 
 /**
@@ -3037,6 +3102,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 					  uint64_t offset, uint64_t size)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	uint32_t old_read_domains;
 	int i, ret;
 
 	if (offset == 0 && size == obj->size)
@@ -3083,8 +3149,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 	 */
 	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
+	old_read_domains = obj->read_domains;
 	obj->read_domains |= I915_GEM_DOMAIN_CPU;
 
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    obj->write_domain);
+
 	return 0;
 }
 
@@ -3282,6 +3353,8 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 	exec_len = (uint32_t) exec->batch_len;
 
+	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);
+
 	count = nbox ? nbox : 1;
 
 	for (i = 0; i < count; i++) {
@@ -3660,8 +3733,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
+		uint32_t old_write_domain = obj->write_domain;
 
 		obj->write_domain = obj->pending_write_domain;
+		trace_i915_gem_object_change_domain(obj,
+						    obj->read_domains,
+						    old_write_domain);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4050,6 +4127,8 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	INIT_LIST_HEAD(&obj_priv->fence_list);
 	obj_priv->madv = I915_MADV_WILLNEED;
 
+	trace_i915_gem_object_create(obj);
+
 	return 0;
 }
 
@@ -4058,6 +4137,8 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
+	trace_i915_gem_object_destroy(obj);
+
 	while (obj_priv->pin_count > 0)
 		i915_gem_object_unpin(obj);
 
@@ -4186,24 +4267,36 @@ i915_gem_idle(struct drm_device *dev)
 	 * the GPU domains and just stuff them onto inactive.
 	 */
 	while (!list_empty(&dev_priv->mm.active_list)) {
-		struct drm_i915_gem_object *obj_priv;
+		struct drm_gem_object *obj;
+		uint32_t old_write_domain;
 
-		obj_priv = list_first_entry(&dev_priv->mm.active_list,
-					    struct drm_i915_gem_object,
-					    list);
-		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-		i915_gem_object_move_to_inactive(obj_priv->obj);
+		obj = list_first_entry(&dev_priv->mm.active_list,
+				       struct drm_i915_gem_object,
+				       list)->obj;
+		old_write_domain = obj->write_domain;
+		obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+		i915_gem_object_move_to_inactive(obj);
+
+		trace_i915_gem_object_change_domain(obj,
+						    obj->read_domains,
+						    old_write_domain);
 	}
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	while (!list_empty(&dev_priv->mm.flushing_list)) {
-		struct drm_i915_gem_object *obj_priv;
-
-		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
-					    struct drm_i915_gem_object,
-					    list);
-		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-		i915_gem_object_move_to_inactive(obj_priv->obj);
+		struct drm_gem_object *obj;
+		uint32_t old_write_domain;
+
+		obj = list_first_entry(&dev_priv->mm.flushing_list,
+				       struct drm_i915_gem_object,
+				       list)->obj;
+		old_write_domain = obj->write_domain;
+		obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+		i915_gem_object_move_to_inactive(obj);
+
+		trace_i915_gem_object_change_domain(obj,
+						    obj->read_domains,
+						    old_write_domain);
 	}
 
 
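
Every change_domain call site added in this file follows the same discipline: snapshot the domain word before rewriting it, mutate, then emit one event that captures the whole transition. The shape, condensed from the hunks above:

uint32_t old_write_domain = obj->write_domain;	/* snapshot before the rewrite */

obj->write_domain = 0;				/* or whatever the new state is */
i915_gem_object_move_to_active(obj, seqno);

trace_i915_gem_object_change_domain(obj,
				    obj->read_domains,	/* current read domains */
				    old_write_domain);	/* prior write domain */
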
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 13e664ddb611..4dfeec7cdd42 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -31,6 +31,7 @@
31#include "drm.h" 31#include "drm.h"
32#include "i915_drm.h" 32#include "i915_drm.h"
33#include "i915_drv.h" 33#include "i915_drv.h"
34#include "i915_trace.h"
34#include "intel_drv.h" 35#include "intel_drv.h"
35 36
36#define MAX_NOPID ((u32)~0) 37#define MAX_NOPID ((u32)~0)
@@ -279,7 +280,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
 	}
 
 	if (gt_iir & GT_USER_INTERRUPT) {
-		dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+		u32 seqno = i915_get_gem_seqno(dev);
+		dev_priv->mm.irq_gem_seqno = seqno;
+		trace_i915_gem_request_complete(dev, seqno);
 		DRM_WAKEUP(&dev_priv->irq_queue);
 	}
 
@@ -622,7 +625,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	}
 
 	if (iir & I915_USER_INTERRUPT) {
-		dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+		u32 seqno = i915_get_gem_seqno(dev);
+		dev_priv->mm.irq_gem_seqno = seqno;
+		trace_i915_gem_request_complete(dev, seqno);
 		DRM_WAKEUP(&dev_priv->irq_queue);
 		dev_priv->hangcheck_count = 0;
 		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
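
Worth noting in both handlers: the seqno is read from the hardware status page exactly once, and the same local value feeds both the waiter bookkeeping and the new tracepoint. A contrast sketch (the first variant is hypothetical, shown only to illustrate the hazard):

/* hypothetical racy variant: two reads could return different seqnos */
dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
trace_i915_gem_request_complete(dev, i915_get_gem_seqno(dev));

/* what the patch actually does: one read, one consistent value */
u32 seqno = i915_get_gem_seqno(dev);
dev_priv->mm.irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
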
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
new file mode 100644
index 000000000000..5567a40816f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -0,0 +1,315 @@
+#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I915_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i915
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE i915_trace
+
+/* object tracking */
+
+TRACE_EVENT(i915_gem_object_create,
+
+	    TP_PROTO(struct drm_gem_object *obj),
+
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_gem_object *, obj)
+			     __field(u32, size)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->size = obj->size;
+			   ),
+
+	    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
+);
+
+TRACE_EVENT(i915_gem_object_bind,
+
+	    TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
+
+	    TP_ARGS(obj, gtt_offset),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_gem_object *, obj)
+			     __field(u32, gtt_offset)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->gtt_offset = gtt_offset;
+			   ),
+
+	    TP_printk("obj=%p, gtt_offset=%08x",
+		      __entry->obj, __entry->gtt_offset)
+);
+
+TRACE_EVENT(i915_gem_object_clflush,
+
+	    TP_PROTO(struct drm_gem_object *obj),
+
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_gem_object *, obj)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   ),
+
+	    TP_printk("obj=%p", __entry->obj)
+);
+
+TRACE_EVENT(i915_gem_object_change_domain,
+
+	    TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains,
+		     uint32_t old_write_domain),
+
+	    TP_ARGS(obj, old_read_domains, old_write_domain),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_gem_object *, obj)
+			     __field(u32, read_domains)
+			     __field(u32, write_domain)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->read_domains = obj->read_domains | (old_read_domains << 16);
+			   __entry->write_domain = obj->write_domain | (old_write_domain << 16);
+			   ),
+
+	    TP_printk("obj=%p, read=%04x, write=%04x",
+		      __entry->obj,
+		      __entry->read_domains, __entry->write_domain)
+);
+
+TRACE_EVENT(i915_gem_object_get_fence,
+
+	    TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
+
+	    TP_ARGS(obj, fence, tiling_mode),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_gem_object *, obj)
+			     __field(int, fence)
+			     __field(int, tiling_mode)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->fence = fence;
+			   __entry->tiling_mode = tiling_mode;
+			   ),
+
+	    TP_printk("obj=%p, fence=%d, tiling=%d",
+		      __entry->obj, __entry->fence, __entry->tiling_mode)
+);
+
+TRACE_EVENT(i915_gem_object_unbind,
+
+	    TP_PROTO(struct drm_gem_object *obj),
+
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_gem_object *, obj)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   ),
+
+	    TP_printk("obj=%p", __entry->obj)
+);
+
+TRACE_EVENT(i915_gem_object_destroy,
+
+	    TP_PROTO(struct drm_gem_object *obj),
+
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_gem_object *, obj)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   ),
+
+	    TP_printk("obj=%p", __entry->obj)
+);
+
+/* batch tracing */
+
+TRACE_EVENT(i915_gem_request_submit,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_device *, dev)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
174
175 TP_PROTO(struct drm_device *dev, u32 seqno,
176 u32 flush_domains, u32 invalidate_domains),
177
178 TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
179
180 TP_STRUCT__entry(
181 __field(struct drm_device *, dev)
182 __field(u32, seqno)
183 __field(u32, flush_domains)
184 __field(u32, invalidate_domains)
185 ),
186
187 TP_fast_assign(
188 __entry->dev = dev;
189 __entry->seqno = seqno;
190 __entry->flush_domains = flush_domains;
191 __entry->invalidate_domains = invalidate_domains;
192 ),
193
194 TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x",
195 __entry->dev, __entry->seqno,
196 __entry->flush_domains, __entry->invalidate_domains)
197);
+
+
+TRACE_EVENT(i915_gem_request_complete,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_device *, dev)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_retire,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_device *, dev)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_wait_begin,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_device *, dev)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_wait_end,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_device *, dev)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_ring_wait_begin,
+
+	    TP_PROTO(struct drm_device *dev),
+
+	    TP_ARGS(dev),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_device *, dev)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev;
+			   ),
+
+	    TP_printk("dev=%p", __entry->dev)
+);
+
+TRACE_EVENT(i915_ring_wait_end,
+
+	    TP_PROTO(struct drm_device *dev),
+
+	    TP_ARGS(dev),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_device *, dev)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev;
+			   ),
+
+	    TP_printk("dev=%p", __entry->dev)
+);
+
+#endif /* _I915_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#include <trace/define_trace.h>
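
Two things in this header are worth calling out. First, the guard on its opening line deliberately admits TRACE_HEADER_MULTI_READ: <trace/define_trace.h> re-includes the header several times under different macro expansions, and TRACE_INCLUDE_PATH must point back at the driver directory because the file does not live under include/trace/events/. Second, i915_gem_object_change_domain packs a whole transition into each u32 field — the previous mask in the high 16 bits, the current mask in the low 16 bits. A sketch of unpacking such a record on the consumer side (the layout follows TP_STRUCT__entry above; `entry` is a hypothetical pointer to a decoded event):

/* Unpacking the packed domain fields of i915_gem_object_change_domain.
 * The I915_GEM_DOMAIN_* flags fit in 16 bits, so one u32 carries both
 * the old and the new mask. */
u16 old_read  = entry->read_domains >> 16;	/* read domains before */
u16 new_read  = entry->read_domains & 0xffff;	/* read domains after  */
u16 old_write = entry->write_domain >> 16;	/* write domain before */
u16 new_write = entry->write_domain & 0xffff;	/* write domain after  */
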
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
new file mode 100644
index 000000000000..ead876eb6ea0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -0,0 +1,11 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * Authors:
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#include "i915_drv.h"
+
+#define CREATE_TRACE_POINTS
+#include "i915_trace.h"
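
Once built in, the events appear under the standard ftrace hierarchy, grouped by the TRACE_SYSTEM name set in the header — assuming debugfs is mounted in the usual place:

	echo 1 > /sys/kernel/debug/tracing/events/i915/enable
	cat /sys/kernel/debug/tracing/trace

enables the whole i915 group; individual events can be toggled through their own enable files (events/i915/i915_gem_request_complete/enable and so on), with output lines formatted by each event's TP_printk().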