author		Thomas Gleixner <tglx@linutronix.de>	2014-07-16 17:05:07 -0400
committer	John Stultz <john.stultz@linaro.org>	2014-07-23 18:01:50 -0400
commit		f166e6dcb7225c4193bcda68c9346583ed78b186
tree		aa9bbe498428fc8b8b8def7dfcd269630603853f
parent		5ed0bdf21a85d78e04f89f15ccf227562177cbd9
drm: vmwgfx: Use nsec based interfaces
No point in converting timespecs back and forth.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h    |  4 ++--
 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c | 44 ++++++++++++++------------------------------
 2 files changed, 16 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 6b252a887ae2..c886c024c637 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -159,8 +159,8 @@ struct vmw_surface {
 
 struct vmw_marker_queue {
         struct list_head head;
-        struct timespec lag;
-        struct timespec lag_time;
+        u64 lag;
+        u64 lag_time;
         spinlock_t lock;
 };
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
index 8a8725c2716c..efd1ffd68185 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
@@ -31,14 +31,14 @@
 struct vmw_marker {
         struct list_head head;
         uint32_t seqno;
-        struct timespec submitted;
+        u64 submitted;
 };
 
 void vmw_marker_queue_init(struct vmw_marker_queue *queue)
 {
         INIT_LIST_HEAD(&queue->head);
-        queue->lag = ns_to_timespec(0);
-        getrawmonotonic(&queue->lag_time);
+        queue->lag = 0;
+        queue->lag_time = ktime_get_raw_ns();
         spin_lock_init(&queue->lock);
 }
 
@@ -62,7 +62,7 @@ int vmw_marker_push(struct vmw_marker_queue *queue,
                 return -ENOMEM;
 
         marker->seqno = seqno;
-        getrawmonotonic(&marker->submitted);
+        marker->submitted = ktime_get_raw_ns();
         spin_lock(&queue->lock);
         list_add_tail(&marker->head, &queue->head);
         spin_unlock(&queue->lock);
@@ -74,14 +74,14 @@ int vmw_marker_pull(struct vmw_marker_queue *queue,
                     uint32_t signaled_seqno)
 {
         struct vmw_marker *marker, *next;
-        struct timespec now;
         bool updated = false;
+        u64 now;
 
         spin_lock(&queue->lock);
-        getrawmonotonic(&now);
+        now = ktime_get_raw_ns();
 
         if (list_empty(&queue->head)) {
-                queue->lag = ns_to_timespec(0);
+                queue->lag = 0;
                 queue->lag_time = now;
                 updated = true;
                 goto out_unlock;
@@ -91,7 +91,7 @@ int vmw_marker_pull(struct vmw_marker_queue *queue,
                 if (signaled_seqno - marker->seqno > (1 << 30))
                         continue;
 
-                queue->lag = timespec_sub(now, marker->submitted);
+                queue->lag = now - marker->submitted;
                 queue->lag_time = now;
                 updated = true;
                 list_del(&marker->head);
@@ -104,27 +104,13 @@ out_unlock:
         return (updated) ? 0 : -EBUSY;
 }
 
-static struct timespec vmw_timespec_add(struct timespec t1,
-                                        struct timespec t2)
+static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
 {
-        t1.tv_sec += t2.tv_sec;
-        t1.tv_nsec += t2.tv_nsec;
-        if (t1.tv_nsec >= 1000000000L) {
-                t1.tv_sec += 1;
-                t1.tv_nsec -= 1000000000L;
-        }
-
-        return t1;
-}
-
-static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
-{
-        struct timespec now;
+        u64 now;
 
         spin_lock(&queue->lock);
-        getrawmonotonic(&now);
-        queue->lag = vmw_timespec_add(queue->lag,
-                                      timespec_sub(now, queue->lag_time));
+        now = ktime_get_raw_ns();
+        queue->lag += now - queue->lag_time;
         queue->lag_time = now;
         spin_unlock(&queue->lock);
         return queue->lag;
@@ -134,11 +120,9 @@ static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
 static bool vmw_lag_lt(struct vmw_marker_queue *queue,
                        uint32_t us)
 {
-        struct timespec lag, cond;
+        u64 cond = (u64) us * NSEC_PER_USEC;
 
-        cond = ns_to_timespec((s64) us * 1000);
-        lag = vmw_fifo_lag(queue);
-        return (timespec_compare(&lag, &cond) < 1);
+        return vmw_fifo_lag(queue) <= cond;
 }
 
 int vmw_wait_lag(struct vmw_private *dev_priv,
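For reference, the same nanosecond-first pattern in userspace:
clock_gettime(CLOCK_MONOTONIC_RAW) stands in for ktime_get_raw_ns(), and the
rewritten vmw_lag_lt() check becomes a direct u64 comparison. A sketch under
those assumptions, not code from the patch:

#include <stdint.h>
#include <time.h>

/* Userspace analogue of ktime_get_raw_ns(): read the raw monotonic
 * clock once and flatten it to a single 64-bit nanosecond value. */
static uint64_t raw_monotonic_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Counterpart of the new vmw_lag_lt(): convert the microsecond
 * threshold once (NSEC_PER_USEC == 1000) and compare integers;
 * no timespec_compare() round trip. */
static int lag_lt(uint64_t lag_ns, uint32_t us)
{
        return lag_ns <= (uint64_t)us * 1000ULL;
}

Both getrawmonotonic() and ktime_get_raw_ns() read the raw monotonic clock;
the newer interface simply returns nanoseconds directly instead of filling in
a struct timespec, which is what makes the conversion helpers above removable.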