author     Thomas Gleixner <tglx@linutronix.de>    2014-07-16 17:05:06 -0400
committer  John Stultz <john.stultz@linaro.org>    2014-07-23 18:01:50 -0400
commit     5ed0bdf21a85d78e04f89f15ccf227562177cbd9 (patch)
tree       44c8a001f5b0efc7bc86cb53b1dd3fe5684c8207
parent     f519b1a2e08c913375324a927992bb328387f169 (diff)
drm: i915: Use nsec based interfaces
Use ktime_get_raw_ns() and get rid of the back and forth timespec
conversions.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: John Stultz <john.stultz@linaro.org>
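
The conversion pattern applied throughout the patch is the same: keep timestamps as plain 64-bit nanosecond counts from ktime_get_raw_ns() and compute elapsed or remaining time with ordinary integer arithmetic, instead of shuttling values through struct timespec. A minimal sketch of that pattern (the helper name is illustrative and not part of the patch):

    #include <linux/ktime.h>

    /* Remaining wait budget in ns, clamped at zero -- the same arithmetic
     * __wait_seqno() below now does with its s64 before/now timestamps. */
    static s64 remaining_timeout_ns(s64 timeout_ns, u64 before_ns, u64 now_ns)
    {
            s64 remaining = timeout_ns - (s64)(now_ns - before_ns);

            return remaining < 0 ? 0 : remaining;
    }
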
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 12
3 files changed, 18 insertions, 29 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 374f964323ad..1f7700897dfc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -931,7 +931,7 @@ struct intel_ilk_power_mgmt {
         unsigned long last_time1;
         unsigned long chipset_power;
         u64 last_count2;
-        struct timespec last_time2;
+        u64 last_time2;
         unsigned long gfx_power;
         u8 corr;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f36126383d26..74531caa5191 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1149,16 +1149,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
 static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                         unsigned reset_counter,
                         bool interruptible,
-                        struct timespec *timeout,
+                        s64 *timeout,
                         struct drm_i915_file_private *file_priv)
 {
         struct drm_device *dev = ring->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
         const bool irq_test_in_progress =
                 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
-        struct timespec before, now;
         DEFINE_WAIT(wait);
         unsigned long timeout_expire;
+        s64 before, now;
         int ret;
 
         WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
@@ -1166,7 +1166,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
                 return 0;
 
-        timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
+        timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
 
         if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
                 gen6_rps_boost(dev_priv);
@@ -1181,7 +1181,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 
         /* Record current time in case interrupted by signal, or wedged */
         trace_i915_gem_request_wait_begin(ring, seqno);
-        getrawmonotonic(&before);
+        before = ktime_get_raw_ns();
         for (;;) {
                 struct timer_list timer;
 
@@ -1230,7 +1230,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                         destroy_timer_on_stack(&timer);
                 }
         }
-        getrawmonotonic(&now);
+        now = ktime_get_raw_ns();
         trace_i915_gem_request_wait_end(ring, seqno);
 
         if (!irq_test_in_progress)
@@ -1239,10 +1239,9 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
         finish_wait(&ring->irq_queue, &wait);
 
         if (timeout) {
-                struct timespec sleep_time = timespec_sub(now, before);
-                *timeout = timespec_sub(*timeout, sleep_time);
-                if (!timespec_valid(timeout)) /* i.e. negative time remains */
-                        set_normalized_timespec(timeout, 0, 0);
+                s64 tres = *timeout - (now - before);
+
+                *timeout = tres < 0 ? 0 : tres;
         }
 
         return ret;
@@ -2753,16 +2752,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
         struct drm_i915_gem_wait *args = data;
         struct drm_i915_gem_object *obj;
         struct intel_engine_cs *ring = NULL;
-        struct timespec timeout_stack, *timeout = NULL;
         unsigned reset_counter;
         u32 seqno = 0;
         int ret = 0;
 
-        if (args->timeout_ns >= 0) {
-                timeout_stack = ns_to_timespec(args->timeout_ns);
-                timeout = &timeout_stack;
-        }
-
         ret = i915_mutex_lock_interruptible(dev);
         if (ret)
                 return ret;
@@ -2787,9 +2780,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                 goto out;
 
         /* Do this after OLR check to make sure we make forward progress polling
-         * on this IOCTL with a 0 timeout (like busy ioctl)
+         * on this IOCTL with a timeout <=0 (like busy ioctl)
          */
-        if (!args->timeout_ns) {
+        if (args->timeout_ns <= 0) {
                 ret = -ETIME;
                 goto out;
         }
@@ -2798,10 +2791,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
         mutex_unlock(&dev->struct_mutex);
 
-        ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
-        if (timeout)
-                args->timeout_ns = timespec_to_ns(timeout);
-        return ret;
+        return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
+                            file->driver_priv);
 
 out:
         drm_gem_object_unreference(&obj->base);
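
For i915_gem_wait_ioctl(), the ioctl still takes a struct drm_i915_gem_wait with a nanosecond timeout and writes the remaining time back through timeout_ns; per the updated check above, a timeout of zero or less just polls and returns -ETIME if the object is still busy. A hedged userspace usage sketch (the wrapper name is illustrative only):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Poll or wait for a GEM buffer object; on return, wait.timeout_ns
     * holds the time remaining if the wait finished early. */
    static int wait_bo(int drm_fd, uint32_t handle, int64_t timeout_ns)
    {
            struct drm_i915_gem_wait wait = {
                    .bo_handle  = handle,
                    .timeout_ns = timeout_ns,
            };

            return ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
    }
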
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ee72807069e4..f1233f544f3e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2993,7 +2993,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
         I915_READ(0x112e0);
         dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
         dev_priv->ips.last_count2 = I915_READ(0x112f4);
-        getrawmonotonic(&dev_priv->ips.last_time2);
+        dev_priv->ips.last_time2 = ktime_get_raw_ns();
 
         spin_unlock_irq(&mchdev_lock);
 }
@@ -4314,18 +4314,16 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
 
 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
 {
-        struct timespec now, diff1;
-        u64 diff;
-        unsigned long diffms;
+        u64 now, diff, diffms;
         u32 count;
 
         assert_spin_locked(&mchdev_lock);
 
-        getrawmonotonic(&now);
-        diff1 = timespec_sub(now, dev_priv->ips.last_time2);
+        now = ktime_get_raw_ns();
+        diffms = now - dev_priv->ips.last_time2;
+        do_div(diffms, NSEC_PER_MSEC);
 
         /* Don't divide by 0 */
-        diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
         if (!diffms)
                 return;
 
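
In __i915_update_gfx_val() the elapsed time is now a raw-ns delta, and the millisecond conversion goes through do_div() so the 64-bit division also works on 32-bit kernels. A small sketch of that idiom (the helper is illustrative, not driver code):

    #include <linux/ktime.h>
    #include <linux/math64.h>

    /* Milliseconds elapsed since last_ns; do_div() divides the u64 in
     * place, avoiding a native 64-by-32 division on 32-bit builds. */
    static u64 elapsed_ms(u64 last_ns)
    {
            u64 diffms = ktime_get_raw_ns() - last_ns;

            do_div(diffms, NSEC_PER_MSEC);
            return diffms;
    }
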