-rw-r--r--	drivers/cpufreq/cpufreq.c		 20
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c	 39
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c		  3
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h		 11
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c		191
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c	 39
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c		 13
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h		  4
-rw-r--r--	drivers/gpu/drm/i915/i915_suspend.c	  4
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	 78
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h	  1
-rw-r--r--	drivers/gpu/drm/i915/intel_overlay.c	  6
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	  6
-rw-r--r--	include/linux/cpufreq.h			  5
14 files changed, 346 insertions(+), 74 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0a5bea9e3585..987a165ede26 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1199,6 +1199,26 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_quick_get);
 
+/**
+ * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
+ * @cpu: CPU number
+ *
+ * Just return the max possible frequency for a given CPU.
+ */
+unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	unsigned int ret_freq = 0;
+
+	if (policy) {
+		ret_freq = policy->max;
+		cpufreq_cpu_put(policy);
+	}
+
+	return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_quick_get_max);
+
 
 static unsigned int __cpufreq_get(unsigned int cpu)
 {
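The new helper mirrors cpufreq_quick_get() but reports policy->max rather than the current frequency, and returns 0 (in kHz) when no policy is registered for that CPU. A minimal sketch of the calling convention its i915 consumer relies on (hypothetical caller, not part of this patch):

	/* Derive an IA frequency ceiling in MHz; 0 from the helper means
	 * cpufreq could not tell us, so the caller must pick a fallback
	 * (gen6_update_ring_freq() below falls back to tsc_khz). */
	unsigned int max_khz = cpufreq_quick_get_max(0);	/* CPU 0 */
	unsigned int max_mhz = max_khz / 1000;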
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4d46441cbe2d..8a5a032ec696 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1123,6 +1123,44 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_ring_freq_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	int gpu_freq, ia_freq;
+
+	if (!IS_GEN6(dev)) {
+		seq_printf(m, "unsupported on this chipset\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
+
+	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+	     gpu_freq++) {
+		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
+		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+			   GEN6_PCODE_READ_MIN_FREQ_TABLE);
+		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+			      GEN6_PCODE_READY) == 0, 10)) {
+			DRM_ERROR("pcode read of freq table timed out\n");
+			continue;
+		}
+		ia_freq = I915_READ(GEN6_PCODE_DATA);
+		seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static int i915_gfxec(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1426,6 +1464,7 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_inttoext_table", i915_inttoext_table, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
 	{"i915_emon_status", i915_emon_status, 0},
+	{"i915_ring_freq_table", i915_ring_freq_table, 0},
 	{"i915_gfxec", i915_gfxec, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 609358faaa90..b54f7d9b173a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -70,6 +70,9 @@ module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
 static bool i915_try_reset = true;
 module_param_named(reset, i915_try_reset, bool, 0600);
 
+bool i915_enable_hangcheck = true;
+module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
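Note the 0644 permissions on enable_hangcheck: besides the boot-time i915.enable_hangcheck=0 option, root can toggle the check at runtime through /sys/module/i915/parameters/enable_hangcheck, which is why the timer re-arm sites below re-test the flag on every request rather than sampling it once at load.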
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index eddabf68e97a..e0f9ca3e5ff8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -994,6 +994,7 @@ extern unsigned int i915_panel_use_ssc;
 extern int i915_vbt_sdvo_panel_type;
 extern unsigned int i915_enable_rc6;
 extern unsigned int i915_enable_fbc;
+extern bool i915_enable_hangcheck;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1193,7 +1194,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
					     uint32_t read_domains,
					     uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 void i915_gem_do_init(struct drm_device *dev,
@@ -1212,7 +1213,8 @@ int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				   bool write);
 int __must_check
-i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+				     u32 alignment,
				     struct intel_ring_buffer *pipelined);
 int i915_gem_attach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj,
@@ -1226,9 +1228,14 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 uint32_t
 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
 
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+				    enum i915_cache_level cache_level);
+
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+				enum i915_cache_level cache_level);
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 
 /* i915_gem_evict.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5c0d1247f453..e9d1d5c3a696 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1771,8 +1771,11 @@ i915_add_request(struct intel_ring_buffer *ring,
 	ring->outstanding_lazy_request = false;
 
 	if (!dev_priv->mm.suspended) {
-		mod_timer(&dev_priv->hangcheck_timer,
-			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+		if (i915_enable_hangcheck) {
+			mod_timer(&dev_priv->hangcheck_timer,
+				  jiffies +
+				  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+		}
 		if (was_empty)
 			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work, HZ);
@@ -2143,6 +2146,30 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+{
+	u32 old_write_domain, old_read_domains;
+
+	/* Act as a barrier for all accesses through the GTT */
+	mb();
+
+	/* Force a pagefault for domain tracking on next user access */
+	i915_gem_release_mmap(obj);
+
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+		return;
+
+	old_read_domains = obj->base.read_domains;
+	old_write_domain = obj->base.write_domain;
+
+	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
+	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+}
+
 /**
  * Unbinds an object from the GTT aperture.
  */
@@ -2159,23 +2186,28 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 		return -EINVAL;
 	}
 
-	/* blow away mappings if mapped through GTT */
-	i915_gem_release_mmap(obj);
-
-	/* Move the object to the CPU domain to ensure that
-	 * any possible CPU writes while it's not in the GTT
-	 * are flushed when we go to remap it. This will
-	 * also ensure that all pending GPU writes are finished
-	 * before we unbind.
-	 */
-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	ret = i915_gem_object_finish_gpu(obj);
 	if (ret == -ERESTARTSYS)
 		return ret;
 	/* Continue on if we fail due to EIO, the GPU is hung so we
	 * should be safe and we need to cleanup or else we might
	 * cause memory corruption through use-after-free.
	 */
+
+	i915_gem_object_finish_gtt(obj);
+
+	/* Move the object to the CPU domain to ensure that
+	 * any possible CPU writes while it's not in the GTT
+	 * are flushed when we go to remap it.
+	 */
+	if (ret == 0)
+		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	if (ret == -ERESTARTSYS)
+		return ret;
 	if (ret) {
+		/* In the event of a disaster, abandon all caches and
+		 * hope for the best.
+		 */
 		i915_gem_clflush_object(obj);
 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
@@ -2997,51 +3029,139 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	return 0;
 }
 
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+				    enum i915_cache_level cache_level)
+{
+	int ret;
+
+	if (obj->cache_level == cache_level)
+		return 0;
+
+	if (obj->pin_count) {
+		DRM_DEBUG("can not change the cache level of pinned objects\n");
+		return -EBUSY;
+	}
+
+	if (obj->gtt_space) {
+		ret = i915_gem_object_finish_gpu(obj);
+		if (ret)
+			return ret;
+
+		i915_gem_object_finish_gtt(obj);
+
+		/* Before SandyBridge, you could not use tiling or fence
+		 * registers with snooped memory, so relinquish any fences
+		 * currently pointing to our region in the aperture.
+		 */
+		if (INTEL_INFO(obj->base.dev)->gen < 6) {
+			ret = i915_gem_object_put_fence(obj);
+			if (ret)
+				return ret;
+		}
+
+		i915_gem_gtt_rebind_object(obj, cache_level);
+	}
+
+	if (cache_level == I915_CACHE_NONE) {
+		u32 old_read_domains, old_write_domain;
+
+		/* If we're coming from LLC cached, then we haven't
+		 * actually been tracking whether the data is in the
+		 * CPU cache or not, since we only allow one bit set
+		 * in obj->write_domain and have been skipping the clflushes.
+		 * Just set it to the CPU cache for now.
+		 */
+		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+		WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
+
+		old_read_domains = obj->base.read_domains;
+		old_write_domain = obj->base.write_domain;
+
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+
+		trace_i915_gem_object_change_domain(obj,
+						    old_read_domains,
+						    old_write_domain);
+	}
+
+	obj->cache_level = cache_level;
+	return 0;
+}
+
 /*
- * Prepare buffer for display plane. Use uninterruptible for possible flush
- * wait, as in modesetting process we're not supposed to be interrupted.
+ * Prepare buffer for display plane (scanout, cursors, etc).
+ * Can be called from an uninterruptible phase (modesetting) and allows
+ * any flushes to be pipelined (for pageflips).
+ *
+ * For the display plane, we want to be in the GTT but out of any write
+ * domains. So in many ways this looks like set_to_gtt_domain() apart from the
+ * ability to pipeline the waits, pinning and any additional subtleties
+ * that may differentiate the display plane from ordinary buffers.
  */
 int
-i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+				     u32 alignment,
				     struct intel_ring_buffer *pipelined)
 {
-	uint32_t old_read_domains;
+	u32 old_read_domains, old_write_domain;
 	int ret;
 
-	/* Not valid to be called on unbound objects. */
-	if (obj->gtt_space == NULL)
-		return -EINVAL;
-
 	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret)
 		return ret;
 
-
-	/* Currently, we are always called from an non-interruptible context. */
 	if (pipelined != obj->ring) {
 		ret = i915_gem_object_wait_rendering(obj);
 		if (ret)
 			return ret;
 	}
 
+	/* The display engine is not coherent with the LLC cache on gen6. As
+	 * a result, we make sure that the pinning that is about to occur is
+	 * done with uncached PTEs. This is lowest common denominator for all
+	 * chipsets.
+	 *
+	 * However for gen6+, we could do better by using the GFDT bit instead
+	 * of uncaching, which would allow us to flush all the LLC-cached data
+	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
+	 */
+	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+	if (ret)
+		return ret;
+
+	/* As the user may map the buffer once pinned in the display plane
+	 * (e.g. libkms for the bootup splash), we have to ensure that we
+	 * always use map_and_fenceable for all scanout buffers.
+	 */
+	ret = i915_gem_object_pin(obj, alignment, true);
+	if (ret)
+		return ret;
+
 	i915_gem_object_flush_cpu_write_domain(obj);
 
+	old_write_domain = obj->base.write_domain;
 	old_read_domains = obj->base.read_domains;
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
 	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
-					    obj->base.write_domain);
+					    old_write_domain);
 
 	return 0;
 }
 
 int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
+i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
 {
 	int ret;
 
-	if (!obj->active)
+	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
 	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
@@ -3050,6 +3170,9 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
 			return ret;
 	}
 
+	/* Ensure that we invalidate the GPU's caches and TLBs. */
+	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+
 	return i915_gem_object_wait_rendering(obj);
 }
 
@@ -3576,7 +3699,23 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
-	obj->cache_level = I915_CACHE_NONE;
+	if (IS_GEN6(dev)) {
+		/* On Gen6, we can have the GPU use the LLC (the CPU
+		 * cache) for about a 10% performance improvement
+		 * compared to uncached. Graphics requests other than
+		 * display scanout are coherent with the CPU in
+		 * accessing this cache. This means in this mode we
+		 * don't need to clflush on the CPU side, and on the
+		 * GPU side we only need to flush internal caches to
+		 * get data visible to the CPU.
+		 *
+		 * However, we maintain the display planes as UC, and so
+		 * need to rebind when first used as such.
+		 */
+		obj->cache_level = I915_CACHE_LLC;
+	} else
+		obj->cache_level = I915_CACHE_NONE;
+
 	obj->base.driver_private = NULL;
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj->mm_list);
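With this hunk, in-kernel users that want snooped access are expected to go through the new setter rather than poking obj->cache_level directly (the intel_ringbuffer.c hunks below make exactly that switch). A minimal sketch of the intended calling pattern, with the error handling a raw assignment could never provide (hypothetical caller, not part of this patch):

	/* Allocate an object, make it LLC-cached, then pin it. */
	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL)
		return -ENOMEM;

	/* Fails with -EBUSY if the object were already pinned, since a
	 * bound, pinned object cannot be rebound with new PTE flags. */
	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;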
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e46b645773cf..7a709cd8d543 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -59,24 +59,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
		      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
-		unsigned int agp_type =
-			cache_level_to_agp_type(dev, obj->cache_level);
-
 		i915_gem_clflush_object(obj);
-
-		if (dev_priv->mm.gtt->needs_dmar) {
-			BUG_ON(!obj->sg_list);
-
-			intel_gtt_insert_sg_entries(obj->sg_list,
-						    obj->num_sg,
-						    obj->gtt_space->start >> PAGE_SHIFT,
-						    agp_type);
-		} else
-			intel_gtt_insert_pages(obj->gtt_space->start
-						   >> PAGE_SHIFT,
-					       obj->base.size >> PAGE_SHIFT,
-					       obj->pages,
-					       agp_type);
+		i915_gem_gtt_rebind_object(obj, obj->cache_level);
 	}
 
 	intel_gtt_chipset_flush();
@@ -110,6 +94,27 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+				enum i915_cache_level cache_level)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+
+	if (dev_priv->mm.gtt->needs_dmar) {
+		BUG_ON(!obj->sg_list);
+
+		intel_gtt_insert_sg_entries(obj->sg_list,
+					    obj->num_sg,
+					    obj->gtt_space->start >> PAGE_SHIFT,
+					    agp_type);
+	} else
+		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+				       obj->base.size >> PAGE_SHIFT,
+				       obj->pages,
+				       agp_type);
+}
+
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
 	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ae2b49969b99..0b0de5239ad5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -361,10 +361,12 @@ static void notify_ring(struct drm_device *dev,
 
 	ring->irq_seqno = seqno;
 	wake_up_all(&ring->irq_queue);
-
-	dev_priv->hangcheck_count = 0;
-	mod_timer(&dev_priv->hangcheck_timer,
-		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+	if (i915_enable_hangcheck) {
+		dev_priv->hangcheck_count = 0;
+		mod_timer(&dev_priv->hangcheck_timer,
+			  jiffies +
+			  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+	}
 }
 
 static void gen6_pm_rps_work(struct work_struct *work)
@@ -1664,6 +1666,9 @@ void i915_hangcheck_elapsed(unsigned long data)
 	uint32_t acthd, instdone, instdone1;
 	bool err = false;
 
+	if (!i915_enable_hangcheck)
+		return;
+
 	/* If all work is done then ACTHD clearly hasn't advanced. */
 	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5d5def756c9e..4a446b116e6a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3434,7 +3434,9 @@
 #define GEN6_PCODE_MAILBOX			0x138124
 #define   GEN6_PCODE_READY			(1<<31)
 #define   GEN6_READ_OC_PARAMS			0xc
-#define   GEN6_PCODE_WRITE_MIN_FREQ_TABLE	0x9
+#define   GEN6_PCODE_WRITE_MIN_FREQ_TABLE	0x8
+#define   GEN6_PCODE_READ_MIN_FREQ_TABLE	0x9
 #define GEN6_PCODE_DATA				0x138128
+#define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
 
 #endif /* _I915_REG_H_ */
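These defines encode the table-entry protocol used below: for a WRITE_MIN_FREQ_TABLE command, GEN6_PCODE_DATA carries the IA ratio in bits 15:8 and the GPU frequency ratio in bits 7:0. A worked example of the encoding, assuming the 50 MHz GPU / 100 MHz IA step sizes that the i915_ring_freq_table debugfs dump above prints (illustrative values, not from the patch):

	/* Pair a 700 MHz GPU freq (ratio 14) with a 1200 MHz effective
	 * IA freq (ratio 12). */
	u32 gpu_freq = 700 / 50;			/* 14 */
	u32 ia_freq  = DIV_ROUND_CLOSEST(1200, 100);	/* 12 */

	I915_WRITE(GEN6_PCODE_DATA,
		   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | gpu_freq);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY | GEN6_PCODE_WRITE_MIN_FREQ_TABLE);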
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index e8152d23d5b6..6fbd997f5a6c 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -875,8 +875,10 @@ int i915_restore_state(struct drm_device *dev)
 		intel_init_emon(dev);
 	}
 
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev)) {
 		gen6_enable_rps(dev_priv);
+		gen6_update_ring_freq(dev_priv);
+	}
 
 	/* Cache mode state */
 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 21b6f93fe919..804ac4d6cb48 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,6 +24,7 @@
  *	Eric Anholt <eric@anholt.net>
  */
 
+#include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/input.h>
 #include <linux/i2c.h>
@@ -1812,14 +1813,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	}
 
 	dev_priv->mm.interruptible = false;
-	ret = i915_gem_object_pin(obj, alignment, true);
+	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 	if (ret)
 		goto err_interruptible;
 
-	ret = i915_gem_object_set_to_display_plane(obj, pipelined);
-	if (ret)
-		goto err_unpin;
-
 	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression. For simplicity, we always install
@@ -1971,7 +1968,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
-		ret = i915_gem_object_flush_gpu(obj);
+		ret = i915_gem_object_finish_gpu(obj);
		(void) ret;
 	}
 
@@ -5434,21 +5431,15 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
			goto fail_locked;
		}
 
-		ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
-		if (ret) {
-			DRM_ERROR("failed to pin cursor bo\n");
-			goto fail_locked;
-		}
-
-		ret = i915_gem_object_set_to_gtt_domain(obj, 0);
+		ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
		if (ret) {
			DRM_ERROR("failed to move cursor bo into the GTT\n");
-			goto fail_unpin;
+			goto fail_locked;
		}
 
		ret = i915_gem_object_put_fence(obj);
		if (ret) {
-			DRM_ERROR("failed to move cursor bo into the GTT\n");
+			DRM_ERROR("failed to release fence for cursor");
			goto fail_unpin;
		}
 
@@ -7283,6 +7274,59 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+{
+	int min_freq = 15;
+	int gpu_freq, ia_freq, max_ia_freq;
+	int scaling_factor = 180;
+
+	max_ia_freq = cpufreq_quick_get_max(0);
+	/*
+	 * Default to measured freq if none found, PCU will ensure we don't go
+	 * over
+	 */
+	if (!max_ia_freq)
+		max_ia_freq = tsc_khz;
+
+	/* Convert from kHz to MHz */
+	max_ia_freq /= 1000;
+
+	mutex_lock(&dev_priv->dev->struct_mutex);
+
+	/*
+	 * For each potential GPU frequency, load a ring frequency we'd like
+	 * to use for memory access. We do this by specifying the IA frequency
+	 * the PCU should use as a reference to determine the ring frequency.
+	 */
+	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+	     gpu_freq--) {
+		int diff = dev_priv->max_delay - gpu_freq;
+
+		/*
+		 * For GPU frequencies less than 750MHz, just use the lowest
+		 * ring freq.
+		 */
+		if (gpu_freq < min_freq)
+			ia_freq = 800;
+		else
+			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+
+		I915_WRITE(GEN6_PCODE_DATA,
+			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
+			   gpu_freq);
+		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+			      GEN6_PCODE_READY) == 0, 10)) {
+			DRM_ERROR("pcode write of freq table timed out\n");
+			continue;
+		}
+	}
+
+	mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
 static void ironlake_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7926,8 +7970,10 @@ void intel_modeset_init(struct drm_device *dev)
 		intel_init_emon(dev);
 	}
 
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev)) {
 		gen6_enable_rps(dev_priv);
+		gen6_update_ring_freq(dev_priv);
+	}
 
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
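To see what the scaling loop in gen6_update_ring_freq() actually programs, assume (hypothetically) max_ia_freq = 3000 MHz, max_delay = 18 (900 MHz) and min_delay = 10 (500 MHz). Each step down from the top GPU bin lowers the requested IA reference by scaling_factor/2 = 90 MHz before rounding to a 100 MHz ratio; a standalone check of the arithmetic:

	/* Hypothetical standalone check of the table arithmetic
	 * (DIV_ROUND_CLOSEST is the kernel's rounding-divide macro). */
	int max_ia_freq = 3000, scaling_factor = 180, min_freq = 15;
	int max_delay = 18, min_delay = 10, gpu_freq;

	for (gpu_freq = max_delay; gpu_freq >= min_delay; gpu_freq--) {
		int diff = max_delay - gpu_freq;
		int ia_freq = (gpu_freq < min_freq) ? 800 :
			      max_ia_freq - ((diff * scaling_factor) / 2);

		/* gpu_freq * 50 MHz pairs with the rounded IA ratio * 100 MHz */
		printk("%d MHz -> %d MHz\n", gpu_freq * 50,
		       DIV_ROUND_CLOSEST(ia_freq, 100) * 100);
	}

So 900 MHz pairs with 3000 MHz, and 500 MHz with 2300 MHz (3000 − 8·90 = 2280, rounded to 2300) — exactly the pairs the i915_ring_freq_table debugfs file reads back.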
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9ffa61eb4d7e..8ac3bd8b6faa 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -317,6 +317,7 @@ extern void intel_enable_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
 extern void gen6_disable_rps(struct drm_device *dev);
 extern void intel_init_emon(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 56a8e2aea19c..cffd3edd9bb4 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -773,14 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
+	ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
-	if (ret != 0)
-		goto out_unpin;
-
 	ret = i915_gem_object_put_fence(new_bo);
 	if (ret)
 		goto out_unpin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 95c4b1429935..e9615685a39c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -236,7 +236,8 @@ init_pipe_control(struct intel_ring_buffer *ring)
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj->cache_level = I915_CACHE_LLC;
+
+	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
 	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret)
@@ -776,7 +777,8 @@ static int init_status_page(struct intel_ring_buffer *ring)
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj->cache_level = I915_CACHE_LLC;
+
+	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
 	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret != 0) {
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 11be48e0d168..6216115c7789 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -324,11 +324,16 @@ static inline unsigned int cpufreq_get(unsigned int cpu)
 /* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */
 #ifdef CONFIG_CPU_FREQ
 unsigned int cpufreq_quick_get(unsigned int cpu);
+unsigned int cpufreq_quick_get_max(unsigned int cpu);
 #else
 static inline unsigned int cpufreq_quick_get(unsigned int cpu)
 {
 	return 0;
 }
+static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+	return 0;
+}
 #endif
 
 