author     Eugeni Dodonov <eugeni.dodonov@intel.com>   2012-04-18 14:29:23 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>      2012-04-18 15:56:13 -0400
commit     2b4e57bd7a6a855dd1229f8cfbbdebfbc3f933be (patch)
tree       dbd7dc8849c99ac4af7a8a7123fdcc60aee5d6ad /drivers/gpu/drm/i915/intel_pm.c
parent     f6750b3cc6e9284f373a2fd155ec0bba38d02ad0 (diff)
drm/i915: move drps, rps and rc6-related functions to intel_pm
This moves the DRPS, RPS and RC6-related functionality into the intel_pm
module. It also removes the linux/cpufreq.h include from intel_display, as
its only user was the GPU turbo-related functionality in the Gen6+ code path.
v2: rebase on top of the latest drm-intel-next-queued, adding the bits that
shifted around since the last patch.
Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Acked-by: Ben Widawsky <benjamin.widawsky@intel.com>
Signed-off-by: Eugeni Dodonov <eugeni.dodonov@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
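
Because this view is limited to intel_pm.c, the matching removals from
intel_display.c and the shared prototypes are not shown. For orientation, a
sketch of the entry points the move exposes, assuming they are declared
alongside the driver's other shared prototypes in i915_drv.h:

    /* Hypothetical excerpt: how i915_drv.h would expose the moved functions. */
    bool ironlake_set_drps(struct drm_device *dev, u8 val);
    void ironlake_enable_drps(struct drm_device *dev);
    void ironlake_disable_drps(struct drm_device *dev);
    void gen6_set_rps(struct drm_device *dev, u8 val);
    void gen6_disable_rps(struct drm_device *dev);
    int intel_enable_rc6(const struct drm_device *dev);
    void gen6_enable_rps(struct drm_i915_private *dev_priv);
    void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
    void ironlake_enable_rc6(struct drm_device *dev);
    void ironlake_disable_rc6(struct drm_device *dev);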
Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 513
1 file changed, 513 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index c5bc4c456baa..2f45de3339bf 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -25,6 +25,7 @@
  *
  */
 
+#include <linux/cpufreq.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 
@@ -1979,3 +1980,515 @@ void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
 					   pixel_size);
 }
 
+static struct drm_i915_gem_object *
+intel_alloc_context_page(struct drm_device *dev)
+{
+	struct drm_i915_gem_object *ctx;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	ctx = i915_gem_alloc_object(dev, 4096);
+	if (!ctx) {
+		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+		return NULL;
+	}
+
+	ret = i915_gem_object_pin(ctx, 4096, true);
+	if (ret) {
+		DRM_ERROR("failed to pin power context: %d\n", ret);
+		goto err_unref;
+	}
+
+	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
+	if (ret) {
+		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+		goto err_unpin;
+	}
+
+	return ctx;
+
+err_unpin:
+	i915_gem_object_unpin(ctx);
+err_unref:
+	drm_gem_object_unreference(&ctx->base);
+	/* The caller holds struct_mutex (see the WARN_ON above), so the
+	 * error path must not unlock it. */
+	return NULL;
+}
+
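+/*
+ * Request a new DRPS frequency point. Bail if the previous command is
+ * still pending (MEMCTL_CMD_STS set); otherwise write the request and
+ * set the status bit to kick off the change.
+ */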
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u16 rgvswctl;
+
+	rgvswctl = I915_READ16(MEMSWCTL);
+	if (rgvswctl & MEMCTL_CMD_STS) {
+		DRM_DEBUG("gpu busy, RCS change rejected\n");
+		return false; /* still busy with another command */
+	}
+
+	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+	I915_WRITE16(MEMSWCTL, rgvswctl);
+	POSTING_READ16(MEMSWCTL);
+
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE16(MEMSWCTL, rgvswctl);
+
+	return true;
+}
+
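+/*
+ * Set up Ironlake DRPS: enable thermal reporting, program the RC
+ * evaluation intervals and busy thresholds, then switch to software
+ * frequency control starting from the fstart point read back from
+ * MEMMODECTL.
+ */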
+void ironlake_enable_drps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rgvmodectl = I915_READ(MEMMODECTL);
+	u8 fmax, fmin, fstart, vstart;
+
+	/* Enable temp reporting */
+	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
+	/* 100ms RC evaluation intervals */
+	I915_WRITE(RCUPEI, 100000);
+	I915_WRITE(RCDNEI, 100000);
+
+	/* Set max/min thresholds to 90ms and 80ms respectively */
+	I915_WRITE(RCBMAXAVG, 90000);
+	I915_WRITE(RCBMINAVG, 80000);
+
+	I915_WRITE(MEMIHYST, 1);
+
+	/* Set up min, max, and cur for interrupt handling */
+	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+		MEMMODE_FSTART_SHIFT;
+
+	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+		PXVFREQ_PX_SHIFT;
+
+	dev_priv->fmax = fmax; /* IPS callback will increase this */
+	dev_priv->fstart = fstart;
+
+	dev_priv->max_delay = fstart;
+	dev_priv->min_delay = fmin;
+	dev_priv->cur_delay = fstart;
+
+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+			 fmax, fmin, fstart);
+
+	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+	/*
+	 * Interrupts will be enabled in ironlake_irq_postinstall
+	 */
+
+	I915_WRITE(VIDSTART, vstart);
+	POSTING_READ(VIDSTART);
+
+	rgvmodectl |= MEMMODE_SWMODE_EN;
+	I915_WRITE(MEMMODECTL, rgvmodectl);
+
+	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+		DRM_ERROR("stuck trying to change perf mode\n");
+	msleep(1);
+
+	ironlake_set_drps(dev, fstart);
+
+	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+		I915_READ(0x112e0);
+	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
+	dev_priv->last_count2 = I915_READ(0x112f4);
+	getrawmonotonic(&dev_priv->last_time2);
+}
+
+void ironlake_disable_drps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u16 rgvswctl = I915_READ16(MEMSWCTL);
+
+	/* Ack interrupts, disable EFC interrupt */
+	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+	I915_WRITE(DEIIR, DE_PCU_EVENT);
+	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+	/* Go back to the starting frequency */
+	ironlake_set_drps(dev, dev_priv->fstart);
+	msleep(1);
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+	msleep(1);
+}
+
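+/* Hand a new frequency request to the Gen6+ turbo logic; only the
+ * 10-bit frequency field of GEN6_RPNSWREQ is populated here. */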
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 swreq;
+
+	swreq = (val & 0x3ff) << 25;
+	I915_WRITE(GEN6_RPNSWREQ, swreq);
+}
+
+void gen6_disable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+	I915_WRITE(GEN6_PMIER, 0);
+	/* Complete PM interrupt masking here doesn't race with the rps work
+	 * item again unmasking PM interrupts because that is using a different
+	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
+	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+
+	spin_lock_irq(&dev_priv->rps_lock);
+	dev_priv->pm_iir = 0;
+	spin_unlock_irq(&dev_priv->rps_lock);
+
+	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
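+/*
+ * Pick which RC6 states to use, as a mask of INTEL_RC6_ENABLE,
+ * INTEL_RC6p_ENABLE and INTEL_RC6pp_ENABLE bits; a non-negative
+ * i915_enable_rc6 module parameter overrides the per-platform defaults.
+ */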
+int intel_enable_rc6(const struct drm_device *dev)
+{
+	/*
+	 * Respect the kernel parameter if it is set
+	 */
+	if (i915_enable_rc6 >= 0)
+		return i915_enable_rc6;
+
+	/*
+	 * Disable RC6 on Ironlake
+	 */
+	if (INTEL_INFO(dev)->gen == 5)
+		return 0;
+
+	/* Sorry Haswell, no RC6 for you for now. */
+	if (IS_HASWELL(dev))
+		return 0;
+
+	/*
+	 * Enable only plain RC6 on Sandybridge; deep RC6 stays disabled there.
+	 */
+	if (INTEL_INFO(dev)->gen == 6) {
+		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+		return INTEL_RC6_ENABLE;
+	}
+	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
+	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+}
+
+void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+	u32 pcu_mbox, rc6_mask = 0;
+	u32 gtfifodbg;
+	int cur_freq, min_freq, max_freq;
+	int rc6_mode;
+	int i;
+
+	/* Here begins a magic sequence of register writes to enable
+	 * auto-downclocking.
+	 *
+	 * Perhaps there might be some value in exposing these to
+	 * userspace...
+	 */
+	I915_WRITE(GEN6_RC_STATE, 0);
+	mutex_lock(&dev_priv->dev->struct_mutex);
+
+	/* Clear the DBG now so we don't confuse earlier errors */
+	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+		I915_WRITE(GTFIFODBG, gtfifodbg);
+	}
+
+	gen6_gt_force_wake_get(dev_priv);
+
+	/* disable the counters and set deterministic thresholds */
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
+
+	I915_WRITE(GEN6_RC_SLEEP, 0);
+	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+	rc6_mode = intel_enable_rc6(dev_priv->dev);
+	if (rc6_mode & INTEL_RC6_ENABLE)
+		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+	if (rc6_mode & INTEL_RC6p_ENABLE)
+		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+	if (rc6_mode & INTEL_RC6pp_ENABLE)
+		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+
+	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+		 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
+		 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
+		 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
+
+	I915_WRITE(GEN6_RC_CONTROL,
+		   rc6_mask |
+		   GEN6_RC_CTL_EI_MODE(1) |
+		   GEN6_RC_CTL_HW_ENABLE);
+
+	I915_WRITE(GEN6_RPNSWREQ,
+		   GEN6_FREQUENCY(10) |
+		   GEN6_OFFSET(0) |
+		   GEN6_AGGRESSIVE_TURBO);
+	I915_WRITE(GEN6_RC_VIDEO_FREQ,
+		   GEN6_FREQUENCY(12));
+
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+		   18 << 24 |
+		   6 << 16);
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
+	I915_WRITE(GEN6_RP_UP_EI, 100000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_MEDIA_HW_MODE |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_CONT);
+
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+
+	I915_WRITE(GEN6_PCODE_DATA, 0);
+	I915_WRITE(GEN6_PCODE_MAILBOX,
+		   GEN6_PCODE_READY |
+		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+
+	min_freq = (rp_state_cap & 0xff0000) >> 16;
+	max_freq = rp_state_cap & 0xff;
+	cur_freq = (gt_perf_status & 0xff00) >> 8;
+
+	/* Check for overclock support */
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
+	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+	if (pcu_mbox & (1<<31)) { /* OC supported */
+		max_freq = pcu_mbox & 0xff;
+		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 50);
+	}
+
+	/* In units of 100MHz */
+	dev_priv->max_delay = max_freq;
+	dev_priv->min_delay = min_freq;
+	dev_priv->cur_delay = cur_freq;
+
+	/* requires MSI enabled */
+	I915_WRITE(GEN6_PMIER,
+		   GEN6_PM_MBOX_EVENT |
+		   GEN6_PM_THERMAL_EVENT |
+		   GEN6_PM_RP_DOWN_TIMEOUT |
+		   GEN6_PM_RP_UP_THRESHOLD |
+		   GEN6_PM_RP_DOWN_THRESHOLD |
+		   GEN6_PM_RP_UP_EI_EXPIRED |
+		   GEN6_PM_RP_DOWN_EI_EXPIRED);
+	spin_lock_irq(&dev_priv->rps_lock);
+	WARN_ON(dev_priv->pm_iir != 0);
+	I915_WRITE(GEN6_PMIMR, 0);
+	spin_unlock_irq(&dev_priv->rps_lock);
+	/* enable all PM interrupts */
+	I915_WRITE(GEN6_PMINTRMSK, 0);
+
+	gen6_gt_force_wake_put(dev_priv);
+	mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
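+/*
+ * For each GPU frequency step below the maximum, the requested IA
+ * reference drops by scaling_factor/2 MHz; e.g. with max_ia_freq =
+ * 3000 MHz and diff = 4, ia_freq = 3000 - (4 * 180) / 2 = 2640 MHz,
+ * i.e. 26 once rounded to the 100 MHz units the PCU mailbox expects.
+ */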
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+{
+	int min_freq = 15;
+	int gpu_freq, ia_freq, max_ia_freq;
+	int scaling_factor = 180;
+
+	max_ia_freq = cpufreq_quick_get_max(0);
+	/*
+	 * Default to measured freq if none found, PCU will ensure we don't go
+	 * over
+	 */
+	if (!max_ia_freq)
+		max_ia_freq = tsc_khz;
+
+	/* Convert from kHz to MHz */
+	max_ia_freq /= 1000;
+
+	mutex_lock(&dev_priv->dev->struct_mutex);
+
+	/*
+	 * For each potential GPU frequency, load a ring frequency we'd like
+	 * to use for memory access. We do this by specifying the IA frequency
+	 * the PCU should use as a reference to determine the ring frequency.
+	 */
+	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+	     gpu_freq--) {
+		int diff = dev_priv->max_delay - gpu_freq;
+
+		/*
+		 * For GPU frequencies less than 750MHz, just use the lowest
+		 * ring freq.
+		 */
+		if (gpu_freq < min_freq)
+			ia_freq = 800;
+		else
+			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+
+		I915_WRITE(GEN6_PCODE_DATA,
+			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
+			   gpu_freq);
+		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+			      GEN6_PCODE_READY) == 0, 10)) {
+			DRM_ERROR("pcode write of freq table timed out\n");
+			continue;
+		}
+	}
+
+	mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
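+/* Release the pinned context pages used for RC6 state save/restore. */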
+static void ironlake_teardown_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->renderctx) {
+		i915_gem_object_unpin(dev_priv->renderctx);
+		drm_gem_object_unreference(&dev_priv->renderctx->base);
+		dev_priv->renderctx = NULL;
+	}
+
+	if (dev_priv->pwrctx) {
+		i915_gem_object_unpin(dev_priv->pwrctx);
+		drm_gem_object_unreference(&dev_priv->pwrctx->base);
+		dev_priv->pwrctx = NULL;
+	}
+}
+
+void ironlake_disable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (I915_READ(PWRCTXA)) {
+		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+			 50);
+
+		I915_WRITE(PWRCTXA, 0);
+		POSTING_READ(PWRCTXA);
+
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+		POSTING_READ(RSTDBYCTL);
+	}
+
+	ironlake_teardown_rc6(dev);
+}
+
+static int ironlake_setup_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->renderctx == NULL)
+		dev_priv->renderctx = intel_alloc_context_page(dev);
+	if (!dev_priv->renderctx)
+		return -ENOMEM;
+
+	if (dev_priv->pwrctx == NULL)
+		dev_priv->pwrctx = intel_alloc_context_page(dev);
+	if (!dev_priv->pwrctx) {
+		ironlake_teardown_rc6(dev);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void ironlake_enable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	/* rc6 disabled by default due to repeated reports of hanging during
+	 * boot and resume.
+	 */
+	if (!intel_enable_rc6(dev))
+		return;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = ironlake_setup_rc6(dev);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return;
+	}
+
+	/*
+	 * GPU can automatically power down the render unit if given a page
+	 * to save state.
+	 */
+	ret = BEGIN_LP_RING(6);
+	if (ret) {
+		ironlake_teardown_rc6(dev);
+		mutex_unlock(&dev->struct_mutex);
+		return;
+	}
+
+	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+	OUT_RING(MI_SET_CONTEXT);
+	OUT_RING(dev_priv->renderctx->gtt_offset |
+		 MI_MM_SPACE_GTT |
+		 MI_SAVE_EXT_STATE_EN |
+		 MI_RESTORE_EXT_STATE_EN |
+		 MI_RESTORE_INHIBIT);
+	OUT_RING(MI_SUSPEND_FLUSH);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_FLUSH);
+	ADVANCE_LP_RING();
+
+	/*
+	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
+	 * does an implicit flush, combined with MI_FLUSH above, it should be
+	 * safe to assume that renderctx is valid
+	 */
+	ret = intel_wait_ring_idle(LP_RING(dev_priv));
+	if (ret) {
+		DRM_ERROR("failed to enable ironlake power savings\n");
+		ironlake_teardown_rc6(dev);
+		mutex_unlock(&dev->struct_mutex);
+		return;
+	}
+
+	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+	mutex_unlock(&dev->struct_mutex);
+}