diff options
author | Dave Airlie <airlied@redhat.com> | 2014-07-08 20:38:42 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2014-07-08 20:38:42 -0400 |
commit | ca5a1b9ba0fb5291b555a23b76dbe5f6c30bfd7a (patch) | |
tree | 9a012a2a610ad4e9500c8e4f0aa68ecdc23c4dba | |
parent | c7dbc6c9ae5c3baa3be755a228a349374d043b5b (diff) | |
parent | 34882298b93e998d5fccde852b860e8fbe6c8f6b (diff) |
Merge tag 'drm-intel-next-2014-06-20' of git://anongit.freedesktop.org/drm-intel into drm-next
- Accurate frontbuffer tracking and frontbuffer rendering invalidate, flush and
flip events. This is prep work for proper PSR support and should also be
useful for DRRS&fbc.
- Runtime suspend hardware on system suspend to support the new SOix sleep
states, from Jesse.
- PSR updates for broadwell (Rodrigo)
- Universal plane support for cursors (Matt Roper), including core drm patches.
- Prefault gtt mappings (Chris)
- baytrail write-enable pte bit support (Akash Goel)
- mmio based flips (Sourab Gupta) instead of blitter ring flips
- interrupt handling race fixes (Oscar Mateo)
And old, not yet merged features from the previous round:
- rps/turbo support for chv (Deepak)
- some other straggling chv patches (Ville)
- proper universal plane conversion for the primary plane (Matt Roper)
- ppgtt on vlv from Jesse
- pile of cleanups, little fixes for insane corner cases and improved debug
support all over
* tag 'drm-intel-next-2014-06-20' of git://anongit.freedesktop.org/drm-intel: (99 commits)
drm/i915: Update DRIVER_DATE to 20140620
drivers/i915: Fix unnoticed failure of init_ring_common()
drm/i915: Track frontbuffer invalidation/flushing
drm/i915: Use new frontbuffer bits to increase pll clock
drm/i915: don't take runtime PM reference around freeze/thaw
drm/i915: use runtime irq suspend/resume in freeze/thaw
drm/i915: Properly track domain of the fbcon fb
drm/i915: Print obj->frontbuffer_bits in debugfs output
drm/i915: Introduce accurate frontbuffer tracking
drm/i915: Drop schedule_back from psr_exit
drm/i915: Ditch intel_edp_psr_update
drm/i915: Drop unecessary complexity from psr_inactivate
drm/i915: Remove ctx->last_ring
drm/i915/chv: Ack interrupts before handling them (CHV)
drm/i915/bdw: Ack interrupts before handling them (GEN8)
drm/i915/vlv: Ack interrupts before handling them (VLV)
drm/i915: Ack interrupts before handling them (GEN5 - GEN7)
drm/i915: Don't BUG_ON in i915_gem_obj_offset
drm/i915: Grab dev->struct_mutex in i915_gem_pageflip_info
drm/i915: Add some L3 registers to the parser whitelist
...
Conflicts:
drivers/gpu/drm/i915/i915_drv.c
35 files changed, 2575 insertions, 920 deletions
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index bd7422676638..c808a092d824 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -41,6 +41,10 @@ | |||
41 | 41 | ||
42 | #include "drm_crtc_internal.h" | 42 | #include "drm_crtc_internal.h" |
43 | 43 | ||
44 | static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | ||
45 | struct drm_mode_fb_cmd2 *r, | ||
46 | struct drm_file *file_priv); | ||
47 | |||
44 | /** | 48 | /** |
45 | * drm_modeset_lock_all - take all modeset locks | 49 | * drm_modeset_lock_all - take all modeset locks |
46 | * @dev: drm device | 50 | * @dev: drm device |
@@ -723,7 +727,7 @@ DEFINE_WW_CLASS(crtc_ww_class); | |||
723 | */ | 727 | */ |
724 | int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, | 728 | int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, |
725 | struct drm_plane *primary, | 729 | struct drm_plane *primary, |
726 | void *cursor, | 730 | struct drm_plane *cursor, |
727 | const struct drm_crtc_funcs *funcs) | 731 | const struct drm_crtc_funcs *funcs) |
728 | { | 732 | { |
729 | struct drm_mode_config *config = &dev->mode_config; | 733 | struct drm_mode_config *config = &dev->mode_config; |
@@ -748,8 +752,11 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, | |||
748 | config->num_crtc++; | 752 | config->num_crtc++; |
749 | 753 | ||
750 | crtc->primary = primary; | 754 | crtc->primary = primary; |
755 | crtc->cursor = cursor; | ||
751 | if (primary) | 756 | if (primary) |
752 | primary->possible_crtcs = 1 << drm_crtc_index(crtc); | 757 | primary->possible_crtcs = 1 << drm_crtc_index(crtc); |
758 | if (cursor) | ||
759 | cursor->possible_crtcs = 1 << drm_crtc_index(crtc); | ||
753 | 760 | ||
754 | out: | 761 | out: |
755 | drm_modeset_unlock_all(dev); | 762 | drm_modeset_unlock_all(dev); |
@@ -2177,45 +2184,32 @@ out: | |||
2177 | return ret; | 2184 | return ret; |
2178 | } | 2185 | } |
2179 | 2186 | ||
2180 | /** | 2187 | /* |
2181 | * drm_mode_setplane - configure a plane's configuration | 2188 | * setplane_internal - setplane handler for internal callers |
2182 | * @dev: DRM device | ||
2183 | * @data: ioctl data* | ||
2184 | * @file_priv: DRM file info | ||
2185 | * | 2189 | * |
2186 | * Set plane configuration, including placement, fb, scaling, and other factors. | 2190 | * Note that we assume an extra reference has already been taken on fb. If the |
2187 | * Or pass a NULL fb to disable. | 2191 | * update fails, this reference will be dropped before return; if it succeeds, |
2192 | * the previous framebuffer (if any) will be unreferenced instead. | ||
2188 | * | 2193 | * |
2189 | * Returns: | 2194 | * src_{x,y,w,h} are provided in 16.16 fixed point format |
2190 | * Zero on success, errno on failure. | ||
2191 | */ | 2195 | */ |
2192 | int drm_mode_setplane(struct drm_device *dev, void *data, | 2196 | static int setplane_internal(struct drm_plane *plane, |
2193 | struct drm_file *file_priv) | 2197 | struct drm_crtc *crtc, |
2198 | struct drm_framebuffer *fb, | ||
2199 | int32_t crtc_x, int32_t crtc_y, | ||
2200 | uint32_t crtc_w, uint32_t crtc_h, | ||
2201 | /* src_{x,y,w,h} values are 16.16 fixed point */ | ||
2202 | uint32_t src_x, uint32_t src_y, | ||
2203 | uint32_t src_w, uint32_t src_h) | ||
2194 | { | 2204 | { |
2195 | struct drm_mode_set_plane *plane_req = data; | 2205 | struct drm_device *dev = plane->dev; |
2196 | struct drm_plane *plane; | 2206 | struct drm_framebuffer *old_fb = NULL; |
2197 | struct drm_crtc *crtc; | ||
2198 | struct drm_framebuffer *fb = NULL, *old_fb = NULL; | ||
2199 | int ret = 0; | 2207 | int ret = 0; |
2200 | unsigned int fb_width, fb_height; | 2208 | unsigned int fb_width, fb_height; |
2201 | int i; | 2209 | int i; |
2202 | 2210 | ||
2203 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
2204 | return -EINVAL; | ||
2205 | |||
2206 | /* | ||
2207 | * First, find the plane, crtc, and fb objects. If not available, | ||
2208 | * we don't bother to call the driver. | ||
2209 | */ | ||
2210 | plane = drm_plane_find(dev, plane_req->plane_id); | ||
2211 | if (!plane) { | ||
2212 | DRM_DEBUG_KMS("Unknown plane ID %d\n", | ||
2213 | plane_req->plane_id); | ||
2214 | return -ENOENT; | ||
2215 | } | ||
2216 | |||
2217 | /* No fb means shut it down */ | 2211 | /* No fb means shut it down */ |
2218 | if (!plane_req->fb_id) { | 2212 | if (!fb) { |
2219 | drm_modeset_lock_all(dev); | 2213 | drm_modeset_lock_all(dev); |
2220 | old_fb = plane->fb; | 2214 | old_fb = plane->fb; |
2221 | ret = plane->funcs->disable_plane(plane); | 2215 | ret = plane->funcs->disable_plane(plane); |
@@ -2229,14 +2223,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data, | |||
2229 | goto out; | 2223 | goto out; |
2230 | } | 2224 | } |
2231 | 2225 | ||
2232 | crtc = drm_crtc_find(dev, plane_req->crtc_id); | ||
2233 | if (!crtc) { | ||
2234 | DRM_DEBUG_KMS("Unknown crtc ID %d\n", | ||
2235 | plane_req->crtc_id); | ||
2236 | ret = -ENOENT; | ||
2237 | goto out; | ||
2238 | } | ||
2239 | |||
2240 | /* Check whether this plane is usable on this CRTC */ | 2226 | /* Check whether this plane is usable on this CRTC */ |
2241 | if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) { | 2227 | if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) { |
2242 | DRM_DEBUG_KMS("Invalid crtc for plane\n"); | 2228 | DRM_DEBUG_KMS("Invalid crtc for plane\n"); |
@@ -2244,14 +2230,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data, | |||
2244 | goto out; | 2230 | goto out; |
2245 | } | 2231 | } |
2246 | 2232 | ||
2247 | fb = drm_framebuffer_lookup(dev, plane_req->fb_id); | ||
2248 | if (!fb) { | ||
2249 | DRM_DEBUG_KMS("Unknown framebuffer ID %d\n", | ||
2250 | plane_req->fb_id); | ||
2251 | ret = -ENOENT; | ||
2252 | goto out; | ||
2253 | } | ||
2254 | |||
2255 | /* Check whether this plane supports the fb pixel format. */ | 2233 | /* Check whether this plane supports the fb pixel format. */ |
2256 | for (i = 0; i < plane->format_count; i++) | 2234 | for (i = 0; i < plane->format_count; i++) |
2257 | if (fb->pixel_format == plane->format_types[i]) | 2235 | if (fb->pixel_format == plane->format_types[i]) |
@@ -2267,43 +2245,25 @@ int drm_mode_setplane(struct drm_device *dev, void *data, | |||
2267 | fb_height = fb->height << 16; | 2245 | fb_height = fb->height << 16; |
2268 | 2246 | ||
2269 | /* Make sure source coordinates are inside the fb. */ | 2247 | /* Make sure source coordinates are inside the fb. */ |
2270 | if (plane_req->src_w > fb_width || | 2248 | if (src_w > fb_width || |
2271 | plane_req->src_x > fb_width - plane_req->src_w || | 2249 | src_x > fb_width - src_w || |
2272 | plane_req->src_h > fb_height || | 2250 | src_h > fb_height || |
2273 | plane_req->src_y > fb_height - plane_req->src_h) { | 2251 | src_y > fb_height - src_h) { |
2274 | DRM_DEBUG_KMS("Invalid source coordinates " | 2252 | DRM_DEBUG_KMS("Invalid source coordinates " |
2275 | "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n", | 2253 | "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n", |
2276 | plane_req->src_w >> 16, | 2254 | src_w >> 16, ((src_w & 0xffff) * 15625) >> 10, |
2277 | ((plane_req->src_w & 0xffff) * 15625) >> 10, | 2255 | src_h >> 16, ((src_h & 0xffff) * 15625) >> 10, |
2278 | plane_req->src_h >> 16, | 2256 | src_x >> 16, ((src_x & 0xffff) * 15625) >> 10, |
2279 | ((plane_req->src_h & 0xffff) * 15625) >> 10, | 2257 | src_y >> 16, ((src_y & 0xffff) * 15625) >> 10); |
2280 | plane_req->src_x >> 16, | ||
2281 | ((plane_req->src_x & 0xffff) * 15625) >> 10, | ||
2282 | plane_req->src_y >> 16, | ||
2283 | ((plane_req->src_y & 0xffff) * 15625) >> 10); | ||
2284 | ret = -ENOSPC; | 2258 | ret = -ENOSPC; |
2285 | goto out; | 2259 | goto out; |
2286 | } | 2260 | } |
2287 | 2261 | ||
2288 | /* Give drivers some help against integer overflows */ | ||
2289 | if (plane_req->crtc_w > INT_MAX || | ||
2290 | plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w || | ||
2291 | plane_req->crtc_h > INT_MAX || | ||
2292 | plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) { | ||
2293 | DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", | ||
2294 | plane_req->crtc_w, plane_req->crtc_h, | ||
2295 | plane_req->crtc_x, plane_req->crtc_y); | ||
2296 | ret = -ERANGE; | ||
2297 | goto out; | ||
2298 | } | ||
2299 | |||
2300 | drm_modeset_lock_all(dev); | 2262 | drm_modeset_lock_all(dev); |
2301 | old_fb = plane->fb; | 2263 | old_fb = plane->fb; |
2302 | ret = plane->funcs->update_plane(plane, crtc, fb, | 2264 | ret = plane->funcs->update_plane(plane, crtc, fb, |
2303 | plane_req->crtc_x, plane_req->crtc_y, | 2265 | crtc_x, crtc_y, crtc_w, crtc_h, |
2304 | plane_req->crtc_w, plane_req->crtc_h, | 2266 | src_x, src_y, src_w, src_h); |
2305 | plane_req->src_x, plane_req->src_y, | ||
2306 | plane_req->src_w, plane_req->src_h); | ||
2307 | if (!ret) { | 2267 | if (!ret) { |
2308 | plane->crtc = crtc; | 2268 | plane->crtc = crtc; |
2309 | plane->fb = fb; | 2269 | plane->fb = fb; |
@@ -2320,6 +2280,85 @@ out: | |||
2320 | drm_framebuffer_unreference(old_fb); | 2280 | drm_framebuffer_unreference(old_fb); |
2321 | 2281 | ||
2322 | return ret; | 2282 | return ret; |
2283 | |||
2284 | } | ||
2285 | |||
2286 | /** | ||
2287 | * drm_mode_setplane - configure a plane's configuration | ||
2288 | * @dev: DRM device | ||
2289 | * @data: ioctl data* | ||
2290 | * @file_priv: DRM file info | ||
2291 | * | ||
2292 | * Set plane configuration, including placement, fb, scaling, and other factors. | ||
2293 | * Or pass a NULL fb to disable (planes may be disabled without providing a | ||
2294 | * valid crtc). | ||
2295 | * | ||
2296 | * Returns: | ||
2297 | * Zero on success, errno on failure. | ||
2298 | */ | ||
2299 | int drm_mode_setplane(struct drm_device *dev, void *data, | ||
2300 | struct drm_file *file_priv) | ||
2301 | { | ||
2302 | struct drm_mode_set_plane *plane_req = data; | ||
2303 | struct drm_mode_object *obj; | ||
2304 | struct drm_plane *plane; | ||
2305 | struct drm_crtc *crtc = NULL; | ||
2306 | struct drm_framebuffer *fb = NULL; | ||
2307 | |||
2308 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
2309 | return -EINVAL; | ||
2310 | |||
2311 | /* Give drivers some help against integer overflows */ | ||
2312 | if (plane_req->crtc_w > INT_MAX || | ||
2313 | plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w || | ||
2314 | plane_req->crtc_h > INT_MAX || | ||
2315 | plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) { | ||
2316 | DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", | ||
2317 | plane_req->crtc_w, plane_req->crtc_h, | ||
2318 | plane_req->crtc_x, plane_req->crtc_y); | ||
2319 | return -ERANGE; | ||
2320 | } | ||
2321 | |||
2322 | /* | ||
2323 | * First, find the plane, crtc, and fb objects. If not available, | ||
2324 | * we don't bother to call the driver. | ||
2325 | */ | ||
2326 | obj = drm_mode_object_find(dev, plane_req->plane_id, | ||
2327 | DRM_MODE_OBJECT_PLANE); | ||
2328 | if (!obj) { | ||
2329 | DRM_DEBUG_KMS("Unknown plane ID %d\n", | ||
2330 | plane_req->plane_id); | ||
2331 | return -ENOENT; | ||
2332 | } | ||
2333 | plane = obj_to_plane(obj); | ||
2334 | |||
2335 | if (plane_req->fb_id) { | ||
2336 | fb = drm_framebuffer_lookup(dev, plane_req->fb_id); | ||
2337 | if (!fb) { | ||
2338 | DRM_DEBUG_KMS("Unknown framebuffer ID %d\n", | ||
2339 | plane_req->fb_id); | ||
2340 | return -ENOENT; | ||
2341 | } | ||
2342 | |||
2343 | obj = drm_mode_object_find(dev, plane_req->crtc_id, | ||
2344 | DRM_MODE_OBJECT_CRTC); | ||
2345 | if (!obj) { | ||
2346 | DRM_DEBUG_KMS("Unknown crtc ID %d\n", | ||
2347 | plane_req->crtc_id); | ||
2348 | return -ENOENT; | ||
2349 | } | ||
2350 | crtc = obj_to_crtc(obj); | ||
2351 | } | ||
2352 | |||
2353 | /* | ||
2354 | * setplane_internal will take care of deref'ing either the old or new | ||
2355 | * framebuffer depending on success. | ||
2356 | */ | ||
2357 | return setplane_internal(plane, crtc, fb, | ||
2358 | plane_req->crtc_x, plane_req->crtc_y, | ||
2359 | plane_req->crtc_w, plane_req->crtc_h, | ||
2360 | plane_req->src_x, plane_req->src_y, | ||
2361 | plane_req->src_w, plane_req->src_h); | ||
2323 | } | 2362 | } |
2324 | 2363 | ||
2325 | /** | 2364 | /** |
@@ -2568,6 +2607,102 @@ out: | |||
2568 | return ret; | 2607 | return ret; |
2569 | } | 2608 | } |
2570 | 2609 | ||
2610 | /** | ||
2611 | * drm_mode_cursor_universal - translate legacy cursor ioctl call into a | ||
2612 | * universal plane handler call | ||
2613 | * @crtc: crtc to update cursor for | ||
2614 | * @req: data pointer for the ioctl | ||
2615 | * @file_priv: drm file for the ioctl call | ||
2616 | * | ||
2617 | * Legacy cursor ioctl's work directly with driver buffer handles. To | ||
2618 | * translate legacy ioctl calls into universal plane handler calls, we need to | ||
2619 | * wrap the native buffer handle in a drm_framebuffer. | ||
2620 | * | ||
2621 | * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB | ||
2622 | * buffer with a pitch of 4*width; the universal plane interface should be used | ||
2623 | * directly in cases where the hardware can support other buffer settings and | ||
2624 | * userspace wants to make use of these capabilities. | ||
2625 | * | ||
2626 | * Returns: | ||
2627 | * Zero on success, errno on failure. | ||
2628 | */ | ||
2629 | static int drm_mode_cursor_universal(struct drm_crtc *crtc, | ||
2630 | struct drm_mode_cursor2 *req, | ||
2631 | struct drm_file *file_priv) | ||
2632 | { | ||
2633 | struct drm_device *dev = crtc->dev; | ||
2634 | struct drm_framebuffer *fb = NULL; | ||
2635 | struct drm_mode_fb_cmd2 fbreq = { | ||
2636 | .width = req->width, | ||
2637 | .height = req->height, | ||
2638 | .pixel_format = DRM_FORMAT_ARGB8888, | ||
2639 | .pitches = { req->width * 4 }, | ||
2640 | .handles = { req->handle }, | ||
2641 | }; | ||
2642 | int32_t crtc_x, crtc_y; | ||
2643 | uint32_t crtc_w = 0, crtc_h = 0; | ||
2644 | uint32_t src_w = 0, src_h = 0; | ||
2645 | int ret = 0; | ||
2646 | |||
2647 | BUG_ON(!crtc->cursor); | ||
2648 | |||
2649 | /* | ||
2650 | * Obtain fb we'll be using (either new or existing) and take an extra | ||
2651 | * reference to it if fb != null. setplane will take care of dropping | ||
2652 | * the reference if the plane update fails. | ||
2653 | */ | ||
2654 | if (req->flags & DRM_MODE_CURSOR_BO) { | ||
2655 | if (req->handle) { | ||
2656 | fb = add_framebuffer_internal(dev, &fbreq, file_priv); | ||
2657 | if (IS_ERR(fb)) { | ||
2658 | DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); | ||
2659 | return PTR_ERR(fb); | ||
2660 | } | ||
2661 | |||
2662 | drm_framebuffer_reference(fb); | ||
2663 | } else { | ||
2664 | fb = NULL; | ||
2665 | } | ||
2666 | } else { | ||
2667 | mutex_lock(&dev->mode_config.mutex); | ||
2668 | fb = crtc->cursor->fb; | ||
2669 | if (fb) | ||
2670 | drm_framebuffer_reference(fb); | ||
2671 | mutex_unlock(&dev->mode_config.mutex); | ||
2672 | } | ||
2673 | |||
2674 | if (req->flags & DRM_MODE_CURSOR_MOVE) { | ||
2675 | crtc_x = req->x; | ||
2676 | crtc_y = req->y; | ||
2677 | } else { | ||
2678 | crtc_x = crtc->cursor_x; | ||
2679 | crtc_y = crtc->cursor_y; | ||
2680 | } | ||
2681 | |||
2682 | if (fb) { | ||
2683 | crtc_w = fb->width; | ||
2684 | crtc_h = fb->height; | ||
2685 | src_w = fb->width << 16; | ||
2686 | src_h = fb->height << 16; | ||
2687 | } | ||
2688 | |||
2689 | /* | ||
2690 | * setplane_internal will take care of deref'ing either the old or new | ||
2691 | * framebuffer depending on success. | ||
2692 | */ | ||
2693 | ret = setplane_internal(crtc->cursor, crtc, fb, | ||
2694 | crtc_x, crtc_y, crtc_w, crtc_h, | ||
2695 | 0, 0, src_w, src_h); | ||
2696 | |||
2697 | /* Update successful; save new cursor position, if necessary */ | ||
2698 | if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) { | ||
2699 | crtc->cursor_x = req->x; | ||
2700 | crtc->cursor_y = req->y; | ||
2701 | } | ||
2702 | |||
2703 | return ret; | ||
2704 | } | ||
2705 | |||
2571 | static int drm_mode_cursor_common(struct drm_device *dev, | 2706 | static int drm_mode_cursor_common(struct drm_device *dev, |
2572 | struct drm_mode_cursor2 *req, | 2707 | struct drm_mode_cursor2 *req, |
2573 | struct drm_file *file_priv) | 2708 | struct drm_file *file_priv) |
@@ -2587,6 +2722,13 @@ static int drm_mode_cursor_common(struct drm_device *dev, | |||
2587 | return -ENOENT; | 2722 | return -ENOENT; |
2588 | } | 2723 | } |
2589 | 2724 | ||
2725 | /* | ||
2726 | * If this crtc has a universal cursor plane, call that plane's update | ||
2727 | * handler rather than using legacy cursor handlers. | ||
2728 | */ | ||
2729 | if (crtc->cursor) | ||
2730 | return drm_mode_cursor_universal(crtc, req, file_priv); | ||
2731 | |||
2590 | drm_modeset_lock(&crtc->mutex, NULL); | 2732 | drm_modeset_lock(&crtc->mutex, NULL); |
2591 | if (req->flags & DRM_MODE_CURSOR_BO) { | 2733 | if (req->flags & DRM_MODE_CURSOR_BO) { |
2592 | if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) { | 2734 | if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) { |
@@ -2886,56 +3028,38 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) | |||
2886 | return 0; | 3028 | return 0; |
2887 | } | 3029 | } |
2888 | 3030 | ||
2889 | /** | 3031 | static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, |
2890 | * drm_mode_addfb2 - add an FB to the graphics configuration | 3032 | struct drm_mode_fb_cmd2 *r, |
2891 | * @dev: drm device for the ioctl | 3033 | struct drm_file *file_priv) |
2892 | * @data: data pointer for the ioctl | ||
2893 | * @file_priv: drm file for the ioctl call | ||
2894 | * | ||
2895 | * Add a new FB to the specified CRTC, given a user request with format. This is | ||
2896 | * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers | ||
2897 | * and uses fourcc codes as pixel format specifiers. | ||
2898 | * | ||
2899 | * Called by the user via ioctl. | ||
2900 | * | ||
2901 | * Returns: | ||
2902 | * Zero on success, errno on failure. | ||
2903 | */ | ||
2904 | int drm_mode_addfb2(struct drm_device *dev, | ||
2905 | void *data, struct drm_file *file_priv) | ||
2906 | { | 3034 | { |
2907 | struct drm_mode_fb_cmd2 *r = data; | ||
2908 | struct drm_mode_config *config = &dev->mode_config; | 3035 | struct drm_mode_config *config = &dev->mode_config; |
2909 | struct drm_framebuffer *fb; | 3036 | struct drm_framebuffer *fb; |
2910 | int ret; | 3037 | int ret; |
2911 | 3038 | ||
2912 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
2913 | return -EINVAL; | ||
2914 | |||
2915 | if (r->flags & ~DRM_MODE_FB_INTERLACED) { | 3039 | if (r->flags & ~DRM_MODE_FB_INTERLACED) { |
2916 | DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); | 3040 | DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); |
2917 | return -EINVAL; | 3041 | return ERR_PTR(-EINVAL); |
2918 | } | 3042 | } |
2919 | 3043 | ||
2920 | if ((config->min_width > r->width) || (r->width > config->max_width)) { | 3044 | if ((config->min_width > r->width) || (r->width > config->max_width)) { |
2921 | DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", | 3045 | DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", |
2922 | r->width, config->min_width, config->max_width); | 3046 | r->width, config->min_width, config->max_width); |
2923 | return -EINVAL; | 3047 | return ERR_PTR(-EINVAL); |
2924 | } | 3048 | } |
2925 | if ((config->min_height > r->height) || (r->height > config->max_height)) { | 3049 | if ((config->min_height > r->height) || (r->height > config->max_height)) { |
2926 | DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n", | 3050 | DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n", |
2927 | r->height, config->min_height, config->max_height); | 3051 | r->height, config->min_height, config->max_height); |
2928 | return -EINVAL; | 3052 | return ERR_PTR(-EINVAL); |
2929 | } | 3053 | } |
2930 | 3054 | ||
2931 | ret = framebuffer_check(r); | 3055 | ret = framebuffer_check(r); |
2932 | if (ret) | 3056 | if (ret) |
2933 | return ret; | 3057 | return ERR_PTR(ret); |
2934 | 3058 | ||
2935 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); | 3059 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); |
2936 | if (IS_ERR(fb)) { | 3060 | if (IS_ERR(fb)) { |
2937 | DRM_DEBUG_KMS("could not create framebuffer\n"); | 3061 | DRM_DEBUG_KMS("could not create framebuffer\n"); |
2938 | return PTR_ERR(fb); | 3062 | return fb; |
2939 | } | 3063 | } |
2940 | 3064 | ||
2941 | mutex_lock(&file_priv->fbs_lock); | 3065 | mutex_lock(&file_priv->fbs_lock); |
@@ -2944,8 +3068,37 @@ int drm_mode_addfb2(struct drm_device *dev, | |||
2944 | DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); | 3068 | DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); |
2945 | mutex_unlock(&file_priv->fbs_lock); | 3069 | mutex_unlock(&file_priv->fbs_lock); |
2946 | 3070 | ||
3071 | return fb; | ||
3072 | } | ||
2947 | 3073 | ||
2948 | return ret; | 3074 | /** |
3075 | * drm_mode_addfb2 - add an FB to the graphics configuration | ||
3076 | * @dev: drm device for the ioctl | ||
3077 | * @data: data pointer for the ioctl | ||
3078 | * @file_priv: drm file for the ioctl call | ||
3079 | * | ||
3080 | * Add a new FB to the specified CRTC, given a user request with format. This is | ||
3081 | * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers | ||
3082 | * and uses fourcc codes as pixel format specifiers. | ||
3083 | * | ||
3084 | * Called by the user via ioctl. | ||
3085 | * | ||
3086 | * Returns: | ||
3087 | * Zero on success, errno on failure. | ||
3088 | */ | ||
3089 | int drm_mode_addfb2(struct drm_device *dev, | ||
3090 | void *data, struct drm_file *file_priv) | ||
3091 | { | ||
3092 | struct drm_framebuffer *fb; | ||
3093 | |||
3094 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
3095 | return -EINVAL; | ||
3096 | |||
3097 | fb = add_framebuffer_internal(dev, data, file_priv); | ||
3098 | if (IS_ERR(fb)) | ||
3099 | return PTR_ERR(fb); | ||
3100 | |||
3101 | return 0; | ||
2949 | } | 3102 | } |
2950 | 3103 | ||
2951 | /** | 3104 | /** |
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 9d7954366bd2..dea99d92fb4a 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c | |||
@@ -426,6 +426,9 @@ static const u32 gen7_render_regs[] = { | |||
426 | GEN7_SO_WRITE_OFFSET(1), | 426 | GEN7_SO_WRITE_OFFSET(1), |
427 | GEN7_SO_WRITE_OFFSET(2), | 427 | GEN7_SO_WRITE_OFFSET(2), |
428 | GEN7_SO_WRITE_OFFSET(3), | 428 | GEN7_SO_WRITE_OFFSET(3), |
429 | GEN7_L3SQCREG1, | ||
430 | GEN7_L3CNTLREG2, | ||
431 | GEN7_L3CNTLREG3, | ||
429 | }; | 432 | }; |
430 | 433 | ||
431 | static const u32 gen7_blt_regs[] = { | 434 | static const u32 gen7_blt_regs[] = { |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b8c689202c40..a93b3bfdad61 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -170,6 +170,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | |||
170 | } | 170 | } |
171 | if (obj->ring != NULL) | 171 | if (obj->ring != NULL) |
172 | seq_printf(m, " (%s)", obj->ring->name); | 172 | seq_printf(m, " (%s)", obj->ring->name); |
173 | if (obj->frontbuffer_bits) | ||
174 | seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); | ||
173 | } | 175 | } |
174 | 176 | ||
175 | static void describe_ctx(struct seq_file *m, struct intel_context *ctx) | 177 | static void describe_ctx(struct seq_file *m, struct intel_context *ctx) |
@@ -515,6 +517,11 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) | |||
515 | struct drm_device *dev = node->minor->dev; | 517 | struct drm_device *dev = node->minor->dev; |
516 | unsigned long flags; | 518 | unsigned long flags; |
517 | struct intel_crtc *crtc; | 519 | struct intel_crtc *crtc; |
520 | int ret; | ||
521 | |||
522 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
523 | if (ret) | ||
524 | return ret; | ||
518 | 525 | ||
519 | for_each_intel_crtc(dev, crtc) { | 526 | for_each_intel_crtc(dev, crtc) { |
520 | const char pipe = pipe_name(crtc->pipe); | 527 | const char pipe = pipe_name(crtc->pipe); |
@@ -556,6 +563,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) | |||
556 | spin_unlock_irqrestore(&dev->event_lock, flags); | 563 | spin_unlock_irqrestore(&dev->event_lock, flags); |
557 | } | 564 | } |
558 | 565 | ||
566 | mutex_unlock(&dev->struct_mutex); | ||
567 | |||
559 | return 0; | 568 | return 0; |
560 | } | 569 | } |
561 | 570 | ||
@@ -1029,7 +1038,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
1029 | MEMSTAT_VID_SHIFT); | 1038 | MEMSTAT_VID_SHIFT); |
1030 | seq_printf(m, "Current P-state: %d\n", | 1039 | seq_printf(m, "Current P-state: %d\n", |
1031 | (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); | 1040 | (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); |
1032 | } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { | 1041 | } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) || |
1042 | IS_BROADWELL(dev)) { | ||
1033 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | 1043 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); |
1034 | u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); | 1044 | u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); |
1035 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 1045 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
@@ -1048,7 +1058,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
1048 | 1058 | ||
1049 | reqf = I915_READ(GEN6_RPNSWREQ); | 1059 | reqf = I915_READ(GEN6_RPNSWREQ); |
1050 | reqf &= ~GEN6_TURBO_DISABLE; | 1060 | reqf &= ~GEN6_TURBO_DISABLE; |
1051 | if (IS_HASWELL(dev)) | 1061 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
1052 | reqf >>= 24; | 1062 | reqf >>= 24; |
1053 | else | 1063 | else |
1054 | reqf >>= 25; | 1064 | reqf >>= 25; |
@@ -1065,7 +1075,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
1065 | rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); | 1075 | rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); |
1066 | rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); | 1076 | rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); |
1067 | rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); | 1077 | rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); |
1068 | if (IS_HASWELL(dev)) | 1078 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
1069 | cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; | 1079 | cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; |
1070 | else | 1080 | else |
1071 | cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; | 1081 | cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; |
@@ -1677,9 +1687,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) | |||
1677 | 1687 | ||
1678 | #ifdef CONFIG_DRM_I915_FBDEV | 1688 | #ifdef CONFIG_DRM_I915_FBDEV |
1679 | struct drm_i915_private *dev_priv = dev->dev_private; | 1689 | struct drm_i915_private *dev_priv = dev->dev_private; |
1680 | int ret = mutex_lock_interruptible(&dev->mode_config.mutex); | ||
1681 | if (ret) | ||
1682 | return ret; | ||
1683 | 1690 | ||
1684 | ifbdev = dev_priv->fbdev; | 1691 | ifbdev = dev_priv->fbdev; |
1685 | fb = to_intel_framebuffer(ifbdev->helper.fb); | 1692 | fb = to_intel_framebuffer(ifbdev->helper.fb); |
@@ -1692,7 +1699,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) | |||
1692 | atomic_read(&fb->base.refcount.refcount)); | 1699 | atomic_read(&fb->base.refcount.refcount)); |
1693 | describe_obj(m, fb->obj); | 1700 | describe_obj(m, fb->obj); |
1694 | seq_putc(m, '\n'); | 1701 | seq_putc(m, '\n'); |
1695 | mutex_unlock(&dev->mode_config.mutex); | ||
1696 | #endif | 1702 | #endif |
1697 | 1703 | ||
1698 | mutex_lock(&dev->mode_config.fb_lock); | 1704 | mutex_lock(&dev->mode_config.fb_lock); |
@@ -1723,7 +1729,7 @@ static int i915_context_status(struct seq_file *m, void *unused) | |||
1723 | struct intel_context *ctx; | 1729 | struct intel_context *ctx; |
1724 | int ret, i; | 1730 | int ret, i; |
1725 | 1731 | ||
1726 | ret = mutex_lock_interruptible(&dev->mode_config.mutex); | 1732 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
1727 | if (ret) | 1733 | if (ret) |
1728 | return ret; | 1734 | return ret; |
1729 | 1735 | ||
@@ -1753,7 +1759,7 @@ static int i915_context_status(struct seq_file *m, void *unused) | |||
1753 | seq_putc(m, '\n'); | 1759 | seq_putc(m, '\n'); |
1754 | } | 1760 | } |
1755 | 1761 | ||
1756 | mutex_unlock(&dev->mode_config.mutex); | 1762 | mutex_unlock(&dev->struct_mutex); |
1757 | 1763 | ||
1758 | return 0; | 1764 | return 0; |
1759 | } | 1765 | } |
@@ -1978,10 +1984,12 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) | |||
1978 | 1984 | ||
1979 | seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); | 1985 | seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); |
1980 | seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); | 1986 | seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); |
1987 | seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled)); | ||
1988 | seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); | ||
1981 | 1989 | ||
1982 | enabled = HAS_PSR(dev) && | 1990 | enabled = HAS_PSR(dev) && |
1983 | I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; | 1991 | I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; |
1984 | seq_printf(m, "Enabled: %s\n", yesno(enabled)); | 1992 | seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled)); |
1985 | 1993 | ||
1986 | if (HAS_PSR(dev)) | 1994 | if (HAS_PSR(dev)) |
1987 | psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & | 1995 | psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & |
@@ -2223,9 +2231,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) | |||
2223 | struct drm_crtc *crtc = &intel_crtc->base; | 2231 | struct drm_crtc *crtc = &intel_crtc->base; |
2224 | struct intel_encoder *intel_encoder; | 2232 | struct intel_encoder *intel_encoder; |
2225 | 2233 | ||
2226 | seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", | 2234 | if (crtc->primary->fb) |
2227 | crtc->primary->fb->base.id, crtc->x, crtc->y, | 2235 | seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", |
2228 | crtc->primary->fb->width, crtc->primary->fb->height); | 2236 | crtc->primary->fb->base.id, crtc->x, crtc->y, |
2237 | crtc->primary->fb->width, crtc->primary->fb->height); | ||
2238 | else | ||
2239 | seq_puts(m, "\tprimary plane disabled\n"); | ||
2229 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) | 2240 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) |
2230 | intel_encoder_info(m, intel_crtc, intel_encoder); | 2241 | intel_encoder_info(m, intel_crtc, intel_encoder); |
2231 | } | 2242 | } |
@@ -2929,11 +2940,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, | |||
2929 | /* real source -> none transition */ | 2940 | /* real source -> none transition */ |
2930 | if (source == INTEL_PIPE_CRC_SOURCE_NONE) { | 2941 | if (source == INTEL_PIPE_CRC_SOURCE_NONE) { |
2931 | struct intel_pipe_crc_entry *entries; | 2942 | struct intel_pipe_crc_entry *entries; |
2943 | struct intel_crtc *crtc = | ||
2944 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | ||
2932 | 2945 | ||
2933 | DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", | 2946 | DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", |
2934 | pipe_name(pipe)); | 2947 | pipe_name(pipe)); |
2935 | 2948 | ||
2936 | intel_wait_for_vblank(dev, pipe); | 2949 | drm_modeset_lock(&crtc->base.mutex, NULL); |
2950 | if (crtc->active) | ||
2951 | intel_wait_for_vblank(dev, pipe); | ||
2952 | drm_modeset_unlock(&crtc->base.mutex); | ||
2937 | 2953 | ||
2938 | spin_lock_irq(&pipe_crc->lock); | 2954 | spin_lock_irq(&pipe_crc->lock); |
2939 | entries = pipe_crc->entries; | 2955 | entries = pipe_crc->entries; |
@@ -3506,7 +3522,7 @@ i915_max_freq_get(void *data, u64 *val) | |||
3506 | struct drm_i915_private *dev_priv = dev->dev_private; | 3522 | struct drm_i915_private *dev_priv = dev->dev_private; |
3507 | int ret; | 3523 | int ret; |
3508 | 3524 | ||
3509 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) | 3525 | if (INTEL_INFO(dev)->gen < 6) |
3510 | return -ENODEV; | 3526 | return -ENODEV; |
3511 | 3527 | ||
3512 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | 3528 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); |
@@ -3532,7 +3548,7 @@ i915_max_freq_set(void *data, u64 val) | |||
3532 | u32 rp_state_cap, hw_max, hw_min; | 3548 | u32 rp_state_cap, hw_max, hw_min; |
3533 | int ret; | 3549 | int ret; |
3534 | 3550 | ||
3535 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) | 3551 | if (INTEL_INFO(dev)->gen < 6) |
3536 | return -ENODEV; | 3552 | return -ENODEV; |
3537 | 3553 | ||
3538 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | 3554 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); |
@@ -3587,7 +3603,7 @@ i915_min_freq_get(void *data, u64 *val) | |||
3587 | struct drm_i915_private *dev_priv = dev->dev_private; | 3603 | struct drm_i915_private *dev_priv = dev->dev_private; |
3588 | int ret; | 3604 | int ret; |
3589 | 3605 | ||
3590 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) | 3606 | if (INTEL_INFO(dev)->gen < 6) |
3591 | return -ENODEV; | 3607 | return -ENODEV; |
3592 | 3608 | ||
3593 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | 3609 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); |
@@ -3613,7 +3629,7 @@ i915_min_freq_set(void *data, u64 val) | |||
3613 | u32 rp_state_cap, hw_max, hw_min; | 3629 | u32 rp_state_cap, hw_max, hw_min; |
3614 | int ret; | 3630 | int ret; |
3615 | 3631 | ||
3616 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) | 3632 | if (INTEL_INFO(dev)->gen < 6) |
3617 | return -ENODEV; | 3633 | return -ENODEV; |
3618 | 3634 | ||
3619 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | 3635 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index cac9265f9757..84b55665bd87 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -138,7 +138,7 @@ static void i915_free_hws(struct drm_device *dev) | |||
138 | I915_WRITE(HWS_PGA, 0x1ffff000); | 138 | I915_WRITE(HWS_PGA, 0x1ffff000); |
139 | } | 139 | } |
140 | 140 | ||
141 | void i915_kernel_lost_context(struct drm_device * dev) | 141 | void i915_kernel_lost_context(struct drm_device *dev) |
142 | { | 142 | { |
143 | struct drm_i915_private *dev_priv = dev->dev_private; | 143 | struct drm_i915_private *dev_priv = dev->dev_private; |
144 | struct drm_i915_master_private *master_priv; | 144 | struct drm_i915_master_private *master_priv; |
@@ -166,7 +166,7 @@ void i915_kernel_lost_context(struct drm_device * dev) | |||
166 | master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; | 166 | master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; |
167 | } | 167 | } |
168 | 168 | ||
169 | static int i915_dma_cleanup(struct drm_device * dev) | 169 | static int i915_dma_cleanup(struct drm_device *dev) |
170 | { | 170 | { |
171 | struct drm_i915_private *dev_priv = dev->dev_private; | 171 | struct drm_i915_private *dev_priv = dev->dev_private; |
172 | int i; | 172 | int i; |
@@ -190,7 +190,7 @@ static int i915_dma_cleanup(struct drm_device * dev) | |||
190 | return 0; | 190 | return 0; |
191 | } | 191 | } |
192 | 192 | ||
193 | static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | 193 | static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init) |
194 | { | 194 | { |
195 | struct drm_i915_private *dev_priv = dev->dev_private; | 195 | struct drm_i915_private *dev_priv = dev->dev_private; |
196 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 196 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
@@ -235,7 +235,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
235 | return 0; | 235 | return 0; |
236 | } | 236 | } |
237 | 237 | ||
238 | static int i915_dma_resume(struct drm_device * dev) | 238 | static int i915_dma_resume(struct drm_device *dev) |
239 | { | 239 | { |
240 | struct drm_i915_private *dev_priv = dev->dev_private; | 240 | struct drm_i915_private *dev_priv = dev->dev_private; |
241 | struct intel_engine_cs *ring = LP_RING(dev_priv); | 241 | struct intel_engine_cs *ring = LP_RING(dev_priv); |
@@ -359,7 +359,7 @@ static int validate_cmd(int cmd) | |||
359 | return 0; | 359 | return 0; |
360 | } | 360 | } |
361 | 361 | ||
362 | static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) | 362 | static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords) |
363 | { | 363 | { |
364 | struct drm_i915_private *dev_priv = dev->dev_private; | 364 | struct drm_i915_private *dev_priv = dev->dev_private; |
365 | int i, ret; | 365 | int i, ret; |
@@ -369,6 +369,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) | |||
369 | 369 | ||
370 | for (i = 0; i < dwords;) { | 370 | for (i = 0; i < dwords;) { |
371 | int sz = validate_cmd(buffer[i]); | 371 | int sz = validate_cmd(buffer[i]); |
372 | |||
372 | if (sz == 0 || i + sz > dwords) | 373 | if (sz == 0 || i + sz > dwords) |
373 | return -EINVAL; | 374 | return -EINVAL; |
374 | i += sz; | 375 | i += sz; |
@@ -453,7 +454,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev) | |||
453 | } | 454 | } |
454 | } | 455 | } |
455 | 456 | ||
456 | static int i915_dispatch_cmdbuffer(struct drm_device * dev, | 457 | static int i915_dispatch_cmdbuffer(struct drm_device *dev, |
457 | drm_i915_cmdbuffer_t *cmd, | 458 | drm_i915_cmdbuffer_t *cmd, |
458 | struct drm_clip_rect *cliprects, | 459 | struct drm_clip_rect *cliprects, |
459 | void *cmdbuf) | 460 | void *cmdbuf) |
@@ -487,8 +488,8 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, | |||
487 | return 0; | 488 | return 0; |
488 | } | 489 | } |
489 | 490 | ||
490 | static int i915_dispatch_batchbuffer(struct drm_device * dev, | 491 | static int i915_dispatch_batchbuffer(struct drm_device *dev, |
491 | drm_i915_batchbuffer_t * batch, | 492 | drm_i915_batchbuffer_t *batch, |
492 | struct drm_clip_rect *cliprects) | 493 | struct drm_clip_rect *cliprects) |
493 | { | 494 | { |
494 | struct drm_i915_private *dev_priv = dev->dev_private; | 495 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -549,7 +550,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, | |||
549 | return 0; | 550 | return 0; |
550 | } | 551 | } |
551 | 552 | ||
552 | static int i915_dispatch_flip(struct drm_device * dev) | 553 | static int i915_dispatch_flip(struct drm_device *dev) |
553 | { | 554 | { |
554 | struct drm_i915_private *dev_priv = dev->dev_private; | 555 | struct drm_i915_private *dev_priv = dev->dev_private; |
555 | struct drm_i915_master_private *master_priv = | 556 | struct drm_i915_master_private *master_priv = |
@@ -755,7 +756,7 @@ fail_batch_free: | |||
755 | return ret; | 756 | return ret; |
756 | } | 757 | } |
757 | 758 | ||
758 | static int i915_emit_irq(struct drm_device * dev) | 759 | static int i915_emit_irq(struct drm_device *dev) |
759 | { | 760 | { |
760 | struct drm_i915_private *dev_priv = dev->dev_private; | 761 | struct drm_i915_private *dev_priv = dev->dev_private; |
761 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 762 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
@@ -781,7 +782,7 @@ static int i915_emit_irq(struct drm_device * dev) | |||
781 | return dev_priv->dri1.counter; | 782 | return dev_priv->dri1.counter; |
782 | } | 783 | } |
783 | 784 | ||
784 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) | 785 | static int i915_wait_irq(struct drm_device *dev, int irq_nr) |
785 | { | 786 | { |
786 | struct drm_i915_private *dev_priv = dev->dev_private; | 787 | struct drm_i915_private *dev_priv = dev->dev_private; |
787 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 788 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
@@ -1266,6 +1267,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ | |||
1266 | { | 1267 | { |
1267 | struct drm_device *dev = pci_get_drvdata(pdev); | 1268 | struct drm_device *dev = pci_get_drvdata(pdev); |
1268 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | 1269 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; |
1270 | |||
1269 | if (state == VGA_SWITCHEROO_ON) { | 1271 | if (state == VGA_SWITCHEROO_ON) { |
1270 | pr_info("switched on\n"); | 1272 | pr_info("switched on\n"); |
1271 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | 1273 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; |
@@ -1488,10 +1490,11 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv) | |||
1488 | #define SEP_EMPTY | 1490 | #define SEP_EMPTY |
1489 | #define PRINT_FLAG(name) info->name ? #name "," : "" | 1491 | #define PRINT_FLAG(name) info->name ? #name "," : "" |
1490 | #define SEP_COMMA , | 1492 | #define SEP_COMMA , |
1491 | DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" | 1493 | DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags=" |
1492 | DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY), | 1494 | DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY), |
1493 | info->gen, | 1495 | info->gen, |
1494 | dev_priv->dev->pdev->device, | 1496 | dev_priv->dev->pdev->device, |
1497 | dev_priv->dev->pdev->revision, | ||
1495 | DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)); | 1498 | DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)); |
1496 | #undef PRINT_S | 1499 | #undef PRINT_S |
1497 | #undef SEP_EMPTY | 1500 | #undef SEP_EMPTY |
@@ -1602,6 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1602 | spin_lock_init(&dev_priv->backlight_lock); | 1605 | spin_lock_init(&dev_priv->backlight_lock); |
1603 | spin_lock_init(&dev_priv->uncore.lock); | 1606 | spin_lock_init(&dev_priv->uncore.lock); |
1604 | spin_lock_init(&dev_priv->mm.object_stat_lock); | 1607 | spin_lock_init(&dev_priv->mm.object_stat_lock); |
1608 | spin_lock_init(&dev_priv->mmio_flip_lock); | ||
1605 | mutex_init(&dev_priv->dpio_lock); | 1609 | mutex_init(&dev_priv->dpio_lock); |
1606 | mutex_init(&dev_priv->modeset_restore_lock); | 1610 | mutex_init(&dev_priv->modeset_restore_lock); |
1607 | 1611 | ||
@@ -1929,7 +1933,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file) | |||
1929 | * and DMA structures, since the kernel won't be using them, and clea | 1933 | * and DMA structures, since the kernel won't be using them, and clea |
1930 | * up any GEM state. | 1934 | * up any GEM state. |
1931 | */ | 1935 | */ |
1932 | void i915_driver_lastclose(struct drm_device * dev) | 1936 | void i915_driver_lastclose(struct drm_device *dev) |
1933 | { | 1937 | { |
1934 | struct drm_i915_private *dev_priv = dev->dev_private; | 1938 | struct drm_i915_private *dev_priv = dev->dev_private; |
1935 | 1939 | ||
@@ -1950,7 +1954,7 @@ void i915_driver_lastclose(struct drm_device * dev) | |||
1950 | i915_dma_cleanup(dev); | 1954 | i915_dma_cleanup(dev); |
1951 | } | 1955 | } |
1952 | 1956 | ||
1953 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) | 1957 | void i915_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) |
1954 | { | 1958 | { |
1955 | mutex_lock(&dev->struct_mutex); | 1959 | mutex_lock(&dev->struct_mutex); |
1956 | i915_gem_context_close(dev, file_priv); | 1960 | i915_gem_context_close(dev, file_priv); |
@@ -2027,7 +2031,7 @@ int i915_max_ioctl = ARRAY_SIZE(i915_ioctls); | |||
2027 | * manage the gtt, we need to claim that all intel devices are agp. For | 2031 | * manage the gtt, we need to claim that all intel devices are agp. For |
2028 | * otherwise the drm core refuses to initialize the agp support code. | 2032 | * otherwise the drm core refuses to initialize the agp support code. |
2029 | */ | 2033 | */ |
2030 | int i915_driver_device_is_agp(struct drm_device * dev) | 2034 | int i915_driver_device_is_agp(struct drm_device *dev) |
2031 | { | 2035 | { |
2032 | return 1; | 2036 | return 1; |
2033 | } | 2037 | } |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index d935ab3718e1..b0955fffca98 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -28,6 +28,7 @@ | |||
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
31 | #include <linux/acpi.h> | ||
31 | #include <drm/drmP.h> | 32 | #include <drm/drmP.h> |
32 | #include <drm/i915_drm.h> | 33 | #include <drm/i915_drm.h> |
33 | #include "i915_drv.h" | 34 | #include "i915_drv.h" |
@@ -46,8 +47,6 @@ static struct drm_driver driver; | |||
46 | PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ | 47 | PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ |
47 | .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ | 48 | .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ |
48 | TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ | 49 | TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ |
49 | .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \ | ||
50 | .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \ | ||
51 | .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } | 50 | .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } |
52 | 51 | ||
53 | #define GEN_CHV_PIPEOFFSETS \ | 52 | #define GEN_CHV_PIPEOFFSETS \ |
@@ -55,10 +54,6 @@ static struct drm_driver driver; | |||
55 | CHV_PIPE_C_OFFSET }, \ | 54 | CHV_PIPE_C_OFFSET }, \ |
56 | .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ | 55 | .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ |
57 | CHV_TRANSCODER_C_OFFSET, }, \ | 56 | CHV_TRANSCODER_C_OFFSET, }, \ |
58 | .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \ | ||
59 | CHV_DPLL_C_OFFSET }, \ | ||
60 | .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \ | ||
61 | CHV_DPLL_C_MD_OFFSET }, \ | ||
62 | .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \ | 57 | .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \ |
63 | CHV_PALETTE_C_OFFSET } | 58 | CHV_PALETTE_C_OFFSET } |
64 | 59 | ||
@@ -499,8 +494,7 @@ static int i915_drm_freeze(struct drm_device *dev) | |||
499 | { | 494 | { |
500 | struct drm_i915_private *dev_priv = dev->dev_private; | 495 | struct drm_i915_private *dev_priv = dev->dev_private; |
501 | struct drm_crtc *crtc; | 496 | struct drm_crtc *crtc; |
502 | 497 | pci_power_t opregion_target_state; | |
503 | intel_runtime_pm_get(dev_priv); | ||
504 | 498 | ||
505 | /* ignore lid events during suspend */ | 499 | /* ignore lid events during suspend */ |
506 | mutex_lock(&dev_priv->modeset_restore_lock); | 500 | mutex_lock(&dev_priv->modeset_restore_lock); |
@@ -526,9 +520,9 @@ static int i915_drm_freeze(struct drm_device *dev) | |||
526 | return error; | 520 | return error; |
527 | } | 521 | } |
528 | 522 | ||
529 | drm_irq_uninstall(dev); | 523 | intel_runtime_pm_disable_interrupts(dev); |
530 | 524 | ||
531 | intel_disable_gt_powersave(dev); | 525 | intel_suspend_gt_powersave(dev); |
532 | 526 | ||
533 | /* | 527 | /* |
534 | * Disable CRTCs directly since we want to preserve sw state | 528 | * Disable CRTCs directly since we want to preserve sw state |
@@ -547,8 +541,14 @@ static int i915_drm_freeze(struct drm_device *dev) | |||
547 | 541 | ||
548 | i915_save_state(dev); | 542 | i915_save_state(dev); |
549 | 543 | ||
544 | if (acpi_target_system_state() >= ACPI_STATE_S3) | ||
545 | opregion_target_state = PCI_D3cold; | ||
546 | else | ||
547 | opregion_target_state = PCI_D1; | ||
548 | intel_opregion_notify_adapter(dev, opregion_target_state); | ||
549 | |||
550 | intel_uncore_forcewake_reset(dev, false); | ||
550 | intel_opregion_fini(dev); | 551 | intel_opregion_fini(dev); |
551 | intel_uncore_fini(dev); | ||
552 | 552 | ||
553 | console_lock(); | 553 | console_lock(); |
554 | intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED); | 554 | intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED); |
@@ -556,6 +556,8 @@ static int i915_drm_freeze(struct drm_device *dev) | |||
556 | 556 | ||
557 | dev_priv->suspend_count++; | 557 | dev_priv->suspend_count++; |
558 | 558 | ||
559 | intel_display_set_init_power(dev_priv, false); | ||
560 | |||
559 | return 0; | 561 | return 0; |
560 | } | 562 | } |
561 | 563 | ||
@@ -605,7 +607,10 @@ static int i915_drm_thaw_early(struct drm_device *dev) | |||
605 | { | 607 | { |
606 | struct drm_i915_private *dev_priv = dev->dev_private; | 608 | struct drm_i915_private *dev_priv = dev->dev_private; |
607 | 609 | ||
608 | intel_uncore_early_sanitize(dev); | 610 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
611 | hsw_disable_pc8(dev_priv); | ||
612 | |||
613 | intel_uncore_early_sanitize(dev, true); | ||
609 | intel_uncore_sanitize(dev); | 614 | intel_uncore_sanitize(dev); |
610 | intel_power_domains_init_hw(dev_priv); | 615 | intel_power_domains_init_hw(dev_priv); |
611 | 616 | ||
@@ -638,8 +643,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) | |||
638 | } | 643 | } |
639 | mutex_unlock(&dev->struct_mutex); | 644 | mutex_unlock(&dev->struct_mutex); |
640 | 645 | ||
641 | /* We need working interrupts for modeset enabling ... */ | 646 | intel_runtime_pm_restore_interrupts(dev); |
642 | drm_irq_install(dev, dev->pdev->irq); | ||
643 | 647 | ||
644 | intel_modeset_init_hw(dev); | 648 | intel_modeset_init_hw(dev); |
645 | 649 | ||
@@ -676,7 +680,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) | |||
676 | dev_priv->modeset_restore = MODESET_DONE; | 680 | dev_priv->modeset_restore = MODESET_DONE; |
677 | mutex_unlock(&dev_priv->modeset_restore_lock); | 681 | mutex_unlock(&dev_priv->modeset_restore_lock); |
678 | 682 | ||
679 | intel_runtime_pm_put(dev_priv); | 683 | intel_opregion_notify_adapter(dev, PCI_D0); |
684 | |||
680 | return 0; | 685 | return 0; |
681 | } | 686 | } |
682 | 687 | ||
@@ -885,6 +890,7 @@ static int i915_pm_suspend_late(struct device *dev) | |||
885 | { | 890 | { |
886 | struct pci_dev *pdev = to_pci_dev(dev); | 891 | struct pci_dev *pdev = to_pci_dev(dev); |
887 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 892 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
893 | struct drm_i915_private *dev_priv = drm_dev->dev_private; | ||
888 | 894 | ||
889 | /* | 895 | /* |
890 | * We have a suspedn ordering issue with the snd-hda driver also | 896 | * We have a suspedn ordering issue with the snd-hda driver also |
@@ -898,6 +904,9 @@ static int i915_pm_suspend_late(struct device *dev) | |||
898 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | 904 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
899 | return 0; | 905 | return 0; |
900 | 906 | ||
907 | if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev)) | ||
908 | hsw_enable_pc8(dev_priv); | ||
909 | |||
901 | pci_disable_device(pdev); | 910 | pci_disable_device(pdev); |
902 | pci_set_power_state(pdev, PCI_D3hot); | 911 | pci_set_power_state(pdev, PCI_D3hot); |
903 | 912 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5484f052d50c..6a1e990cb482 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -53,7 +53,7 @@ | |||
53 | 53 | ||
54 | #define DRIVER_NAME "i915" | 54 | #define DRIVER_NAME "i915" |
55 | #define DRIVER_DESC "Intel Graphics" | 55 | #define DRIVER_DESC "Intel Graphics" |
56 | #define DRIVER_DATE "20080730" | 56 | #define DRIVER_DATE "20140620" |
57 | 57 | ||
58 | enum pipe { | 58 | enum pipe { |
59 | INVALID_PIPE = -1, | 59 | INVALID_PIPE = -1, |
@@ -552,8 +552,6 @@ struct intel_device_info { | |||
552 | /* Register offsets for the various display pipes and transcoders */ | 552 | /* Register offsets for the various display pipes and transcoders */ |
553 | int pipe_offsets[I915_MAX_TRANSCODERS]; | 553 | int pipe_offsets[I915_MAX_TRANSCODERS]; |
554 | int trans_offsets[I915_MAX_TRANSCODERS]; | 554 | int trans_offsets[I915_MAX_TRANSCODERS]; |
555 | int dpll_offsets[I915_MAX_PIPES]; | ||
556 | int dpll_md_offsets[I915_MAX_PIPES]; | ||
557 | int palette_offsets[I915_MAX_PIPES]; | 555 | int palette_offsets[I915_MAX_PIPES]; |
558 | int cursor_offsets[I915_MAX_PIPES]; | 556 | int cursor_offsets[I915_MAX_PIPES]; |
559 | }; | 557 | }; |
@@ -593,7 +591,6 @@ struct intel_context { | |||
593 | bool is_initialized; | 591 | bool is_initialized; |
594 | uint8_t remap_slice; | 592 | uint8_t remap_slice; |
595 | struct drm_i915_file_private *file_priv; | 593 | struct drm_i915_file_private *file_priv; |
596 | struct intel_engine_cs *last_ring; | ||
597 | struct drm_i915_gem_object *obj; | 594 | struct drm_i915_gem_object *obj; |
598 | struct i915_ctx_hang_stats hang_stats; | 595 | struct i915_ctx_hang_stats hang_stats; |
599 | struct i915_address_space *vm; | 596 | struct i915_address_space *vm; |
@@ -638,6 +635,10 @@ struct i915_drrs { | |||
638 | struct i915_psr { | 635 | struct i915_psr { |
639 | bool sink_support; | 636 | bool sink_support; |
640 | bool source_ok; | 637 | bool source_ok; |
638 | bool setup_done; | ||
639 | bool enabled; | ||
640 | bool active; | ||
641 | struct delayed_work work; | ||
641 | }; | 642 | }; |
642 | 643 | ||
643 | enum intel_pch { | 644 | enum intel_pch { |
@@ -1331,6 +1332,17 @@ struct intel_pipe_crc { | |||
1331 | wait_queue_head_t wq; | 1332 | wait_queue_head_t wq; |
1332 | }; | 1333 | }; |
1333 | 1334 | ||
1335 | struct i915_frontbuffer_tracking { | ||
1336 | struct mutex lock; | ||
1337 | |||
1338 | /* | ||
1339 | * Tracking bits for delayed frontbuffer flushing du to gpu activity or | ||
1340 | * scheduled flips. | ||
1341 | */ | ||
1342 | unsigned busy_bits; | ||
1343 | unsigned flip_bits; | ||
1344 | }; | ||
1345 | |||
1334 | struct drm_i915_private { | 1346 | struct drm_i915_private { |
1335 | struct drm_device *dev; | 1347 | struct drm_device *dev; |
1336 | struct kmem_cache *slab; | 1348 | struct kmem_cache *slab; |
@@ -1370,6 +1382,9 @@ struct drm_i915_private { | |||
1370 | /* protects the irq masks */ | 1382 | /* protects the irq masks */ |
1371 | spinlock_t irq_lock; | 1383 | spinlock_t irq_lock; |
1372 | 1384 | ||
1385 | /* protects the mmio flip data */ | ||
1386 | spinlock_t mmio_flip_lock; | ||
1387 | |||
1373 | bool display_irqs_enabled; | 1388 | bool display_irqs_enabled; |
1374 | 1389 | ||
1375 | /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ | 1390 | /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ |
@@ -1473,6 +1488,9 @@ struct drm_i915_private { | |||
1473 | bool lvds_downclock_avail; | 1488 | bool lvds_downclock_avail; |
1474 | /* indicates the reduced downclock for LVDS*/ | 1489 | /* indicates the reduced downclock for LVDS*/ |
1475 | int lvds_downclock; | 1490 | int lvds_downclock; |
1491 | |||
1492 | struct i915_frontbuffer_tracking fb_tracking; | ||
1493 | |||
1476 | u16 orig_clock; | 1494 | u16 orig_clock; |
1477 | 1495 | ||
1478 | bool mchbar_need_disable; | 1496 | bool mchbar_need_disable; |
@@ -1590,6 +1608,28 @@ struct drm_i915_gem_object_ops { | |||
1590 | void (*release)(struct drm_i915_gem_object *); | 1608 | void (*release)(struct drm_i915_gem_object *); |
1591 | }; | 1609 | }; |
1592 | 1610 | ||
1611 | /* | ||
1612 | * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is | ||
1613 | * considered to be the frontbuffer for the given plane interface-vise. This | ||
1614 | * doesn't mean that the hw necessarily already scans it out, but that any | ||
1615 | * rendering (by the cpu or gpu) will land in the frontbuffer eventually. | ||
1616 | * | ||
1617 | * We have one bit per pipe and per scanout plane type. | ||
1618 | */ | ||
1619 | #define INTEL_FRONTBUFFER_BITS_PER_PIPE 4 | ||
1620 | #define INTEL_FRONTBUFFER_BITS \ | ||
1621 | (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES) | ||
1622 | #define INTEL_FRONTBUFFER_PRIMARY(pipe) \ | ||
1623 | (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) | ||
1624 | #define INTEL_FRONTBUFFER_CURSOR(pipe) \ | ||
1625 | (1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) | ||
1626 | #define INTEL_FRONTBUFFER_SPRITE(pipe) \ | ||
1627 | (1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) | ||
1628 | #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ | ||
1629 | (1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) | ||
1630 | #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ | ||
1631 | (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) | ||
1632 | |||
1593 | struct drm_i915_gem_object { | 1633 | struct drm_i915_gem_object { |
1594 | struct drm_gem_object base; | 1634 | struct drm_gem_object base; |
1595 | 1635 | ||
@@ -1660,6 +1700,12 @@ struct drm_i915_gem_object { | |||
1660 | unsigned int pin_display:1; | 1700 | unsigned int pin_display:1; |
1661 | 1701 | ||
1662 | /* | 1702 | /* |
1703 | * Is the object to be mapped as read-only to the GPU | ||
1704 | * Only honoured if hardware has relevant pte bit | ||
1705 | */ | ||
1706 | unsigned long gt_ro:1; | ||
1707 | |||
1708 | /* | ||
1663 | * Is the GPU currently using a fence to access this buffer, | 1709 | * Is the GPU currently using a fence to access this buffer, |
1664 | */ | 1710 | */ |
1665 | unsigned int pending_fenced_gpu_access:1; | 1711 | unsigned int pending_fenced_gpu_access:1; |
@@ -1671,6 +1717,8 @@ struct drm_i915_gem_object { | |||
1671 | unsigned int has_global_gtt_mapping:1; | 1717 | unsigned int has_global_gtt_mapping:1; |
1672 | unsigned int has_dma_mapping:1; | 1718 | unsigned int has_dma_mapping:1; |
1673 | 1719 | ||
1720 | unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; | ||
1721 | |||
1674 | struct sg_table *pages; | 1722 | struct sg_table *pages; |
1675 | int pages_pin_count; | 1723 | int pages_pin_count; |
1676 | 1724 | ||
@@ -1717,6 +1765,10 @@ struct drm_i915_gem_object { | |||
1717 | }; | 1765 | }; |
1718 | #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) | 1766 | #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) |
1719 | 1767 | ||
1768 | void i915_gem_track_fb(struct drm_i915_gem_object *old, | ||
1769 | struct drm_i915_gem_object *new, | ||
1770 | unsigned frontbuffer_bits); | ||
1771 | |||
1720 | /** | 1772 | /** |
1721 | * Request queue structure. | 1773 | * Request queue structure. |
1722 | * | 1774 | * |
@@ -1938,10 +1990,8 @@ struct drm_i915_cmd_table { | |||
1938 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) | 1990 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
1939 | 1991 | ||
1940 | #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) | 1992 | #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) |
1941 | #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && \ | 1993 | #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6) |
1942 | (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))) | 1994 | #define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev)) |
1943 | #define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 \ | ||
1944 | && !IS_GEN8(dev)) | ||
1945 | #define USES_PPGTT(dev) intel_enable_ppgtt(dev, false) | 1995 | #define USES_PPGTT(dev) intel_enable_ppgtt(dev, false) |
1946 | #define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true) | 1996 | #define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true) |
1947 | 1997 | ||
@@ -2038,6 +2088,7 @@ struct i915_params { | |||
2038 | bool reset; | 2088 | bool reset; |
2039 | bool disable_display; | 2089 | bool disable_display; |
2040 | bool disable_vtd_wa; | 2090 | bool disable_vtd_wa; |
2091 | int use_mmio_flip; | ||
2041 | }; | 2092 | }; |
2042 | extern struct i915_params i915 __read_mostly; | 2093 | extern struct i915_params i915 __read_mostly; |
2043 | 2094 | ||
@@ -2082,10 +2133,12 @@ extern void intel_irq_init(struct drm_device *dev); | |||
2082 | extern void intel_hpd_init(struct drm_device *dev); | 2133 | extern void intel_hpd_init(struct drm_device *dev); |
2083 | 2134 | ||
2084 | extern void intel_uncore_sanitize(struct drm_device *dev); | 2135 | extern void intel_uncore_sanitize(struct drm_device *dev); |
2085 | extern void intel_uncore_early_sanitize(struct drm_device *dev); | 2136 | extern void intel_uncore_early_sanitize(struct drm_device *dev, |
2137 | bool restore_forcewake); | ||
2086 | extern void intel_uncore_init(struct drm_device *dev); | 2138 | extern void intel_uncore_init(struct drm_device *dev); |
2087 | extern void intel_uncore_check_errors(struct drm_device *dev); | 2139 | extern void intel_uncore_check_errors(struct drm_device *dev); |
2088 | extern void intel_uncore_fini(struct drm_device *dev); | 2140 | extern void intel_uncore_fini(struct drm_device *dev); |
2141 | extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); | ||
2089 | 2142 | ||
2090 | void | 2143 | void |
2091 | i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, | 2144 | i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, |
@@ -2233,6 +2286,8 @@ bool i915_gem_retire_requests(struct drm_device *dev); | |||
2233 | void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); | 2286 | void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); |
2234 | int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, | 2287 | int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, |
2235 | bool interruptible); | 2288 | bool interruptible); |
2289 | int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno); | ||
2290 | |||
2236 | static inline bool i915_reset_in_progress(struct i915_gpu_error *error) | 2291 | static inline bool i915_reset_in_progress(struct i915_gpu_error *error) |
2237 | { | 2292 | { |
2238 | return unlikely(atomic_read(&error->reset_counter) | 2293 | return unlikely(atomic_read(&error->reset_counter) |
@@ -2443,7 +2498,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | |||
2443 | u32 stolen_offset, | 2498 | u32 stolen_offset, |
2444 | u32 gtt_offset, | 2499 | u32 gtt_offset, |
2445 | u32 size); | 2500 | u32 size); |
2446 | void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); | ||
2447 | 2501 | ||
2448 | /* i915_gem_tiling.c */ | 2502 | /* i915_gem_tiling.c */ |
2449 | static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) | 2503 | static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) |
@@ -2603,6 +2657,8 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data, | |||
2603 | int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, | 2657 | int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, |
2604 | struct drm_file *file); | 2658 | struct drm_file *file); |
2605 | 2659 | ||
2660 | void intel_notify_mmio_flip(struct intel_engine_cs *ring); | ||
2661 | |||
2606 | /* overlay */ | 2662 | /* overlay */ |
2607 | extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); | 2663 | extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); |
2608 | extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, | 2664 | extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d86b77e905a2..f6d123828926 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1095,7 +1095,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error, | |||
1095 | * Compare seqno against outstanding lazy request. Emit a request if they are | 1095 | * Compare seqno against outstanding lazy request. Emit a request if they are |
1096 | * equal. | 1096 | * equal. |
1097 | */ | 1097 | */ |
1098 | static int | 1098 | int |
1099 | i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno) | 1099 | i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno) |
1100 | { | 1100 | { |
1101 | int ret; | 1101 | int ret; |
@@ -1561,14 +1561,29 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1561 | if (ret) | 1561 | if (ret) |
1562 | goto unpin; | 1562 | goto unpin; |
1563 | 1563 | ||
1564 | obj->fault_mappable = true; | 1564 | /* Finally, remap it using the new GTT offset */ |
1565 | |||
1566 | pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj); | 1565 | pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj); |
1567 | pfn >>= PAGE_SHIFT; | 1566 | pfn >>= PAGE_SHIFT; |
1568 | pfn += page_offset; | ||
1569 | 1567 | ||
1570 | /* Finally, remap it using the new GTT offset */ | 1568 | if (!obj->fault_mappable) { |
1571 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); | 1569 | unsigned long size = min_t(unsigned long, |
1570 | vma->vm_end - vma->vm_start, | ||
1571 | obj->base.size); | ||
1572 | int i; | ||
1573 | |||
1574 | for (i = 0; i < size >> PAGE_SHIFT; i++) { | ||
1575 | ret = vm_insert_pfn(vma, | ||
1576 | (unsigned long)vma->vm_start + i * PAGE_SIZE, | ||
1577 | pfn + i); | ||
1578 | if (ret) | ||
1579 | break; | ||
1580 | } | ||
1581 | |||
1582 | obj->fault_mappable = true; | ||
1583 | } else | ||
1584 | ret = vm_insert_pfn(vma, | ||
1585 | (unsigned long)vmf->virtual_address, | ||
1586 | pfn + page_offset); | ||
1572 | unpin: | 1587 | unpin: |
1573 | i915_gem_object_ggtt_unpin(obj); | 1588 | i915_gem_object_ggtt_unpin(obj); |
1574 | unlock: | 1589 | unlock: |
@@ -1616,22 +1631,6 @@ out: | |||
1616 | return ret; | 1631 | return ret; |
1617 | } | 1632 | } |
1618 | 1633 | ||
1619 | void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv) | ||
1620 | { | ||
1621 | struct i915_vma *vma; | ||
1622 | |||
1623 | /* | ||
1624 | * Only the global gtt is relevant for gtt memory mappings, so restrict | ||
1625 | * list traversal to objects bound into the global address space. Note | ||
1626 | * that the active list should be empty, but better safe than sorry. | ||
1627 | */ | ||
1628 | WARN_ON(!list_empty(&dev_priv->gtt.base.active_list)); | ||
1629 | list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list) | ||
1630 | i915_gem_release_mmap(vma->obj); | ||
1631 | list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list) | ||
1632 | i915_gem_release_mmap(vma->obj); | ||
1633 | } | ||
1634 | |||
1635 | /** | 1634 | /** |
1636 | * i915_gem_release_mmap - remove physical page mappings | 1635 | * i915_gem_release_mmap - remove physical page mappings |
1637 | * @obj: obj in question | 1636 | * @obj: obj in question |
@@ -1657,6 +1656,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) | |||
1657 | obj->fault_mappable = false; | 1656 | obj->fault_mappable = false; |
1658 | } | 1657 | } |
1659 | 1658 | ||
1659 | void | ||
1660 | i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv) | ||
1661 | { | ||
1662 | struct drm_i915_gem_object *obj; | ||
1663 | |||
1664 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) | ||
1665 | i915_gem_release_mmap(obj); | ||
1666 | } | ||
1667 | |||
1660 | uint32_t | 1668 | uint32_t |
1661 | i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) | 1669 | i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) |
1662 | { | 1670 | { |
@@ -2211,6 +2219,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | |||
2211 | list_move_tail(&vma->mm_list, &vm->inactive_list); | 2219 | list_move_tail(&vma->mm_list, &vm->inactive_list); |
2212 | } | 2220 | } |
2213 | 2221 | ||
2222 | intel_fb_obj_flush(obj, true); | ||
2223 | |||
2214 | list_del_init(&obj->ring_list); | 2224 | list_del_init(&obj->ring_list); |
2215 | obj->ring = NULL; | 2225 | obj->ring = NULL; |
2216 | 2226 | ||
@@ -3540,6 +3550,8 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) | |||
3540 | old_write_domain = obj->base.write_domain; | 3550 | old_write_domain = obj->base.write_domain; |
3541 | obj->base.write_domain = 0; | 3551 | obj->base.write_domain = 0; |
3542 | 3552 | ||
3553 | intel_fb_obj_flush(obj, false); | ||
3554 | |||
3543 | trace_i915_gem_object_change_domain(obj, | 3555 | trace_i915_gem_object_change_domain(obj, |
3544 | obj->base.read_domains, | 3556 | obj->base.read_domains, |
3545 | old_write_domain); | 3557 | old_write_domain); |
@@ -3561,6 +3573,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, | |||
3561 | old_write_domain = obj->base.write_domain; | 3573 | old_write_domain = obj->base.write_domain; |
3562 | obj->base.write_domain = 0; | 3574 | obj->base.write_domain = 0; |
3563 | 3575 | ||
3576 | intel_fb_obj_flush(obj, false); | ||
3577 | |||
3564 | trace_i915_gem_object_change_domain(obj, | 3578 | trace_i915_gem_object_change_domain(obj, |
3565 | obj->base.read_domains, | 3579 | obj->base.read_domains, |
3566 | old_write_domain); | 3580 | old_write_domain); |
@@ -3614,6 +3628,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) | |||
3614 | obj->dirty = 1; | 3628 | obj->dirty = 1; |
3615 | } | 3629 | } |
3616 | 3630 | ||
3631 | if (write) | ||
3632 | intel_fb_obj_invalidate(obj, NULL); | ||
3633 | |||
3617 | trace_i915_gem_object_change_domain(obj, | 3634 | trace_i915_gem_object_change_domain(obj, |
3618 | old_read_domains, | 3635 | old_read_domains, |
3619 | old_write_domain); | 3636 | old_write_domain); |
@@ -3950,6 +3967,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) | |||
3950 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 3967 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
3951 | } | 3968 | } |
3952 | 3969 | ||
3970 | if (write) | ||
3971 | intel_fb_obj_invalidate(obj, NULL); | ||
3972 | |||
3953 | trace_i915_gem_object_change_domain(obj, | 3973 | trace_i915_gem_object_change_domain(obj, |
3954 | old_read_domains, | 3974 | old_read_domains, |
3955 | old_write_domain); | 3975 | old_write_domain); |
@@ -4438,13 +4458,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) | |||
4438 | if (obj->stolen) | 4458 | if (obj->stolen) |
4439 | i915_gem_object_unpin_pages(obj); | 4459 | i915_gem_object_unpin_pages(obj); |
4440 | 4460 | ||
4461 | WARN_ON(obj->frontbuffer_bits); | ||
4462 | |||
4441 | if (WARN_ON(obj->pages_pin_count)) | 4463 | if (WARN_ON(obj->pages_pin_count)) |
4442 | obj->pages_pin_count = 0; | 4464 | obj->pages_pin_count = 0; |
4443 | if (discard_backing_storage(obj)) | 4465 | if (discard_backing_storage(obj)) |
4444 | obj->madv = I915_MADV_DONTNEED; | 4466 | obj->madv = I915_MADV_DONTNEED; |
4445 | i915_gem_object_put_pages(obj); | 4467 | i915_gem_object_put_pages(obj); |
4446 | i915_gem_object_free_mmap_offset(obj); | 4468 | i915_gem_object_free_mmap_offset(obj); |
4447 | i915_gem_object_release_stolen(obj); | ||
4448 | 4469 | ||
4449 | BUG_ON(obj->pages); | 4470 | BUG_ON(obj->pages); |
4450 | 4471 | ||
@@ -4922,6 +4943,8 @@ i915_gem_load(struct drm_device *dev) | |||
4922 | 4943 | ||
4923 | dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; | 4944 | dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; |
4924 | register_oom_notifier(&dev_priv->mm.oom_notifier); | 4945 | register_oom_notifier(&dev_priv->mm.oom_notifier); |
4946 | |||
4947 | mutex_init(&dev_priv->fb_tracking.lock); | ||
4925 | } | 4948 | } |
4926 | 4949 | ||
4927 | void i915_gem_release(struct drm_device *dev, struct drm_file *file) | 4950 | void i915_gem_release(struct drm_device *dev, struct drm_file *file) |
@@ -4983,6 +5006,23 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file) | |||
4983 | return ret; | 5006 | return ret; |
4984 | } | 5007 | } |
4985 | 5008 | ||
5009 | void i915_gem_track_fb(struct drm_i915_gem_object *old, | ||
5010 | struct drm_i915_gem_object *new, | ||
5011 | unsigned frontbuffer_bits) | ||
5012 | { | ||
5013 | if (old) { | ||
5014 | WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex)); | ||
5015 | WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits)); | ||
5016 | old->frontbuffer_bits &= ~frontbuffer_bits; | ||
5017 | } | ||
5018 | |||
5019 | if (new) { | ||
5020 | WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex)); | ||
5021 | WARN_ON(new->frontbuffer_bits & frontbuffer_bits); | ||
5022 | new->frontbuffer_bits |= frontbuffer_bits; | ||
5023 | } | ||
5024 | } | ||
5025 | |||
4986 | static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) | 5026 | static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) |
4987 | { | 5027 | { |
4988 | if (!mutex_is_locked(mutex)) | 5028 | if (!mutex_is_locked(mutex)) |
@@ -5065,12 +5105,13 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, | |||
5065 | vm == &dev_priv->mm.aliasing_ppgtt->base) | 5105 | vm == &dev_priv->mm.aliasing_ppgtt->base) |
5066 | vm = &dev_priv->gtt.base; | 5106 | vm = &dev_priv->gtt.base; |
5067 | 5107 | ||
5068 | BUG_ON(list_empty(&o->vma_list)); | ||
5069 | list_for_each_entry(vma, &o->vma_list, vma_link) { | 5108 | list_for_each_entry(vma, &o->vma_list, vma_link) { |
5070 | if (vma->vm == vm) | 5109 | if (vma->vm == vm) |
5071 | return vma->node.start; | 5110 | return vma->node.start; |
5072 | 5111 | ||
5073 | } | 5112 | } |
5113 | WARN(1, "%s vma for this object not found.\n", | ||
5114 | i915_is_ggtt(vm) ? "global" : "ppgtt"); | ||
5074 | return -1; | 5115 | return -1; |
5075 | } | 5116 | } |
5076 | 5117 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index a5ddf3bce9c3..0d2c75bde96e 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -606,7 +606,7 @@ static int do_switch(struct intel_engine_cs *ring, | |||
606 | BUG_ON(!i915_gem_obj_is_pinned(from->obj)); | 606 | BUG_ON(!i915_gem_obj_is_pinned(from->obj)); |
607 | } | 607 | } |
608 | 608 | ||
609 | if (from == to && from->last_ring == ring && !to->remap_slice) | 609 | if (from == to && !to->remap_slice) |
610 | return 0; | 610 | return 0; |
611 | 611 | ||
612 | /* Trying to pin first makes error handling easier. */ | 612 | /* Trying to pin first makes error handling easier. */ |
@@ -703,7 +703,6 @@ static int do_switch(struct intel_engine_cs *ring, | |||
703 | done: | 703 | done: |
704 | i915_gem_context_reference(to); | 704 | i915_gem_context_reference(to); |
705 | ring->last_context = to; | 705 | ring->last_context = to; |
706 | to->last_ring = ring; | ||
707 | 706 | ||
708 | if (uninitialized) { | 707 | if (uninitialized) { |
709 | ret = i915_gem_render_state_init(ring); | 708 | ret = i915_gem_render_state_init(ring); |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 3a30133f93e8..d815ef51a5ea 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -975,10 +975,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, | |||
975 | if (obj->base.write_domain) { | 975 | if (obj->base.write_domain) { |
976 | obj->dirty = 1; | 976 | obj->dirty = 1; |
977 | obj->last_write_seqno = intel_ring_get_seqno(ring); | 977 | obj->last_write_seqno = intel_ring_get_seqno(ring); |
978 | /* check for potential scanout */ | 978 | |
979 | if (i915_gem_obj_ggtt_bound(obj) && | 979 | intel_fb_obj_invalidate(obj, ring); |
980 | i915_gem_obj_to_ggtt(obj)->pin_count) | ||
981 | intel_mark_fb_busy(obj, ring); | ||
982 | 980 | ||
983 | /* update for the implicit flush after a batch */ | 981 | /* update for the implicit flush after a batch */ |
984 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | 982 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; |
@@ -1525,7 +1523,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, | |||
1525 | ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); | 1523 | ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); |
1526 | if (!ret) { | 1524 | if (!ret) { |
1527 | /* Copy the new buffer offsets back to the user's exec list. */ | 1525 | /* Copy the new buffer offsets back to the user's exec list. */ |
1528 | struct drm_i915_gem_exec_object2 *user_exec_list = | 1526 | struct drm_i915_gem_exec_object2 __user *user_exec_list = |
1529 | to_user_ptr(args->buffers_ptr); | 1527 | to_user_ptr(args->buffers_ptr); |
1530 | int i; | 1528 | int i; |
1531 | 1529 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 8b3cde703364..a4153eef48c2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -63,6 +63,12 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) | |||
63 | } | 63 | } |
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | /* Early VLV doesn't have this */ | ||
67 | if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) { | ||
68 | DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); | ||
69 | return 0; | ||
70 | } | ||
71 | |||
66 | return HAS_ALIASING_PPGTT(dev) ? 1 : 0; | 72 | return HAS_ALIASING_PPGTT(dev) ? 1 : 0; |
67 | } | 73 | } |
68 | 74 | ||
@@ -110,7 +116,7 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev, | |||
110 | 116 | ||
111 | static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, | 117 | static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, |
112 | enum i915_cache_level level, | 118 | enum i915_cache_level level, |
113 | bool valid) | 119 | bool valid, u32 unused) |
114 | { | 120 | { |
115 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; | 121 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; |
116 | pte |= GEN6_PTE_ADDR_ENCODE(addr); | 122 | pte |= GEN6_PTE_ADDR_ENCODE(addr); |
@@ -132,7 +138,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, | |||
132 | 138 | ||
133 | static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, | 139 | static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, |
134 | enum i915_cache_level level, | 140 | enum i915_cache_level level, |
135 | bool valid) | 141 | bool valid, u32 unused) |
136 | { | 142 | { |
137 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; | 143 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; |
138 | pte |= GEN6_PTE_ADDR_ENCODE(addr); | 144 | pte |= GEN6_PTE_ADDR_ENCODE(addr); |
@@ -156,7 +162,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, | |||
156 | 162 | ||
157 | static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, | 163 | static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, |
158 | enum i915_cache_level level, | 164 | enum i915_cache_level level, |
159 | bool valid) | 165 | bool valid, u32 flags) |
160 | { | 166 | { |
161 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; | 167 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; |
162 | pte |= GEN6_PTE_ADDR_ENCODE(addr); | 168 | pte |= GEN6_PTE_ADDR_ENCODE(addr); |
@@ -164,7 +170,8 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, | |||
164 | /* Mark the page as writeable. Other platforms don't have a | 170 | /* Mark the page as writeable. Other platforms don't have a |
165 | * setting for read-only/writable, so this matches that behavior. | 171 | * setting for read-only/writable, so this matches that behavior. |
166 | */ | 172 | */ |
167 | pte |= BYT_PTE_WRITEABLE; | 173 | if (!(flags & PTE_READ_ONLY)) |
174 | pte |= BYT_PTE_WRITEABLE; | ||
168 | 175 | ||
169 | if (level != I915_CACHE_NONE) | 176 | if (level != I915_CACHE_NONE) |
170 | pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; | 177 | pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; |
@@ -174,7 +181,7 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, | |||
174 | 181 | ||
175 | static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, | 182 | static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, |
176 | enum i915_cache_level level, | 183 | enum i915_cache_level level, |
177 | bool valid) | 184 | bool valid, u32 unused) |
178 | { | 185 | { |
179 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; | 186 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; |
180 | pte |= HSW_PTE_ADDR_ENCODE(addr); | 187 | pte |= HSW_PTE_ADDR_ENCODE(addr); |
@@ -187,7 +194,7 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, | |||
187 | 194 | ||
188 | static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, | 195 | static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, |
189 | enum i915_cache_level level, | 196 | enum i915_cache_level level, |
190 | bool valid) | 197 | bool valid, u32 unused) |
191 | { | 198 | { |
192 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; | 199 | gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; |
193 | pte |= HSW_PTE_ADDR_ENCODE(addr); | 200 | pte |= HSW_PTE_ADDR_ENCODE(addr); |
@@ -301,7 +308,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, | |||
301 | static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, | 308 | static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, |
302 | struct sg_table *pages, | 309 | struct sg_table *pages, |
303 | uint64_t start, | 310 | uint64_t start, |
304 | enum i915_cache_level cache_level) | 311 | enum i915_cache_level cache_level, u32 unused) |
305 | { | 312 | { |
306 | struct i915_hw_ppgtt *ppgtt = | 313 | struct i915_hw_ppgtt *ppgtt = |
307 | container_of(vm, struct i915_hw_ppgtt, base); | 314 | container_of(vm, struct i915_hw_ppgtt, base); |
@@ -639,7 +646,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) | |||
639 | uint32_t pd_entry; | 646 | uint32_t pd_entry; |
640 | int pte, pde; | 647 | int pte, pde; |
641 | 648 | ||
642 | scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); | 649 | scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); |
643 | 650 | ||
644 | pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + | 651 | pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + |
645 | ppgtt->pd_offset / sizeof(gen6_gtt_pte_t); | 652 | ppgtt->pd_offset / sizeof(gen6_gtt_pte_t); |
@@ -941,7 +948,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, | |||
941 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; | 948 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
942 | unsigned last_pte, i; | 949 | unsigned last_pte, i; |
943 | 950 | ||
944 | scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); | 951 | scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); |
945 | 952 | ||
946 | while (num_entries) { | 953 | while (num_entries) { |
947 | last_pte = first_pte + num_entries; | 954 | last_pte = first_pte + num_entries; |
@@ -964,7 +971,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, | |||
964 | static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, | 971 | static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, |
965 | struct sg_table *pages, | 972 | struct sg_table *pages, |
966 | uint64_t start, | 973 | uint64_t start, |
967 | enum i915_cache_level cache_level) | 974 | enum i915_cache_level cache_level, u32 flags) |
968 | { | 975 | { |
969 | struct i915_hw_ppgtt *ppgtt = | 976 | struct i915_hw_ppgtt *ppgtt = |
970 | container_of(vm, struct i915_hw_ppgtt, base); | 977 | container_of(vm, struct i915_hw_ppgtt, base); |
@@ -981,7 +988,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, | |||
981 | 988 | ||
982 | pt_vaddr[act_pte] = | 989 | pt_vaddr[act_pte] = |
983 | vm->pte_encode(sg_page_iter_dma_address(&sg_iter), | 990 | vm->pte_encode(sg_page_iter_dma_address(&sg_iter), |
984 | cache_level, true); | 991 | cache_level, true, flags); |
992 | |||
985 | if (++act_pte == I915_PPGTT_PT_ENTRIES) { | 993 | if (++act_pte == I915_PPGTT_PT_ENTRIES) { |
986 | kunmap_atomic(pt_vaddr); | 994 | kunmap_atomic(pt_vaddr); |
987 | pt_vaddr = NULL; | 995 | pt_vaddr = NULL; |
@@ -1218,8 +1226,12 @@ ppgtt_bind_vma(struct i915_vma *vma, | |||
1218 | enum i915_cache_level cache_level, | 1226 | enum i915_cache_level cache_level, |
1219 | u32 flags) | 1227 | u32 flags) |
1220 | { | 1228 | { |
1229 | /* Currently applicable only to VLV */ | ||
1230 | if (vma->obj->gt_ro) | ||
1231 | flags |= PTE_READ_ONLY; | ||
1232 | |||
1221 | vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, | 1233 | vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, |
1222 | cache_level); | 1234 | cache_level, flags); |
1223 | } | 1235 | } |
1224 | 1236 | ||
1225 | static void ppgtt_unbind_vma(struct i915_vma *vma) | 1237 | static void ppgtt_unbind_vma(struct i915_vma *vma) |
@@ -1394,7 +1406,7 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte) | |||
1394 | static void gen8_ggtt_insert_entries(struct i915_address_space *vm, | 1406 | static void gen8_ggtt_insert_entries(struct i915_address_space *vm, |
1395 | struct sg_table *st, | 1407 | struct sg_table *st, |
1396 | uint64_t start, | 1408 | uint64_t start, |
1397 | enum i915_cache_level level) | 1409 | enum i915_cache_level level, u32 unused) |
1398 | { | 1410 | { |
1399 | struct drm_i915_private *dev_priv = vm->dev->dev_private; | 1411 | struct drm_i915_private *dev_priv = vm->dev->dev_private; |
1400 | unsigned first_entry = start >> PAGE_SHIFT; | 1412 | unsigned first_entry = start >> PAGE_SHIFT; |
@@ -1440,7 +1452,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, | |||
1440 | static void gen6_ggtt_insert_entries(struct i915_address_space *vm, | 1452 | static void gen6_ggtt_insert_entries(struct i915_address_space *vm, |
1441 | struct sg_table *st, | 1453 | struct sg_table *st, |
1442 | uint64_t start, | 1454 | uint64_t start, |
1443 | enum i915_cache_level level) | 1455 | enum i915_cache_level level, u32 flags) |
1444 | { | 1456 | { |
1445 | struct drm_i915_private *dev_priv = vm->dev->dev_private; | 1457 | struct drm_i915_private *dev_priv = vm->dev->dev_private; |
1446 | unsigned first_entry = start >> PAGE_SHIFT; | 1458 | unsigned first_entry = start >> PAGE_SHIFT; |
@@ -1452,7 +1464,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, | |||
1452 | 1464 | ||
1453 | for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { | 1465 | for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { |
1454 | addr = sg_page_iter_dma_address(&sg_iter); | 1466 | addr = sg_page_iter_dma_address(&sg_iter); |
1455 | iowrite32(vm->pte_encode(addr, level, true), >t_entries[i]); | 1467 | iowrite32(vm->pte_encode(addr, level, true, flags), >t_entries[i]); |
1456 | i++; | 1468 | i++; |
1457 | } | 1469 | } |
1458 | 1470 | ||
@@ -1464,7 +1476,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, | |||
1464 | */ | 1476 | */ |
1465 | if (i != 0) | 1477 | if (i != 0) |
1466 | WARN_ON(readl(>t_entries[i-1]) != | 1478 | WARN_ON(readl(>t_entries[i-1]) != |
1467 | vm->pte_encode(addr, level, true)); | 1479 | vm->pte_encode(addr, level, true, flags)); |
1468 | 1480 | ||
1469 | /* This next bit makes the above posting read even more important. We | 1481 | /* This next bit makes the above posting read even more important. We |
1470 | * want to flush the TLBs only after we're certain all the PTE updates | 1482 | * want to flush the TLBs only after we're certain all the PTE updates |
@@ -1518,7 +1530,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, | |||
1518 | first_entry, num_entries, max_entries)) | 1530 | first_entry, num_entries, max_entries)) |
1519 | num_entries = max_entries; | 1531 | num_entries = max_entries; |
1520 | 1532 | ||
1521 | scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch); | 1533 | scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0); |
1522 | 1534 | ||
1523 | for (i = 0; i < num_entries; i++) | 1535 | for (i = 0; i < num_entries; i++) |
1524 | iowrite32(scratch_pte, >t_base[i]); | 1536 | iowrite32(scratch_pte, >t_base[i]); |
@@ -1567,6 +1579,10 @@ static void ggtt_bind_vma(struct i915_vma *vma, | |||
1567 | struct drm_i915_private *dev_priv = dev->dev_private; | 1579 | struct drm_i915_private *dev_priv = dev->dev_private; |
1568 | struct drm_i915_gem_object *obj = vma->obj; | 1580 | struct drm_i915_gem_object *obj = vma->obj; |
1569 | 1581 | ||
1582 | /* Currently applicable only to VLV */ | ||
1583 | if (obj->gt_ro) | ||
1584 | flags |= PTE_READ_ONLY; | ||
1585 | |||
1570 | /* If there is no aliasing PPGTT, or the caller needs a global mapping, | 1586 | /* If there is no aliasing PPGTT, or the caller needs a global mapping, |
1571 | * or we have a global mapping already but the cacheability flags have | 1587 | * or we have a global mapping already but the cacheability flags have |
1572 | * changed, set the global PTEs. | 1588 | * changed, set the global PTEs. |
@@ -1583,7 +1599,7 @@ static void ggtt_bind_vma(struct i915_vma *vma, | |||
1583 | (cache_level != obj->cache_level)) { | 1599 | (cache_level != obj->cache_level)) { |
1584 | vma->vm->insert_entries(vma->vm, obj->pages, | 1600 | vma->vm->insert_entries(vma->vm, obj->pages, |
1585 | vma->node.start, | 1601 | vma->node.start, |
1586 | cache_level); | 1602 | cache_level, flags); |
1587 | obj->has_global_gtt_mapping = 1; | 1603 | obj->has_global_gtt_mapping = 1; |
1588 | } | 1604 | } |
1589 | } | 1605 | } |
@@ -1595,7 +1611,7 @@ static void ggtt_bind_vma(struct i915_vma *vma, | |||
1595 | appgtt->base.insert_entries(&appgtt->base, | 1611 | appgtt->base.insert_entries(&appgtt->base, |
1596 | vma->obj->pages, | 1612 | vma->obj->pages, |
1597 | vma->node.start, | 1613 | vma->node.start, |
1598 | cache_level); | 1614 | cache_level, flags); |
1599 | vma->obj->has_aliasing_ppgtt_mapping = 1; | 1615 | vma->obj->has_aliasing_ppgtt_mapping = 1; |
1600 | } | 1616 | } |
1601 | } | 1617 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 1b96a06be3cb..8d6f7c18c404 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h | |||
@@ -154,6 +154,7 @@ struct i915_vma { | |||
154 | void (*unbind_vma)(struct i915_vma *vma); | 154 | void (*unbind_vma)(struct i915_vma *vma); |
155 | /* Map an object into an address space with the given cache flags. */ | 155 | /* Map an object into an address space with the given cache flags. */ |
156 | #define GLOBAL_BIND (1<<0) | 156 | #define GLOBAL_BIND (1<<0) |
157 | #define PTE_READ_ONLY (1<<1) | ||
157 | void (*bind_vma)(struct i915_vma *vma, | 158 | void (*bind_vma)(struct i915_vma *vma, |
158 | enum i915_cache_level cache_level, | 159 | enum i915_cache_level cache_level, |
159 | u32 flags); | 160 | u32 flags); |
@@ -197,7 +198,7 @@ struct i915_address_space { | |||
197 | /* FIXME: Need a more generic return type */ | 198 | /* FIXME: Need a more generic return type */ |
198 | gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, | 199 | gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, |
199 | enum i915_cache_level level, | 200 | enum i915_cache_level level, |
200 | bool valid); /* Create a valid PTE */ | 201 | bool valid, u32 flags); /* Create a valid PTE */ |
201 | void (*clear_range)(struct i915_address_space *vm, | 202 | void (*clear_range)(struct i915_address_space *vm, |
202 | uint64_t start, | 203 | uint64_t start, |
203 | uint64_t length, | 204 | uint64_t length, |
@@ -205,7 +206,7 @@ struct i915_address_space { | |||
205 | void (*insert_entries)(struct i915_address_space *vm, | 206 | void (*insert_entries)(struct i915_address_space *vm, |
206 | struct sg_table *st, | 207 | struct sg_table *st, |
207 | uint64_t start, | 208 | uint64_t start, |
208 | enum i915_cache_level cache_level); | 209 | enum i915_cache_level cache_level, u32 flags); |
209 | void (*cleanup)(struct i915_address_space *vm); | 210 | void (*cleanup)(struct i915_address_space *vm); |
210 | }; | 211 | }; |
211 | 212 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 3521f998a178..e60be3f552a6 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c | |||
@@ -28,64 +28,13 @@ | |||
28 | #include "i915_drv.h" | 28 | #include "i915_drv.h" |
29 | #include "intel_renderstate.h" | 29 | #include "intel_renderstate.h" |
30 | 30 | ||
31 | struct i915_render_state { | 31 | struct render_state { |
32 | const struct intel_renderstate_rodata *rodata; | ||
32 | struct drm_i915_gem_object *obj; | 33 | struct drm_i915_gem_object *obj; |
33 | unsigned long ggtt_offset; | 34 | u64 ggtt_offset; |
34 | void *batch; | 35 | int gen; |
35 | u32 size; | ||
36 | u32 len; | ||
37 | }; | 36 | }; |
38 | 37 | ||
39 | static struct i915_render_state *render_state_alloc(struct drm_device *dev) | ||
40 | { | ||
41 | struct i915_render_state *so; | ||
42 | struct page *page; | ||
43 | int ret; | ||
44 | |||
45 | so = kzalloc(sizeof(*so), GFP_KERNEL); | ||
46 | if (!so) | ||
47 | return ERR_PTR(-ENOMEM); | ||
48 | |||
49 | so->obj = i915_gem_alloc_object(dev, 4096); | ||
50 | if (so->obj == NULL) { | ||
51 | ret = -ENOMEM; | ||
52 | goto free; | ||
53 | } | ||
54 | so->size = 4096; | ||
55 | |||
56 | ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); | ||
57 | if (ret) | ||
58 | goto free_gem; | ||
59 | |||
60 | BUG_ON(so->obj->pages->nents != 1); | ||
61 | page = sg_page(so->obj->pages->sgl); | ||
62 | |||
63 | so->batch = kmap(page); | ||
64 | if (!so->batch) { | ||
65 | ret = -ENOMEM; | ||
66 | goto unpin; | ||
67 | } | ||
68 | |||
69 | so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj); | ||
70 | |||
71 | return so; | ||
72 | unpin: | ||
73 | i915_gem_object_ggtt_unpin(so->obj); | ||
74 | free_gem: | ||
75 | drm_gem_object_unreference(&so->obj->base); | ||
76 | free: | ||
77 | kfree(so); | ||
78 | return ERR_PTR(ret); | ||
79 | } | ||
80 | |||
81 | static void render_state_free(struct i915_render_state *so) | ||
82 | { | ||
83 | kunmap(so->batch); | ||
84 | i915_gem_object_ggtt_unpin(so->obj); | ||
85 | drm_gem_object_unreference(&so->obj->base); | ||
86 | kfree(so); | ||
87 | } | ||
88 | |||
89 | static const struct intel_renderstate_rodata * | 38 | static const struct intel_renderstate_rodata * |
90 | render_state_get_rodata(struct drm_device *dev, const int gen) | 39 | render_state_get_rodata(struct drm_device *dev, const int gen) |
91 | { | 40 | { |
@@ -101,98 +50,120 @@ render_state_get_rodata(struct drm_device *dev, const int gen) | |||
101 | return NULL; | 50 | return NULL; |
102 | } | 51 | } |
103 | 52 | ||
104 | static int render_state_setup(const int gen, | 53 | static int render_state_init(struct render_state *so, struct drm_device *dev) |
105 | const struct intel_renderstate_rodata *rodata, | ||
106 | struct i915_render_state *so) | ||
107 | { | 54 | { |
108 | const u64 goffset = i915_gem_obj_ggtt_offset(so->obj); | ||
109 | u32 reloc_index = 0; | ||
110 | u32 * const d = so->batch; | ||
111 | unsigned int i = 0; | ||
112 | int ret; | 55 | int ret; |
113 | 56 | ||
114 | if (!rodata || rodata->batch_items * 4 > so->size) | 57 | so->gen = INTEL_INFO(dev)->gen; |
58 | so->rodata = render_state_get_rodata(dev, so->gen); | ||
59 | if (so->rodata == NULL) | ||
60 | return 0; | ||
61 | |||
62 | if (so->rodata->batch_items * 4 > 4096) | ||
115 | return -EINVAL; | 63 | return -EINVAL; |
116 | 64 | ||
65 | so->obj = i915_gem_alloc_object(dev, 4096); | ||
66 | if (so->obj == NULL) | ||
67 | return -ENOMEM; | ||
68 | |||
69 | ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); | ||
70 | if (ret) | ||
71 | goto free_gem; | ||
72 | |||
73 | so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj); | ||
74 | return 0; | ||
75 | |||
76 | free_gem: | ||
77 | drm_gem_object_unreference(&so->obj->base); | ||
78 | return ret; | ||
79 | } | ||
80 | |||
81 | static int render_state_setup(struct render_state *so) | ||
82 | { | ||
83 | const struct intel_renderstate_rodata *rodata = so->rodata; | ||
84 | unsigned int i = 0, reloc_index = 0; | ||
85 | struct page *page; | ||
86 | u32 *d; | ||
87 | int ret; | ||
88 | |||
117 | ret = i915_gem_object_set_to_cpu_domain(so->obj, true); | 89 | ret = i915_gem_object_set_to_cpu_domain(so->obj, true); |
118 | if (ret) | 90 | if (ret) |
119 | return ret; | 91 | return ret; |
120 | 92 | ||
93 | page = sg_page(so->obj->pages->sgl); | ||
94 | d = kmap(page); | ||
95 | |||
121 | while (i < rodata->batch_items) { | 96 | while (i < rodata->batch_items) { |
122 | u32 s = rodata->batch[i]; | 97 | u32 s = rodata->batch[i]; |
123 | 98 | ||
124 | if (reloc_index < rodata->reloc_items && | 99 | if (i * 4 == rodata->reloc[reloc_index]) { |
125 | i * 4 == rodata->reloc[reloc_index]) { | 100 | u64 r = s + so->ggtt_offset; |
126 | 101 | s = lower_32_bits(r); | |
127 | s += goffset & 0xffffffff; | 102 | if (so->gen >= 8) { |
128 | |||
129 | /* We keep batch offsets max 32bit */ | ||
130 | if (gen >= 8) { | ||
131 | if (i + 1 >= rodata->batch_items || | 103 | if (i + 1 >= rodata->batch_items || |
132 | rodata->batch[i + 1] != 0) | 104 | rodata->batch[i + 1] != 0) |
133 | return -EINVAL; | 105 | return -EINVAL; |
134 | 106 | ||
135 | d[i] = s; | 107 | d[i++] = s; |
136 | i++; | 108 | s = upper_32_bits(r); |
137 | s = (goffset & 0xffffffff00000000ull) >> 32; | ||
138 | } | 109 | } |
139 | 110 | ||
140 | reloc_index++; | 111 | reloc_index++; |
141 | } | 112 | } |
142 | 113 | ||
143 | d[i] = s; | 114 | d[i++] = s; |
144 | i++; | ||
145 | } | 115 | } |
116 | kunmap(page); | ||
146 | 117 | ||
147 | ret = i915_gem_object_set_to_gtt_domain(so->obj, false); | 118 | ret = i915_gem_object_set_to_gtt_domain(so->obj, false); |
148 | if (ret) | 119 | if (ret) |
149 | return ret; | 120 | return ret; |
150 | 121 | ||
151 | if (rodata->reloc_items != reloc_index) { | 122 | if (rodata->reloc[reloc_index] != -1) { |
152 | DRM_ERROR("not all relocs resolved, %d out of %d\n", | 123 | DRM_ERROR("only %d relocs resolved\n", reloc_index); |
153 | reloc_index, rodata->reloc_items); | ||
154 | return -EINVAL; | 124 | return -EINVAL; |
155 | } | 125 | } |
156 | 126 | ||
157 | so->len = rodata->batch_items * 4; | ||
158 | |||
159 | return 0; | 127 | return 0; |
160 | } | 128 | } |
161 | 129 | ||
130 | static void render_state_fini(struct render_state *so) | ||
131 | { | ||
132 | i915_gem_object_ggtt_unpin(so->obj); | ||
133 | drm_gem_object_unreference(&so->obj->base); | ||
134 | } | ||
135 | |||
162 | int i915_gem_render_state_init(struct intel_engine_cs *ring) | 136 | int i915_gem_render_state_init(struct intel_engine_cs *ring) |
163 | { | 137 | { |
164 | const int gen = INTEL_INFO(ring->dev)->gen; | 138 | struct render_state so; |
165 | struct i915_render_state *so; | ||
166 | const struct intel_renderstate_rodata *rodata; | ||
167 | int ret; | 139 | int ret; |
168 | 140 | ||
169 | if (WARN_ON(ring->id != RCS)) | 141 | if (WARN_ON(ring->id != RCS)) |
170 | return -ENOENT; | 142 | return -ENOENT; |
171 | 143 | ||
172 | rodata = render_state_get_rodata(ring->dev, gen); | 144 | ret = render_state_init(&so, ring->dev); |
173 | if (rodata == NULL) | 145 | if (ret) |
174 | return 0; | 146 | return ret; |
175 | 147 | ||
176 | so = render_state_alloc(ring->dev); | 148 | if (so.rodata == NULL) |
177 | if (IS_ERR(so)) | 149 | return 0; |
178 | return PTR_ERR(so); | ||
179 | 150 | ||
180 | ret = render_state_setup(gen, rodata, so); | 151 | ret = render_state_setup(&so); |
181 | if (ret) | 152 | if (ret) |
182 | goto out; | 153 | goto out; |
183 | 154 | ||
184 | ret = ring->dispatch_execbuffer(ring, | 155 | ret = ring->dispatch_execbuffer(ring, |
185 | i915_gem_obj_ggtt_offset(so->obj), | 156 | so.ggtt_offset, |
186 | so->len, | 157 | so.rodata->batch_items * 4, |
187 | I915_DISPATCH_SECURE); | 158 | I915_DISPATCH_SECURE); |
188 | if (ret) | 159 | if (ret) |
189 | goto out; | 160 | goto out; |
190 | 161 | ||
191 | i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring); | 162 | i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring); |
192 | 163 | ||
193 | ret = __i915_add_request(ring, NULL, so->obj, NULL); | 164 | ret = __i915_add_request(ring, NULL, so.obj, NULL); |
194 | /* __i915_add_request moves object to inactive if it fails */ | 165 | /* __i915_add_request moves object to inactive if it fails */ |
195 | out: | 166 | out: |
196 | render_state_free(so); | 167 | render_state_fini(&so); |
197 | return ret; | 168 | return ret; |
198 | } | 169 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 62ef55ba061c..644117855e01 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
@@ -292,9 +292,20 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj) | |||
292 | kfree(obj->pages); | 292 | kfree(obj->pages); |
293 | } | 293 | } |
294 | 294 | ||
295 | |||
296 | static void | ||
297 | i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) | ||
298 | { | ||
299 | if (obj->stolen) { | ||
300 | drm_mm_remove_node(obj->stolen); | ||
301 | kfree(obj->stolen); | ||
302 | obj->stolen = NULL; | ||
303 | } | ||
304 | } | ||
295 | static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { | 305 | static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { |
296 | .get_pages = i915_gem_object_get_pages_stolen, | 306 | .get_pages = i915_gem_object_get_pages_stolen, |
297 | .put_pages = i915_gem_object_put_pages_stolen, | 307 | .put_pages = i915_gem_object_put_pages_stolen, |
308 | .release = i915_gem_object_release_stolen, | ||
298 | }; | 309 | }; |
299 | 310 | ||
300 | static struct drm_i915_gem_object * | 311 | static struct drm_i915_gem_object * |
@@ -452,13 +463,3 @@ err_out: | |||
452 | drm_gem_object_unreference(&obj->base); | 463 | drm_gem_object_unreference(&obj->base); |
453 | return NULL; | 464 | return NULL; |
454 | } | 465 | } |
455 | |||
456 | void | ||
457 | i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) | ||
458 | { | ||
459 | if (obj->stolen) { | ||
460 | drm_mm_remove_node(obj->stolen); | ||
461 | kfree(obj->stolen); | ||
462 | obj->stolen = NULL; | ||
463 | } | ||
464 | } | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 69a7960c36bb..c0d7674c45cd 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -1214,6 +1214,9 @@ static void notify_ring(struct drm_device *dev, | |||
1214 | 1214 | ||
1215 | trace_i915_gem_request_complete(ring); | 1215 | trace_i915_gem_request_complete(ring); |
1216 | 1216 | ||
1217 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
1218 | intel_notify_mmio_flip(ring); | ||
1219 | |||
1217 | wake_up_all(&ring->irq_queue); | 1220 | wake_up_all(&ring->irq_queue); |
1218 | i915_queue_hangcheck(dev); | 1221 | i915_queue_hangcheck(dev); |
1219 | } | 1222 | } |
@@ -1248,8 +1251,10 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
1248 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { | 1251 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { |
1249 | if (adj > 0) | 1252 | if (adj > 0) |
1250 | adj *= 2; | 1253 | adj *= 2; |
1251 | else | 1254 | else { |
1252 | adj = 1; | 1255 | /* CHV needs even encode values */ |
1256 | adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1; | ||
1257 | } | ||
1253 | new_delay = dev_priv->rps.cur_freq + adj; | 1258 | new_delay = dev_priv->rps.cur_freq + adj; |
1254 | 1259 | ||
1255 | /* | 1260 | /* |
@@ -1267,8 +1272,10 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
1267 | } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { | 1272 | } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { |
1268 | if (adj < 0) | 1273 | if (adj < 0) |
1269 | adj *= 2; | 1274 | adj *= 2; |
1270 | else | 1275 | else { |
1271 | adj = -1; | 1276 | /* CHV needs even encode values */ |
1277 | adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1; | ||
1278 | } | ||
1272 | new_delay = dev_priv->rps.cur_freq + adj; | 1279 | new_delay = dev_priv->rps.cur_freq + adj; |
1273 | } else { /* unknown event */ | 1280 | } else { /* unknown event */ |
1274 | new_delay = dev_priv->rps.cur_freq; | 1281 | new_delay = dev_priv->rps.cur_freq; |
@@ -1454,6 +1461,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, | |||
1454 | if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { | 1461 | if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { |
1455 | tmp = I915_READ(GEN8_GT_IIR(0)); | 1462 | tmp = I915_READ(GEN8_GT_IIR(0)); |
1456 | if (tmp) { | 1463 | if (tmp) { |
1464 | I915_WRITE(GEN8_GT_IIR(0), tmp); | ||
1457 | ret = IRQ_HANDLED; | 1465 | ret = IRQ_HANDLED; |
1458 | rcs = tmp >> GEN8_RCS_IRQ_SHIFT; | 1466 | rcs = tmp >> GEN8_RCS_IRQ_SHIFT; |
1459 | bcs = tmp >> GEN8_BCS_IRQ_SHIFT; | 1467 | bcs = tmp >> GEN8_BCS_IRQ_SHIFT; |
@@ -1461,7 +1469,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, | |||
1461 | notify_ring(dev, &dev_priv->ring[RCS]); | 1469 | notify_ring(dev, &dev_priv->ring[RCS]); |
1462 | if (bcs & GT_RENDER_USER_INTERRUPT) | 1470 | if (bcs & GT_RENDER_USER_INTERRUPT) |
1463 | notify_ring(dev, &dev_priv->ring[BCS]); | 1471 | notify_ring(dev, &dev_priv->ring[BCS]); |
1464 | I915_WRITE(GEN8_GT_IIR(0), tmp); | ||
1465 | } else | 1472 | } else |
1466 | DRM_ERROR("The master control interrupt lied (GT0)!\n"); | 1473 | DRM_ERROR("The master control interrupt lied (GT0)!\n"); |
1467 | } | 1474 | } |
@@ -1469,6 +1476,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, | |||
1469 | if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { | 1476 | if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { |
1470 | tmp = I915_READ(GEN8_GT_IIR(1)); | 1477 | tmp = I915_READ(GEN8_GT_IIR(1)); |
1471 | if (tmp) { | 1478 | if (tmp) { |
1479 | I915_WRITE(GEN8_GT_IIR(1), tmp); | ||
1472 | ret = IRQ_HANDLED; | 1480 | ret = IRQ_HANDLED; |
1473 | vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; | 1481 | vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; |
1474 | if (vcs & GT_RENDER_USER_INTERRUPT) | 1482 | if (vcs & GT_RENDER_USER_INTERRUPT) |
@@ -1476,7 +1484,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, | |||
1476 | vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; | 1484 | vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; |
1477 | if (vcs & GT_RENDER_USER_INTERRUPT) | 1485 | if (vcs & GT_RENDER_USER_INTERRUPT) |
1478 | notify_ring(dev, &dev_priv->ring[VCS2]); | 1486 | notify_ring(dev, &dev_priv->ring[VCS2]); |
1479 | I915_WRITE(GEN8_GT_IIR(1), tmp); | ||
1480 | } else | 1487 | } else |
1481 | DRM_ERROR("The master control interrupt lied (GT1)!\n"); | 1488 | DRM_ERROR("The master control interrupt lied (GT1)!\n"); |
1482 | } | 1489 | } |
@@ -1484,10 +1491,10 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, | |||
1484 | if (master_ctl & GEN8_GT_PM_IRQ) { | 1491 | if (master_ctl & GEN8_GT_PM_IRQ) { |
1485 | tmp = I915_READ(GEN8_GT_IIR(2)); | 1492 | tmp = I915_READ(GEN8_GT_IIR(2)); |
1486 | if (tmp & dev_priv->pm_rps_events) { | 1493 | if (tmp & dev_priv->pm_rps_events) { |
1487 | ret = IRQ_HANDLED; | ||
1488 | gen8_rps_irq_handler(dev_priv, tmp); | ||
1489 | I915_WRITE(GEN8_GT_IIR(2), | 1494 | I915_WRITE(GEN8_GT_IIR(2), |
1490 | tmp & dev_priv->pm_rps_events); | 1495 | tmp & dev_priv->pm_rps_events); |
1496 | ret = IRQ_HANDLED; | ||
1497 | gen8_rps_irq_handler(dev_priv, tmp); | ||
1491 | } else | 1498 | } else |
1492 | DRM_ERROR("The master control interrupt lied (PM)!\n"); | 1499 | DRM_ERROR("The master control interrupt lied (PM)!\n"); |
1493 | } | 1500 | } |
@@ -1495,11 +1502,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, | |||
1495 | if (master_ctl & GEN8_GT_VECS_IRQ) { | 1502 | if (master_ctl & GEN8_GT_VECS_IRQ) { |
1496 | tmp = I915_READ(GEN8_GT_IIR(3)); | 1503 | tmp = I915_READ(GEN8_GT_IIR(3)); |
1497 | if (tmp) { | 1504 | if (tmp) { |
1505 | I915_WRITE(GEN8_GT_IIR(3), tmp); | ||
1498 | ret = IRQ_HANDLED; | 1506 | ret = IRQ_HANDLED; |
1499 | vcs = tmp >> GEN8_VECS_IRQ_SHIFT; | 1507 | vcs = tmp >> GEN8_VECS_IRQ_SHIFT; |
1500 | if (vcs & GT_RENDER_USER_INTERRUPT) | 1508 | if (vcs & GT_RENDER_USER_INTERRUPT) |
1501 | notify_ring(dev, &dev_priv->ring[VECS]); | 1509 | notify_ring(dev, &dev_priv->ring[VECS]); |
1502 | I915_WRITE(GEN8_GT_IIR(3), tmp); | ||
1503 | } else | 1510 | } else |
1504 | DRM_ERROR("The master control interrupt lied (GT3)!\n"); | 1511 | DRM_ERROR("The master control interrupt lied (GT3)!\n"); |
1505 | } | 1512 | } |
@@ -1805,26 +1812,28 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev) | |||
1805 | struct drm_i915_private *dev_priv = dev->dev_private; | 1812 | struct drm_i915_private *dev_priv = dev->dev_private; |
1806 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | 1813 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
1807 | 1814 | ||
1808 | if (IS_G4X(dev)) { | 1815 | if (hotplug_status) { |
1809 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; | 1816 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
1817 | /* | ||
1818 | * Make sure hotplug status is cleared before we clear IIR, or else we | ||
1819 | * may miss hotplug events. | ||
1820 | */ | ||
1821 | POSTING_READ(PORT_HOTPLUG_STAT); | ||
1810 | 1822 | ||
1811 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x); | 1823 | if (IS_G4X(dev)) { |
1812 | } else { | 1824 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; |
1813 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; | ||
1814 | 1825 | ||
1815 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); | 1826 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x); |
1816 | } | 1827 | } else { |
1828 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; | ||
1817 | 1829 | ||
1818 | if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && | 1830 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); |
1819 | hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) | 1831 | } |
1820 | dp_aux_irq_handler(dev); | ||
1821 | 1832 | ||
1822 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 1833 | if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && |
1823 | /* | 1834 | hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) |
1824 | * Make sure hotplug status is cleared before we clear IIR, or else we | 1835 | dp_aux_irq_handler(dev); |
1825 | * may miss hotplug events. | 1836 | } |
1826 | */ | ||
1827 | POSTING_READ(PORT_HOTPLUG_STAT); | ||
1828 | } | 1837 | } |
1829 | 1838 | ||
1830 | static irqreturn_t valleyview_irq_handler(int irq, void *arg) | 1839 | static irqreturn_t valleyview_irq_handler(int irq, void *arg) |
@@ -1835,29 +1844,36 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) | |||
1835 | irqreturn_t ret = IRQ_NONE; | 1844 | irqreturn_t ret = IRQ_NONE; |
1836 | 1845 | ||
1837 | while (true) { | 1846 | while (true) { |
1838 | iir = I915_READ(VLV_IIR); | 1847 | /* Find, clear, then process each source of interrupt */ |
1848 | |||
1839 | gt_iir = I915_READ(GTIIR); | 1849 | gt_iir = I915_READ(GTIIR); |
1850 | if (gt_iir) | ||
1851 | I915_WRITE(GTIIR, gt_iir); | ||
1852 | |||
1840 | pm_iir = I915_READ(GEN6_PMIIR); | 1853 | pm_iir = I915_READ(GEN6_PMIIR); |
1854 | if (pm_iir) | ||
1855 | I915_WRITE(GEN6_PMIIR, pm_iir); | ||
1856 | |||
1857 | iir = I915_READ(VLV_IIR); | ||
1858 | if (iir) { | ||
1859 | /* Consume port before clearing IIR or we'll miss events */ | ||
1860 | if (iir & I915_DISPLAY_PORT_INTERRUPT) | ||
1861 | i9xx_hpd_irq_handler(dev); | ||
1862 | I915_WRITE(VLV_IIR, iir); | ||
1863 | } | ||
1841 | 1864 | ||
1842 | if (gt_iir == 0 && pm_iir == 0 && iir == 0) | 1865 | if (gt_iir == 0 && pm_iir == 0 && iir == 0) |
1843 | goto out; | 1866 | goto out; |
1844 | 1867 | ||
1845 | ret = IRQ_HANDLED; | 1868 | ret = IRQ_HANDLED; |
1846 | 1869 | ||
1847 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | 1870 | if (gt_iir) |
1848 | 1871 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | |
1849 | valleyview_pipestat_irq_handler(dev, iir); | ||
1850 | |||
1851 | /* Consume port. Then clear IIR or we'll miss events */ | ||
1852 | if (iir & I915_DISPLAY_PORT_INTERRUPT) | ||
1853 | i9xx_hpd_irq_handler(dev); | ||
1854 | |||
1855 | if (pm_iir) | 1872 | if (pm_iir) |
1856 | gen6_rps_irq_handler(dev_priv, pm_iir); | 1873 | gen6_rps_irq_handler(dev_priv, pm_iir); |
1857 | 1874 | /* Call regardless, as some status bits might not be | |
1858 | I915_WRITE(GTIIR, gt_iir); | 1875 | * signalled in iir */ |
1859 | I915_WRITE(GEN6_PMIIR, pm_iir); | 1876 | valleyview_pipestat_irq_handler(dev, iir); |
1860 | I915_WRITE(VLV_IIR, iir); | ||
1861 | } | 1877 | } |
1862 | 1878 | ||
1863 | out: | 1879 | out: |
@@ -1878,21 +1894,27 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) | |||
1878 | if (master_ctl == 0 && iir == 0) | 1894 | if (master_ctl == 0 && iir == 0) |
1879 | break; | 1895 | break; |
1880 | 1896 | ||
1897 | ret = IRQ_HANDLED; | ||
1898 | |||
1881 | I915_WRITE(GEN8_MASTER_IRQ, 0); | 1899 | I915_WRITE(GEN8_MASTER_IRQ, 0); |
1882 | 1900 | ||
1883 | gen8_gt_irq_handler(dev, dev_priv, master_ctl); | 1901 | /* Find, clear, then process each source of interrupt */ |
1884 | 1902 | ||
1885 | valleyview_pipestat_irq_handler(dev, iir); | 1903 | if (iir) { |
1904 | /* Consume port before clearing IIR or we'll miss events */ | ||
1905 | if (iir & I915_DISPLAY_PORT_INTERRUPT) | ||
1906 | i9xx_hpd_irq_handler(dev); | ||
1907 | I915_WRITE(VLV_IIR, iir); | ||
1908 | } | ||
1886 | 1909 | ||
1887 | /* Consume port. Then clear IIR or we'll miss events */ | 1910 | gen8_gt_irq_handler(dev, dev_priv, master_ctl); |
1888 | i9xx_hpd_irq_handler(dev); | ||
1889 | 1911 | ||
1890 | I915_WRITE(VLV_IIR, iir); | 1912 | /* Call regardless, as some status bits might not be |
1913 | * signalled in iir */ | ||
1914 | valleyview_pipestat_irq_handler(dev, iir); | ||
1891 | 1915 | ||
1892 | I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); | 1916 | I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); |
1893 | POSTING_READ(GEN8_MASTER_IRQ); | 1917 | POSTING_READ(GEN8_MASTER_IRQ); |
1894 | |||
1895 | ret = IRQ_HANDLED; | ||
1896 | } | 1918 | } |
1897 | 1919 | ||
1898 | return ret; | 1920 | return ret; |
@@ -2128,6 +2150,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) | |||
2128 | } | 2150 | } |
2129 | } | 2151 | } |
2130 | 2152 | ||
2153 | /* | ||
2154 | * To handle irqs with the minimum potential races with fresh interrupts, we: | ||
2155 | * 1 - Disable Master Interrupt Control. | ||
2156 | * 2 - Find the source(s) of the interrupt. | ||
2157 | * 3 - Clear the Interrupt Identity bits (IIR). | ||
2158 | * 4 - Process the interrupt(s) that had bits set in the IIRs. | ||
2159 | * 5 - Re-enable Master Interrupt Control. | ||
2160 | */ | ||
2131 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) | 2161 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) |
2132 | { | 2162 | { |
2133 | struct drm_device *dev = arg; | 2163 | struct drm_device *dev = arg; |
@@ -2155,32 +2185,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) | |||
2155 | POSTING_READ(SDEIER); | 2185 | POSTING_READ(SDEIER); |
2156 | } | 2186 | } |
2157 | 2187 | ||
2188 | /* Find, clear, then process each source of interrupt */ | ||
2189 | |||
2158 | gt_iir = I915_READ(GTIIR); | 2190 | gt_iir = I915_READ(GTIIR); |
2159 | if (gt_iir) { | 2191 | if (gt_iir) { |
2192 | I915_WRITE(GTIIR, gt_iir); | ||
2193 | ret = IRQ_HANDLED; | ||
2160 | if (INTEL_INFO(dev)->gen >= 6) | 2194 | if (INTEL_INFO(dev)->gen >= 6) |
2161 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | 2195 | snb_gt_irq_handler(dev, dev_priv, gt_iir); |
2162 | else | 2196 | else |
2163 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); | 2197 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); |
2164 | I915_WRITE(GTIIR, gt_iir); | ||
2165 | ret = IRQ_HANDLED; | ||
2166 | } | 2198 | } |
2167 | 2199 | ||
2168 | de_iir = I915_READ(DEIIR); | 2200 | de_iir = I915_READ(DEIIR); |
2169 | if (de_iir) { | 2201 | if (de_iir) { |
2202 | I915_WRITE(DEIIR, de_iir); | ||
2203 | ret = IRQ_HANDLED; | ||
2170 | if (INTEL_INFO(dev)->gen >= 7) | 2204 | if (INTEL_INFO(dev)->gen >= 7) |
2171 | ivb_display_irq_handler(dev, de_iir); | 2205 | ivb_display_irq_handler(dev, de_iir); |
2172 | else | 2206 | else |
2173 | ilk_display_irq_handler(dev, de_iir); | 2207 | ilk_display_irq_handler(dev, de_iir); |
2174 | I915_WRITE(DEIIR, de_iir); | ||
2175 | ret = IRQ_HANDLED; | ||
2176 | } | 2208 | } |
2177 | 2209 | ||
2178 | if (INTEL_INFO(dev)->gen >= 6) { | 2210 | if (INTEL_INFO(dev)->gen >= 6) { |
2179 | u32 pm_iir = I915_READ(GEN6_PMIIR); | 2211 | u32 pm_iir = I915_READ(GEN6_PMIIR); |
2180 | if (pm_iir) { | 2212 | if (pm_iir) { |
2181 | gen6_rps_irq_handler(dev_priv, pm_iir); | ||
2182 | I915_WRITE(GEN6_PMIIR, pm_iir); | 2213 | I915_WRITE(GEN6_PMIIR, pm_iir); |
2183 | ret = IRQ_HANDLED; | 2214 | ret = IRQ_HANDLED; |
2215 | gen6_rps_irq_handler(dev_priv, pm_iir); | ||
2184 | } | 2216 | } |
2185 | } | 2217 | } |
2186 | 2218 | ||
@@ -2211,36 +2243,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2211 | I915_WRITE(GEN8_MASTER_IRQ, 0); | 2243 | I915_WRITE(GEN8_MASTER_IRQ, 0); |
2212 | POSTING_READ(GEN8_MASTER_IRQ); | 2244 | POSTING_READ(GEN8_MASTER_IRQ); |
2213 | 2245 | ||
2246 | /* Find, clear, then process each source of interrupt */ | ||
2247 | |||
2214 | ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); | 2248 | ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); |
2215 | 2249 | ||
2216 | if (master_ctl & GEN8_DE_MISC_IRQ) { | 2250 | if (master_ctl & GEN8_DE_MISC_IRQ) { |
2217 | tmp = I915_READ(GEN8_DE_MISC_IIR); | 2251 | tmp = I915_READ(GEN8_DE_MISC_IIR); |
2218 | if (tmp & GEN8_DE_MISC_GSE) | ||
2219 | intel_opregion_asle_intr(dev); | ||
2220 | else if (tmp) | ||
2221 | DRM_ERROR("Unexpected DE Misc interrupt\n"); | ||
2222 | else | ||
2223 | DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); | ||
2224 | |||
2225 | if (tmp) { | 2252 | if (tmp) { |
2226 | I915_WRITE(GEN8_DE_MISC_IIR, tmp); | 2253 | I915_WRITE(GEN8_DE_MISC_IIR, tmp); |
2227 | ret = IRQ_HANDLED; | 2254 | ret = IRQ_HANDLED; |
2255 | if (tmp & GEN8_DE_MISC_GSE) | ||
2256 | intel_opregion_asle_intr(dev); | ||
2257 | else | ||
2258 | DRM_ERROR("Unexpected DE Misc interrupt\n"); | ||
2228 | } | 2259 | } |
2260 | else | ||
2261 | DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); | ||
2229 | } | 2262 | } |
2230 | 2263 | ||
2231 | if (master_ctl & GEN8_DE_PORT_IRQ) { | 2264 | if (master_ctl & GEN8_DE_PORT_IRQ) { |
2232 | tmp = I915_READ(GEN8_DE_PORT_IIR); | 2265 | tmp = I915_READ(GEN8_DE_PORT_IIR); |
2233 | if (tmp & GEN8_AUX_CHANNEL_A) | ||
2234 | dp_aux_irq_handler(dev); | ||
2235 | else if (tmp) | ||
2236 | DRM_ERROR("Unexpected DE Port interrupt\n"); | ||
2237 | else | ||
2238 | DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); | ||
2239 | |||
2240 | if (tmp) { | 2266 | if (tmp) { |
2241 | I915_WRITE(GEN8_DE_PORT_IIR, tmp); | 2267 | I915_WRITE(GEN8_DE_PORT_IIR, tmp); |
2242 | ret = IRQ_HANDLED; | 2268 | ret = IRQ_HANDLED; |
2269 | if (tmp & GEN8_AUX_CHANNEL_A) | ||
2270 | dp_aux_irq_handler(dev); | ||
2271 | else | ||
2272 | DRM_ERROR("Unexpected DE Port interrupt\n"); | ||
2243 | } | 2273 | } |
2274 | else | ||
2275 | DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); | ||
2244 | } | 2276 | } |
2245 | 2277 | ||
2246 | for_each_pipe(pipe) { | 2278 | for_each_pipe(pipe) { |
@@ -2250,33 +2282,32 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2250 | continue; | 2282 | continue; |
2251 | 2283 | ||
2252 | pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); | 2284 | pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); |
2253 | if (pipe_iir & GEN8_PIPE_VBLANK) | 2285 | if (pipe_iir) { |
2254 | intel_pipe_handle_vblank(dev, pipe); | 2286 | ret = IRQ_HANDLED; |
2255 | 2287 | I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); | |
2256 | if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { | 2288 | if (pipe_iir & GEN8_PIPE_VBLANK) |
2257 | intel_prepare_page_flip(dev, pipe); | 2289 | intel_pipe_handle_vblank(dev, pipe); |
2258 | intel_finish_page_flip_plane(dev, pipe); | ||
2259 | } | ||
2260 | 2290 | ||
2261 | if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) | 2291 | if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { |
2262 | hsw_pipe_crc_irq_handler(dev, pipe); | 2292 | intel_prepare_page_flip(dev, pipe); |
2293 | intel_finish_page_flip_plane(dev, pipe); | ||
2294 | } | ||
2263 | 2295 | ||
2264 | if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { | 2296 | if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) |
2265 | if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, | 2297 | hsw_pipe_crc_irq_handler(dev, pipe); |
2266 | false)) | ||
2267 | DRM_ERROR("Pipe %c FIFO underrun\n", | ||
2268 | pipe_name(pipe)); | ||
2269 | } | ||
2270 | 2298 | ||
2271 | if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { | 2299 | if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { |
2272 | DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", | 2300 | if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, |
2273 | pipe_name(pipe), | 2301 | false)) |
2274 | pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); | 2302 | DRM_ERROR("Pipe %c FIFO underrun\n", |
2275 | } | 2303 | pipe_name(pipe)); |
2304 | } | ||
2276 | 2305 | ||
2277 | if (pipe_iir) { | 2306 | if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { |
2278 | ret = IRQ_HANDLED; | 2307 | DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", |
2279 | I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); | 2308 | pipe_name(pipe), |
2309 | pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); | ||
2310 | } | ||
2280 | } else | 2311 | } else |
2281 | DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); | 2312 | DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); |
2282 | } | 2313 | } |
@@ -2288,13 +2319,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2288 | * on older pch-split platforms. But this needs testing. | 2319 | * on older pch-split platforms. But this needs testing. |
2289 | */ | 2320 | */ |
2290 | u32 pch_iir = I915_READ(SDEIIR); | 2321 | u32 pch_iir = I915_READ(SDEIIR); |
2291 | |||
2292 | cpt_irq_handler(dev, pch_iir); | ||
2293 | |||
2294 | if (pch_iir) { | 2322 | if (pch_iir) { |
2295 | I915_WRITE(SDEIIR, pch_iir); | 2323 | I915_WRITE(SDEIIR, pch_iir); |
2296 | ret = IRQ_HANDLED; | 2324 | ret = IRQ_HANDLED; |
2297 | } | 2325 | cpt_irq_handler(dev, pch_iir); |
2326 | } else | ||
2327 | DRM_ERROR("The master control interrupt lied (SDE)!\n"); | ||
2328 | |||
2298 | } | 2329 | } |
2299 | 2330 | ||
2300 | I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); | 2331 | I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); |
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index d05a2afa17dc..81457293cd3e 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c | |||
@@ -48,6 +48,7 @@ struct i915_params i915 __read_mostly = { | |||
48 | .disable_display = 0, | 48 | .disable_display = 0, |
49 | .enable_cmd_parser = 1, | 49 | .enable_cmd_parser = 1, |
50 | .disable_vtd_wa = 0, | 50 | .disable_vtd_wa = 0, |
51 | .use_mmio_flip = 0, | ||
51 | }; | 52 | }; |
52 | 53 | ||
53 | module_param_named(modeset, i915.modeset, int, 0400); | 54 | module_param_named(modeset, i915.modeset, int, 0400); |
@@ -156,3 +157,7 @@ MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)" | |||
156 | module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); | 157 | module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); |
157 | MODULE_PARM_DESC(enable_cmd_parser, | 158 | MODULE_PARM_DESC(enable_cmd_parser, |
158 | "Enable command parsing (1=enabled [default], 0=disabled)"); | 159 | "Enable command parsing (1=enabled [default], 0=disabled)"); |
160 | |||
161 | module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600); | ||
162 | MODULE_PARM_DESC(use_mmio_flip, | ||
163 | "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); | ||
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e691b30b2817..348856787b7c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -29,8 +29,8 @@ | |||
29 | #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) | 29 | #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) |
30 | 30 | ||
31 | #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) | 31 | #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) |
32 | #define _PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c) | 32 | #define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \ |
33 | #define _PORT3(port, a, b, c) (port < 2 ? _PORT(port, a, b) : c) | 33 | (pipe) == PIPE_B ? (b) : (c)) |
34 | 34 | ||
35 | #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) | 35 | #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) |
36 | #define _MASKED_BIT_DISABLE(a) ((a) << 16) | 36 | #define _MASKED_BIT_DISABLE(a) ((a) << 16) |
@@ -529,6 +529,16 @@ enum punit_power_well { | |||
529 | #define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ | 529 | #define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ |
530 | #define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */ | 530 | #define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */ |
531 | 531 | ||
532 | #define PUNIT_GPU_STATUS_REG 0xdb | ||
533 | #define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16 | ||
534 | #define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff | ||
535 | #define PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT 8 | ||
536 | #define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK 0xff | ||
537 | |||
538 | #define PUNIT_GPU_DUTYCYCLE_REG 0xdf | ||
539 | #define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT 8 | ||
540 | #define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK 0xff | ||
541 | |||
532 | #define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c | 542 | #define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c |
533 | #define FB_GFX_MAX_FREQ_FUSE_SHIFT 3 | 543 | #define FB_GFX_MAX_FREQ_FUSE_SHIFT 3 |
534 | #define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8 | 544 | #define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8 |
@@ -761,6 +771,8 @@ enum punit_power_well { | |||
761 | 771 | ||
762 | #define _VLV_PCS_DW8_CH0 0x8220 | 772 | #define _VLV_PCS_DW8_CH0 0x8220 |
763 | #define _VLV_PCS_DW8_CH1 0x8420 | 773 | #define _VLV_PCS_DW8_CH1 0x8420 |
774 | #define CHV_PCS_USEDCLKCHANNEL_OVRRIDE (1 << 20) | ||
775 | #define CHV_PCS_USEDCLKCHANNEL (1 << 21) | ||
764 | #define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1) | 776 | #define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1) |
765 | 777 | ||
766 | #define _VLV_PCS01_DW8_CH0 0x0220 | 778 | #define _VLV_PCS01_DW8_CH0 0x0220 |
@@ -869,6 +881,16 @@ enum punit_power_well { | |||
869 | #define DPIO_CHV_PROP_COEFF_SHIFT 0 | 881 | #define DPIO_CHV_PROP_COEFF_SHIFT 0 |
870 | #define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1) | 882 | #define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1) |
871 | 883 | ||
884 | #define _CHV_CMN_DW5_CH0 0x8114 | ||
885 | #define CHV_BUFRIGHTENA1_DISABLE (0 << 20) | ||
886 | #define CHV_BUFRIGHTENA1_NORMAL (1 << 20) | ||
887 | #define CHV_BUFRIGHTENA1_FORCE (3 << 20) | ||
888 | #define CHV_BUFRIGHTENA1_MASK (3 << 20) | ||
889 | #define CHV_BUFLEFTENA1_DISABLE (0 << 22) | ||
890 | #define CHV_BUFLEFTENA1_NORMAL (1 << 22) | ||
891 | #define CHV_BUFLEFTENA1_FORCE (3 << 22) | ||
892 | #define CHV_BUFLEFTENA1_MASK (3 << 22) | ||
893 | |||
872 | #define _CHV_CMN_DW13_CH0 0x8134 | 894 | #define _CHV_CMN_DW13_CH0 0x8134 |
873 | #define _CHV_CMN_DW0_CH1 0x8080 | 895 | #define _CHV_CMN_DW0_CH1 0x8080 |
874 | #define DPIO_CHV_S1_DIV_SHIFT 21 | 896 | #define DPIO_CHV_S1_DIV_SHIFT 21 |
@@ -883,8 +905,21 @@ enum punit_power_well { | |||
883 | #define _CHV_CMN_DW1_CH1 0x8084 | 905 | #define _CHV_CMN_DW1_CH1 0x8084 |
884 | #define DPIO_AFC_RECAL (1 << 14) | 906 | #define DPIO_AFC_RECAL (1 << 14) |
885 | #define DPIO_DCLKP_EN (1 << 13) | 907 | #define DPIO_DCLKP_EN (1 << 13) |
908 | #define CHV_BUFLEFTENA2_DISABLE (0 << 17) /* CL2 DW1 only */ | ||
909 | #define CHV_BUFLEFTENA2_NORMAL (1 << 17) /* CL2 DW1 only */ | ||
910 | #define CHV_BUFLEFTENA2_FORCE (3 << 17) /* CL2 DW1 only */ | ||
911 | #define CHV_BUFLEFTENA2_MASK (3 << 17) /* CL2 DW1 only */ | ||
912 | #define CHV_BUFRIGHTENA2_DISABLE (0 << 19) /* CL2 DW1 only */ | ||
913 | #define CHV_BUFRIGHTENA2_NORMAL (1 << 19) /* CL2 DW1 only */ | ||
914 | #define CHV_BUFRIGHTENA2_FORCE (3 << 19) /* CL2 DW1 only */ | ||
915 | #define CHV_BUFRIGHTENA2_MASK (3 << 19) /* CL2 DW1 only */ | ||
886 | #define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1) | 916 | #define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1) |
887 | 917 | ||
918 | #define _CHV_CMN_DW19_CH0 0x814c | ||
919 | #define _CHV_CMN_DW6_CH1 0x8098 | ||
920 | #define CHV_CMN_USEDCLKCHANNEL (1 << 13) | ||
921 | #define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1) | ||
922 | |||
888 | #define CHV_CMN_DW30 0x8178 | 923 | #define CHV_CMN_DW30 0x8178 |
889 | #define DPIO_LRC_BYPASS (1 << 3) | 924 | #define DPIO_LRC_BYPASS (1 << 3) |
890 | 925 | ||
@@ -933,6 +968,7 @@ enum punit_power_well { | |||
933 | #define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 | 968 | #define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 |
934 | #define GEN7_FENCE_MAX_PITCH_VAL 0x0800 | 969 | #define GEN7_FENCE_MAX_PITCH_VAL 0x0800 |
935 | 970 | ||
971 | |||
936 | /* control register for cpu gtt access */ | 972 | /* control register for cpu gtt access */ |
937 | #define TILECTL 0x101000 | 973 | #define TILECTL 0x101000 |
938 | #define TILECTL_SWZCTL (1 << 0) | 974 | #define TILECTL_SWZCTL (1 << 0) |
@@ -1167,6 +1203,8 @@ enum punit_power_well { | |||
1167 | #define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8) | 1203 | #define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8) |
1168 | #define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac) | 1204 | #define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac) |
1169 | #define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120) | 1205 | #define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120) |
1206 | #define VLV_PCBR_ADDR_SHIFT 12 | ||
1207 | |||
1170 | #define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */ | 1208 | #define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */ |
1171 | #define EIR 0x020b0 | 1209 | #define EIR 0x020b0 |
1172 | #define EMR 0x020b4 | 1210 | #define EMR 0x020b4 |
@@ -1567,11 +1605,10 @@ enum punit_power_well { | |||
1567 | /* | 1605 | /* |
1568 | * Clock control & power management | 1606 | * Clock control & power management |
1569 | */ | 1607 | */ |
1570 | #define DPLL_A_OFFSET 0x6014 | 1608 | #define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014) |
1571 | #define DPLL_B_OFFSET 0x6018 | 1609 | #define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018) |
1572 | #define CHV_DPLL_C_OFFSET 0x6030 | 1610 | #define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030) |
1573 | #define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \ | 1611 | #define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C) |
1574 | dev_priv->info.display_mmio_offset) | ||
1575 | 1612 | ||
1576 | #define VGA0 0x6000 | 1613 | #define VGA0 0x6000 |
1577 | #define VGA1 0x6004 | 1614 | #define VGA1 0x6004 |
@@ -1659,11 +1696,10 @@ enum punit_power_well { | |||
1659 | #define SDVO_MULTIPLIER_SHIFT_HIRES 4 | 1696 | #define SDVO_MULTIPLIER_SHIFT_HIRES 4 |
1660 | #define SDVO_MULTIPLIER_SHIFT_VGA 0 | 1697 | #define SDVO_MULTIPLIER_SHIFT_VGA 0 |
1661 | 1698 | ||
1662 | #define DPLL_A_MD_OFFSET 0x601c /* 965+ only */ | 1699 | #define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c) |
1663 | #define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */ | 1700 | #define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020) |
1664 | #define CHV_DPLL_C_MD_OFFSET 0x603c | 1701 | #define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c) |
1665 | #define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \ | 1702 | #define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD) |
1666 | dev_priv->info.display_mmio_offset) | ||
1667 | 1703 | ||
1668 | /* | 1704 | /* |
1669 | * UDI pixel divider, controlling how many pixels are stuffed into a packet. | 1705 | * UDI pixel divider, controlling how many pixels are stuffed into a packet. |
@@ -2373,6 +2409,7 @@ enum punit_power_well { | |||
2373 | #define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) | 2409 | #define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) |
2374 | #define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0) | 2410 | #define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0) |
2375 | #define EDP_PSR_ENABLE (1<<31) | 2411 | #define EDP_PSR_ENABLE (1<<31) |
2412 | #define BDW_PSR_SINGLE_FRAME (1<<30) | ||
2376 | #define EDP_PSR_LINK_DISABLE (0<<27) | 2413 | #define EDP_PSR_LINK_DISABLE (0<<27) |
2377 | #define EDP_PSR_LINK_STANDBY (1<<27) | 2414 | #define EDP_PSR_LINK_STANDBY (1<<27) |
2378 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25) | 2415 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25) |
@@ -2530,8 +2567,14 @@ enum punit_power_well { | |||
2530 | #define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28) | 2567 | #define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28) |
2531 | #define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29) | 2568 | #define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29) |
2532 | #define PORTD_HOTPLUG_INT_STATUS (3 << 21) | 2569 | #define PORTD_HOTPLUG_INT_STATUS (3 << 21) |
2570 | #define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21) | ||
2571 | #define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21) | ||
2533 | #define PORTC_HOTPLUG_INT_STATUS (3 << 19) | 2572 | #define PORTC_HOTPLUG_INT_STATUS (3 << 19) |
2573 | #define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19) | ||
2574 | #define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19) | ||
2534 | #define PORTB_HOTPLUG_INT_STATUS (3 << 17) | 2575 | #define PORTB_HOTPLUG_INT_STATUS (3 << 17) |
2576 | #define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17) | ||
2577 | #define PORTB_HOTPLUG_INT_SHORT_PLUSE (1 << 17) | ||
2535 | /* CRT/TV common between gen3+ */ | 2578 | /* CRT/TV common between gen3+ */ |
2536 | #define CRT_HOTPLUG_INT_STATUS (1 << 11) | 2579 | #define CRT_HOTPLUG_INT_STATUS (1 << 11) |
2537 | #define TV_HOTPLUG_INT_STATUS (1 << 10) | 2580 | #define TV_HOTPLUG_INT_STATUS (1 << 10) |
@@ -2585,7 +2628,7 @@ enum punit_power_well { | |||
2585 | 2628 | ||
2586 | #define PORT_DFT_I9XX 0x61150 | 2629 | #define PORT_DFT_I9XX 0x61150 |
2587 | #define DC_BALANCE_RESET (1 << 25) | 2630 | #define DC_BALANCE_RESET (1 << 25) |
2588 | #define PORT_DFT2_G4X 0x61154 | 2631 | #define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154) |
2589 | #define DC_BALANCE_RESET_VLV (1 << 31) | 2632 | #define DC_BALANCE_RESET_VLV (1 << 31) |
2590 | #define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0) | 2633 | #define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0) |
2591 | #define PIPE_B_SCRAMBLE_RESET (1 << 1) | 2634 | #define PIPE_B_SCRAMBLE_RESET (1 << 1) |
@@ -4627,6 +4670,8 @@ enum punit_power_well { | |||
4627 | #define GEN7_L3CNTLREG1 0xB01C | 4670 | #define GEN7_L3CNTLREG1 0xB01C |
4628 | #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C | 4671 | #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C |
4629 | #define GEN7_L3AGDIS (1<<19) | 4672 | #define GEN7_L3AGDIS (1<<19) |
4673 | #define GEN7_L3CNTLREG2 0xB020 | ||
4674 | #define GEN7_L3CNTLREG3 0xB024 | ||
4630 | 4675 | ||
4631 | #define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 | 4676 | #define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 |
4632 | #define GEN7_WA_L3_CHICKEN_MODE 0x20000000 | 4677 | #define GEN7_WA_L3_CHICKEN_MODE 0x20000000 |
@@ -4873,8 +4918,7 @@ enum punit_power_well { | |||
4873 | #define _PCH_TRANSA_LINK_M2 0xe0048 | 4918 | #define _PCH_TRANSA_LINK_M2 0xe0048 |
4874 | #define _PCH_TRANSA_LINK_N2 0xe004c | 4919 | #define _PCH_TRANSA_LINK_N2 0xe004c |
4875 | 4920 | ||
4876 | /* Per-transcoder DIP controls */ | 4921 | /* Per-transcoder DIP controls (PCH) */ |
4877 | |||
4878 | #define _VIDEO_DIP_CTL_A 0xe0200 | 4922 | #define _VIDEO_DIP_CTL_A 0xe0200 |
4879 | #define _VIDEO_DIP_DATA_A 0xe0208 | 4923 | #define _VIDEO_DIP_DATA_A 0xe0208 |
4880 | #define _VIDEO_DIP_GCP_A 0xe0210 | 4924 | #define _VIDEO_DIP_GCP_A 0xe0210 |
@@ -4887,6 +4931,7 @@ enum punit_power_well { | |||
4887 | #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) | 4931 | #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) |
4888 | #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) | 4932 | #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) |
4889 | 4933 | ||
4934 | /* Per-transcoder DIP controls (VLV) */ | ||
4890 | #define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200) | 4935 | #define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200) |
4891 | #define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208) | 4936 | #define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208) |
4892 | #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210) | 4937 | #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210) |
@@ -4895,12 +4940,19 @@ enum punit_power_well { | |||
4895 | #define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174) | 4940 | #define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174) |
4896 | #define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178) | 4941 | #define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178) |
4897 | 4942 | ||
4943 | #define CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0) | ||
4944 | #define CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4) | ||
4945 | #define CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8) | ||
4946 | |||
4898 | #define VLV_TVIDEO_DIP_CTL(pipe) \ | 4947 | #define VLV_TVIDEO_DIP_CTL(pipe) \ |
4899 | _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B) | 4948 | _PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \ |
4949 | VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C) | ||
4900 | #define VLV_TVIDEO_DIP_DATA(pipe) \ | 4950 | #define VLV_TVIDEO_DIP_DATA(pipe) \ |
4901 | _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B) | 4951 | _PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \ |
4952 | VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C) | ||
4902 | #define VLV_TVIDEO_DIP_GCP(pipe) \ | 4953 | #define VLV_TVIDEO_DIP_GCP(pipe) \ |
4903 | _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B) | 4954 | _PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \ |
4955 | VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C) | ||
4904 | 4956 | ||
4905 | /* Haswell DIP controls */ | 4957 | /* Haswell DIP controls */ |
4906 | #define HSW_VIDEO_DIP_CTL_A 0x60200 | 4958 | #define HSW_VIDEO_DIP_CTL_A 0x60200 |
@@ -5771,7 +5823,6 @@ enum punit_power_well { | |||
5771 | #define DDI_BUF_CTL_B 0x64100 | 5823 | #define DDI_BUF_CTL_B 0x64100 |
5772 | #define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) | 5824 | #define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) |
5773 | #define DDI_BUF_CTL_ENABLE (1<<31) | 5825 | #define DDI_BUF_CTL_ENABLE (1<<31) |
5774 | /* Haswell */ | ||
5775 | #define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ | 5826 | #define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ |
5776 | #define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ | 5827 | #define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ |
5777 | #define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ | 5828 | #define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ |
@@ -5781,16 +5832,6 @@ enum punit_power_well { | |||
5781 | #define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ | 5832 | #define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ |
5782 | #define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ | 5833 | #define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ |
5783 | #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ | 5834 | #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ |
5784 | /* Broadwell */ | ||
5785 | #define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */ | ||
5786 | #define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */ | ||
5787 | #define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */ | ||
5788 | #define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */ | ||
5789 | #define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */ | ||
5790 | #define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */ | ||
5791 | #define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */ | ||
5792 | #define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */ | ||
5793 | #define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */ | ||
5794 | #define DDI_BUF_EMP_MASK (0xf<<24) | 5835 | #define DDI_BUF_EMP_MASK (0xf<<24) |
5795 | #define DDI_BUF_PORT_REVERSAL (1<<16) | 5836 | #define DDI_BUF_PORT_REVERSAL (1<<16) |
5796 | #define DDI_BUF_IS_IDLE (1<<7) | 5837 | #define DDI_BUF_IS_IDLE (1<<7) |
@@ -6002,7 +6043,8 @@ enum punit_power_well { | |||
6002 | 6043 | ||
6003 | #define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) | 6044 | #define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) |
6004 | #define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) | 6045 | #define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) |
6005 | #define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL) | 6046 | #define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \ |
6047 | _MIPIB_PORT_CTRL) | ||
6006 | #define DPI_ENABLE (1 << 31) /* A + B */ | 6048 | #define DPI_ENABLE (1 << 31) /* A + B */ |
6007 | #define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 | 6049 | #define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 |
6008 | #define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27) | 6050 | #define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27) |
@@ -6044,18 +6086,20 @@ enum punit_power_well { | |||
6044 | 6086 | ||
6045 | #define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194) | 6087 | #define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194) |
6046 | #define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704) | 6088 | #define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704) |
6047 | #define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL) | 6089 | #define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \ |
6090 | _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL) | ||
6048 | #define TEARING_EFFECT_DELAY_SHIFT 0 | 6091 | #define TEARING_EFFECT_DELAY_SHIFT 0 |
6049 | #define TEARING_EFFECT_DELAY_MASK (0xffff << 0) | 6092 | #define TEARING_EFFECT_DELAY_MASK (0xffff << 0) |
6050 | 6093 | ||
6051 | /* XXX: all bits reserved */ | 6094 | /* XXX: all bits reserved */ |
6052 | #define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0) | 6095 | #define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0) |
6053 | 6096 | ||
6054 | /* MIPI DSI Controller and D-PHY registers */ | 6097 | /* MIPI DSI Controller and D-PHY registers */ |
6055 | 6098 | ||
6056 | #define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000) | 6099 | #define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000) |
6057 | #define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800) | 6100 | #define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800) |
6058 | #define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY) | 6101 | #define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \ |
6102 | _MIPIB_DEVICE_READY) | ||
6059 | #define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */ | 6103 | #define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */ |
6060 | #define ULPS_STATE_MASK (3 << 1) | 6104 | #define ULPS_STATE_MASK (3 << 1) |
6061 | #define ULPS_STATE_ENTER (2 << 1) | 6105 | #define ULPS_STATE_ENTER (2 << 1) |
@@ -6063,12 +6107,14 @@ enum punit_power_well { | |||
6063 | #define ULPS_STATE_NORMAL_OPERATION (0 << 1) | 6107 | #define ULPS_STATE_NORMAL_OPERATION (0 << 1) |
6064 | #define DEVICE_READY (1 << 0) | 6108 | #define DEVICE_READY (1 << 0) |
6065 | 6109 | ||
6066 | #define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004) | 6110 | #define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004) |
6067 | #define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804) | 6111 | #define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804) |
6068 | #define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT) | 6112 | #define MIPI_INTR_STAT(tc) _TRANSCODER(tc, _MIPIA_INTR_STAT, \ |
6069 | #define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008) | 6113 | _MIPIB_INTR_STAT) |
6070 | #define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808) | 6114 | #define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008) |
6071 | #define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN) | 6115 | #define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808) |
6116 | #define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \ | ||
6117 | _MIPIB_INTR_EN) | ||
6072 | #define TEARING_EFFECT (1 << 31) | 6118 | #define TEARING_EFFECT (1 << 31) |
6073 | #define SPL_PKT_SENT_INTERRUPT (1 << 30) | 6119 | #define SPL_PKT_SENT_INTERRUPT (1 << 30) |
6074 | #define GEN_READ_DATA_AVAIL (1 << 29) | 6120 | #define GEN_READ_DATA_AVAIL (1 << 29) |
@@ -6102,9 +6148,10 @@ enum punit_power_well { | |||
6102 | #define RXSOT_SYNC_ERROR (1 << 1) | 6148 | #define RXSOT_SYNC_ERROR (1 << 1) |
6103 | #define RXSOT_ERROR (1 << 0) | 6149 | #define RXSOT_ERROR (1 << 0) |
6104 | 6150 | ||
6105 | #define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c) | 6151 | #define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c) |
6106 | #define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c) | 6152 | #define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c) |
6107 | #define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG) | 6153 | #define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \ |
6154 | _MIPIB_DSI_FUNC_PRG) | ||
6108 | #define CMD_MODE_DATA_WIDTH_MASK (7 << 13) | 6155 | #define CMD_MODE_DATA_WIDTH_MASK (7 << 13) |
6109 | #define CMD_MODE_NOT_SUPPORTED (0 << 13) | 6156 | #define CMD_MODE_NOT_SUPPORTED (0 << 13) |
6110 | #define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13) | 6157 | #define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13) |
@@ -6125,78 +6172,94 @@ enum punit_power_well { | |||
6125 | #define DATA_LANES_PRG_REG_SHIFT 0 | 6172 | #define DATA_LANES_PRG_REG_SHIFT 0 |
6126 | #define DATA_LANES_PRG_REG_MASK (7 << 0) | 6173 | #define DATA_LANES_PRG_REG_MASK (7 << 0) |
6127 | 6174 | ||
6128 | #define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010) | 6175 | #define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010) |
6129 | #define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810) | 6176 | #define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810) |
6130 | #define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT) | 6177 | #define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \ |
6178 | _MIPIB_HS_TX_TIMEOUT) | ||
6131 | #define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff | 6179 | #define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff |
6132 | 6180 | ||
6133 | #define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014) | 6181 | #define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014) |
6134 | #define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814) | 6182 | #define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814) |
6135 | #define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT) | 6183 | #define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \ |
6184 | _MIPIB_LP_RX_TIMEOUT) | ||
6136 | #define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff | 6185 | #define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff |
6137 | 6186 | ||
6138 | #define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018) | 6187 | #define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018) |
6139 | #define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818) | 6188 | #define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818) |
6140 | #define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT) | 6189 | #define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \ |
6190 | _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT) | ||
6141 | #define TURN_AROUND_TIMEOUT_MASK 0x3f | 6191 | #define TURN_AROUND_TIMEOUT_MASK 0x3f |
6142 | 6192 | ||
6143 | #define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c) | 6193 | #define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c) |
6144 | #define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c) | 6194 | #define _MIPIB_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c) |
6145 | #define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER) | 6195 | #define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \ |
6196 | _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER) | ||
6146 | #define DEVICE_RESET_TIMER_MASK 0xffff | 6197 | #define DEVICE_RESET_TIMER_MASK 0xffff |
6147 | 6198 | ||
6148 | #define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020) | 6199 | #define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020) |
6149 | #define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820) | 6200 | #define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820) |
6150 | #define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION) | 6201 | #define MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \ |
6202 | _MIPIB_DPI_RESOLUTION) | ||
6151 | #define VERTICAL_ADDRESS_SHIFT 16 | 6203 | #define VERTICAL_ADDRESS_SHIFT 16 |
6152 | #define VERTICAL_ADDRESS_MASK (0xffff << 16) | 6204 | #define VERTICAL_ADDRESS_MASK (0xffff << 16) |
6153 | #define HORIZONTAL_ADDRESS_SHIFT 0 | 6205 | #define HORIZONTAL_ADDRESS_SHIFT 0 |
6154 | #define HORIZONTAL_ADDRESS_MASK 0xffff | 6206 | #define HORIZONTAL_ADDRESS_MASK 0xffff |
6155 | 6207 | ||
6156 | #define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024) | 6208 | #define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024) |
6157 | #define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824) | 6209 | #define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824) |
6158 | #define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE) | 6210 | #define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \ |
6211 | _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE) | ||
6159 | #define DBI_FIFO_EMPTY_HALF (0 << 0) | 6212 | #define DBI_FIFO_EMPTY_HALF (0 << 0) |
6160 | #define DBI_FIFO_EMPTY_QUARTER (1 << 0) | 6213 | #define DBI_FIFO_EMPTY_QUARTER (1 << 0) |
6161 | #define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) | 6214 | #define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) |
6162 | 6215 | ||
6163 | /* regs below are bits 15:0 */ | 6216 | /* regs below are bits 15:0 */ |
6164 | #define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028) | 6217 | #define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028) |
6165 | #define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828) | 6218 | #define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828) |
6166 | #define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT) | 6219 | #define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \ |
6167 | 6220 | _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT) | |
6168 | #define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c) | 6221 | |
6169 | #define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c) | 6222 | #define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c) |
6170 | #define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT) | 6223 | #define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c) |
6171 | 6224 | #define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \ | |
6172 | #define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030) | 6225 | _MIPIB_HBP_COUNT) |
6173 | #define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830) | 6226 | |
6174 | #define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT) | 6227 | #define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030) |
6175 | 6228 | #define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830) | |
6176 | #define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034) | 6229 | #define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \ |
6177 | #define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834) | 6230 | _MIPIB_HFP_COUNT) |
6178 | #define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT) | 6231 | |
6179 | 6232 | #define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034) | |
6180 | #define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038) | 6233 | #define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834) |
6181 | #define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838) | 6234 | #define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \ |
6182 | #define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT) | 6235 | _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT) |
6183 | 6236 | ||
6184 | #define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c) | 6237 | #define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038) |
6185 | #define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c) | 6238 | #define _MIPIB_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838) |
6186 | #define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT) | 6239 | #define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \ |
6240 | _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT) | ||
6241 | |||
6242 | #define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c) | ||
6243 | #define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c) | ||
6244 | #define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \ | ||
6245 | _MIPIB_VBP_COUNT) | ||
6246 | |||
6247 | #define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040) | ||
6248 | #define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840) | ||
6249 | #define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \ | ||
6250 | _MIPIB_VFP_COUNT) | ||
6251 | |||
6252 | #define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044) | ||
6253 | #define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844) | ||
6254 | #define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \ | ||
6255 | _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT) | ||
6187 | 6256 | ||
6188 | #define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040) | ||
6189 | #define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840) | ||
6190 | #define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT) | ||
6191 | |||
6192 | #define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044) | ||
6193 | #define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844) | ||
6194 | #define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT) | ||
6195 | /* regs above are bits 15:0 */ | 6257 | /* regs above are bits 15:0 */ |
6196 | 6258 | ||
6197 | #define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048) | 6259 | #define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048) |
6198 | #define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848) | 6260 | #define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848) |
6199 | #define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL) | 6261 | #define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \ |
6262 | _MIPIB_DPI_CONTROL) | ||
6200 | #define DPI_LP_MODE (1 << 6) | 6263 | #define DPI_LP_MODE (1 << 6) |
6201 | #define BACKLIGHT_OFF (1 << 5) | 6264 | #define BACKLIGHT_OFF (1 << 5) |
6202 | #define BACKLIGHT_ON (1 << 4) | 6265 | #define BACKLIGHT_ON (1 << 4) |
@@ -6205,27 +6268,31 @@ enum punit_power_well { | |||
6205 | #define TURN_ON (1 << 1) | 6268 | #define TURN_ON (1 << 1) |
6206 | #define SHUTDOWN (1 << 0) | 6269 | #define SHUTDOWN (1 << 0) |
6207 | 6270 | ||
6208 | #define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c) | 6271 | #define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c) |
6209 | #define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c) | 6272 | #define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c) |
6210 | #define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA) | 6273 | #define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \ |
6274 | _MIPIB_DPI_DATA) | ||
6211 | #define COMMAND_BYTE_SHIFT 0 | 6275 | #define COMMAND_BYTE_SHIFT 0 |
6212 | #define COMMAND_BYTE_MASK (0x3f << 0) | 6276 | #define COMMAND_BYTE_MASK (0x3f << 0) |
6213 | 6277 | ||
6214 | #define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050) | 6278 | #define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050) |
6215 | #define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850) | 6279 | #define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850) |
6216 | #define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT) | 6280 | #define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \ |
6281 | _MIPIB_INIT_COUNT) | ||
6217 | #define MASTER_INIT_TIMER_SHIFT 0 | 6282 | #define MASTER_INIT_TIMER_SHIFT 0 |
6218 | #define MASTER_INIT_TIMER_MASK (0xffff << 0) | 6283 | #define MASTER_INIT_TIMER_MASK (0xffff << 0) |
6219 | 6284 | ||
6220 | #define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054) | 6285 | #define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054) |
6221 | #define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854) | 6286 | #define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854) |
6222 | #define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE) | 6287 | #define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \ |
6288 | _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE) | ||
6223 | #define MAX_RETURN_PKT_SIZE_SHIFT 0 | 6289 | #define MAX_RETURN_PKT_SIZE_SHIFT 0 |
6224 | #define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) | 6290 | #define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) |
6225 | 6291 | ||
6226 | #define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058) | 6292 | #define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058) |
6227 | #define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858) | 6293 | #define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858) |
6228 | #define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT) | 6294 | #define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \ |
6295 | _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT) | ||
6229 | #define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) | 6296 | #define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) |
6230 | #define DISABLE_VIDEO_BTA (1 << 3) | 6297 | #define DISABLE_VIDEO_BTA (1 << 3) |
6231 | #define IP_TG_CONFIG (1 << 2) | 6298 | #define IP_TG_CONFIG (1 << 2) |
@@ -6233,9 +6300,10 @@ enum punit_power_well { | |||
6233 | #define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0) | 6300 | #define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0) |
6234 | #define VIDEO_MODE_BURST (3 << 0) | 6301 | #define VIDEO_MODE_BURST (3 << 0) |
6235 | 6302 | ||
6236 | #define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c) | 6303 | #define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) |
6237 | #define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c) | 6304 | #define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) |
6238 | #define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE) | 6305 | #define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \ |
6306 | _MIPIB_EOT_DISABLE) | ||
6239 | #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) | 6307 | #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) |
6240 | #define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) | 6308 | #define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) |
6241 | #define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) | 6309 | #define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) |
@@ -6245,28 +6313,33 @@ enum punit_power_well { | |||
6245 | #define CLOCKSTOP (1 << 1) | 6313 | #define CLOCKSTOP (1 << 1) |
6246 | #define EOT_DISABLE (1 << 0) | 6314 | #define EOT_DISABLE (1 << 0) |
6247 | 6315 | ||
6248 | #define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060) | 6316 | #define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060) |
6249 | #define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860) | 6317 | #define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860) |
6250 | #define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK) | 6318 | #define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \ |
6319 | _MIPIB_LP_BYTECLK) | ||
6251 | #define LP_BYTECLK_SHIFT 0 | 6320 | #define LP_BYTECLK_SHIFT 0 |
6252 | #define LP_BYTECLK_MASK (0xffff << 0) | 6321 | #define LP_BYTECLK_MASK (0xffff << 0) |
6253 | 6322 | ||
6254 | /* bits 31:0 */ | 6323 | /* bits 31:0 */ |
6255 | #define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064) | 6324 | #define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064) |
6256 | #define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864) | 6325 | #define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864) |
6257 | #define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA) | 6326 | #define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \ |
6327 | _MIPIB_LP_GEN_DATA) | ||
6258 | 6328 | ||
6259 | /* bits 31:0 */ | 6329 | /* bits 31:0 */ |
6260 | #define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068) | 6330 | #define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068) |
6261 | #define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868) | 6331 | #define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868) |
6262 | #define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA) | 6332 | #define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \ |
6263 | 6333 | _MIPIB_HS_GEN_DATA) | |
6264 | #define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c) | 6334 | |
6265 | #define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c) | 6335 | #define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c) |
6266 | #define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL) | 6336 | #define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c) |
6267 | #define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070) | 6337 | #define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \ |
6268 | #define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870) | 6338 | _MIPIB_LP_GEN_CTRL) |
6269 | #define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL) | 6339 | #define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070) |
6340 | #define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870) | ||
6341 | #define MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \ | ||
6342 | _MIPIB_HS_GEN_CTRL) | ||
6270 | #define LONG_PACKET_WORD_COUNT_SHIFT 8 | 6343 | #define LONG_PACKET_WORD_COUNT_SHIFT 8 |
6271 | #define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) | 6344 | #define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) |
6272 | #define SHORT_PACKET_PARAM_SHIFT 8 | 6345 | #define SHORT_PACKET_PARAM_SHIFT 8 |
@@ -6277,9 +6350,10 @@ enum punit_power_well { | |||
6277 | #define DATA_TYPE_MASK (3f << 0) | 6350 | #define DATA_TYPE_MASK (3f << 0) |
6278 | /* data type values, see include/video/mipi_display.h */ | 6351 | /* data type values, see include/video/mipi_display.h */ |
6279 | 6352 | ||
6280 | #define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074) | 6353 | #define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) |
6281 | #define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874) | 6354 | #define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874) |
6282 | #define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT) | 6355 | #define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \ |
6356 | _MIPIB_GEN_FIFO_STAT) | ||
6283 | #define DPI_FIFO_EMPTY (1 << 28) | 6357 | #define DPI_FIFO_EMPTY (1 << 28) |
6284 | #define DBI_FIFO_EMPTY (1 << 27) | 6358 | #define DBI_FIFO_EMPTY (1 << 27) |
6285 | #define LP_CTRL_FIFO_EMPTY (1 << 26) | 6359 | #define LP_CTRL_FIFO_EMPTY (1 << 26) |
@@ -6295,16 +6369,18 @@ enum punit_power_well { | |||
6295 | #define HS_DATA_FIFO_HALF_EMPTY (1 << 1) | 6369 | #define HS_DATA_FIFO_HALF_EMPTY (1 << 1) |
6296 | #define HS_DATA_FIFO_FULL (1 << 0) | 6370 | #define HS_DATA_FIFO_FULL (1 << 0) |
6297 | 6371 | ||
6298 | #define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078) | 6372 | #define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078) |
6299 | #define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878) | 6373 | #define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878) |
6300 | #define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE) | 6374 | #define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \ |
6375 | _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE) | ||
6301 | #define DBI_HS_LP_MODE_MASK (1 << 0) | 6376 | #define DBI_HS_LP_MODE_MASK (1 << 0) |
6302 | #define DBI_LP_MODE (1 << 0) | 6377 | #define DBI_LP_MODE (1 << 0) |
6303 | #define DBI_HS_MODE (0 << 0) | 6378 | #define DBI_HS_MODE (0 << 0) |
6304 | 6379 | ||
6305 | #define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080) | 6380 | #define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080) |
6306 | #define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880) | 6381 | #define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880) |
6307 | #define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM) | 6382 | #define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \ |
6383 | _MIPIB_DPHY_PARAM) | ||
6308 | #define EXIT_ZERO_COUNT_SHIFT 24 | 6384 | #define EXIT_ZERO_COUNT_SHIFT 24 |
6309 | #define EXIT_ZERO_COUNT_MASK (0x3f << 24) | 6385 | #define EXIT_ZERO_COUNT_MASK (0x3f << 24) |
6310 | #define TRAIL_COUNT_SHIFT 16 | 6386 | #define TRAIL_COUNT_SHIFT 16 |
@@ -6315,34 +6391,41 @@ enum punit_power_well { | |||
6315 | #define PREPARE_COUNT_MASK (0x3f << 0) | 6391 | #define PREPARE_COUNT_MASK (0x3f << 0) |
6316 | 6392 | ||
6317 | /* bits 31:0 */ | 6393 | /* bits 31:0 */ |
6318 | #define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084) | 6394 | #define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) |
6319 | #define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884) | 6395 | #define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) |
6320 | #define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL) | 6396 | #define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \ |
6321 | 6397 | _MIPIB_DBI_BW_CTRL) | |
6322 | #define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088) | 6398 | |
6323 | #define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888) | 6399 | #define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \ |
6324 | #define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT) | 6400 | + 0xb088) |
6401 | #define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \ | ||
6402 | + 0xb888) | ||
6403 | #define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \ | ||
6404 | _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT) | ||
6325 | #define LP_HS_SSW_CNT_SHIFT 16 | 6405 | #define LP_HS_SSW_CNT_SHIFT 16 |
6326 | #define LP_HS_SSW_CNT_MASK (0xffff << 16) | 6406 | #define LP_HS_SSW_CNT_MASK (0xffff << 16) |
6327 | #define HS_LP_PWR_SW_CNT_SHIFT 0 | 6407 | #define HS_LP_PWR_SW_CNT_SHIFT 0 |
6328 | #define HS_LP_PWR_SW_CNT_MASK (0xffff << 0) | 6408 | #define HS_LP_PWR_SW_CNT_MASK (0xffff << 0) |
6329 | 6409 | ||
6330 | #define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c) | 6410 | #define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c) |
6331 | #define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c) | 6411 | #define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c) |
6332 | #define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL) | 6412 | #define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \ |
6413 | _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL) | ||
6333 | #define STOP_STATE_STALL_COUNTER_SHIFT 0 | 6414 | #define STOP_STATE_STALL_COUNTER_SHIFT 0 |
6334 | #define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) | 6415 | #define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) |
6335 | 6416 | ||
6336 | #define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090) | 6417 | #define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090) |
6337 | #define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890) | 6418 | #define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890) |
6338 | #define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1) | 6419 | #define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \ |
6339 | #define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094) | 6420 | _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1) |
6340 | #define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894) | 6421 | #define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094) |
6341 | #define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1) | 6422 | #define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894) |
6423 | #define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \ | ||
6424 | _MIPIB_INTR_EN_REG_1) | ||
6342 | #define RX_CONTENTION_DETECTED (1 << 0) | 6425 | #define RX_CONTENTION_DETECTED (1 << 0) |
6343 | 6426 | ||
6344 | /* XXX: only pipe A ?!? */ | 6427 | /* XXX: only pipe A ?!? */ |
6345 | #define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100) | 6428 | #define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100) |
6346 | #define DBI_TYPEC_ENABLE (1 << 31) | 6429 | #define DBI_TYPEC_ENABLE (1 << 31) |
6347 | #define DBI_TYPEC_WIP (1 << 30) | 6430 | #define DBI_TYPEC_WIP (1 << 30) |
6348 | #define DBI_TYPEC_OPTION_SHIFT 28 | 6431 | #define DBI_TYPEC_OPTION_SHIFT 28 |
@@ -6356,9 +6439,10 @@ enum punit_power_well { | |||
6356 | 6439 | ||
6357 | /* MIPI adapter registers */ | 6440 | /* MIPI adapter registers */ |
6358 | 6441 | ||
6359 | #define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104) | 6442 | #define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104) |
6360 | #define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904) | 6443 | #define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904) |
6361 | #define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL) | 6444 | #define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \ |
6445 | _MIPIB_CTRL) | ||
6362 | #define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ | 6446 | #define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ |
6363 | #define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) | 6447 | #define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) |
6364 | #define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) | 6448 | #define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) |
@@ -6370,50 +6454,52 @@ enum punit_power_well { | |||
6370 | #define READ_REQUEST_PRIORITY_HIGH (3 << 3) | 6454 | #define READ_REQUEST_PRIORITY_HIGH (3 << 3) |
6371 | #define RGB_FLIP_TO_BGR (1 << 2) | 6455 | #define RGB_FLIP_TO_BGR (1 << 2) |
6372 | 6456 | ||
6373 | #define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108) | 6457 | #define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) |
6374 | #define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908) | 6458 | #define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) |
6375 | #define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS) | 6459 | #define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \ |
6460 | _MIPIB_DATA_ADDRESS) | ||
6376 | #define DATA_MEM_ADDRESS_SHIFT 5 | 6461 | #define DATA_MEM_ADDRESS_SHIFT 5 |
6377 | #define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) | 6462 | #define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) |
6378 | #define DATA_VALID (1 << 0) | 6463 | #define DATA_VALID (1 << 0) |
6379 | 6464 | ||
6380 | #define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c) | 6465 | #define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c) |
6381 | #define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c) | 6466 | #define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c) |
6382 | #define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH) | 6467 | #define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \ |
6468 | _MIPIB_DATA_LENGTH) | ||
6383 | #define DATA_LENGTH_SHIFT 0 | 6469 | #define DATA_LENGTH_SHIFT 0 |
6384 | #define DATA_LENGTH_MASK (0xfffff << 0) | 6470 | #define DATA_LENGTH_MASK (0xfffff << 0) |
6385 | 6471 | ||
6386 | #define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110) | 6472 | #define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110) |
6387 | #define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910) | 6473 | #define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910) |
6388 | #define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS) | 6474 | #define MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \ |
6475 | _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS) | ||
6389 | #define COMMAND_MEM_ADDRESS_SHIFT 5 | 6476 | #define COMMAND_MEM_ADDRESS_SHIFT 5 |
6390 | #define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) | 6477 | #define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) |
6391 | #define AUTO_PWG_ENABLE (1 << 2) | 6478 | #define AUTO_PWG_ENABLE (1 << 2) |
6392 | #define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1) | 6479 | #define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1) |
6393 | #define COMMAND_VALID (1 << 0) | 6480 | #define COMMAND_VALID (1 << 0) |
6394 | 6481 | ||
6395 | #define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114) | 6482 | #define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114) |
6396 | #define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914) | 6483 | #define _MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914) |
6397 | #define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH) | 6484 | #define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \ |
6485 | _MIPIB_COMMAND_LENGTH) | ||
6398 | #define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ | 6486 | #define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ |
6399 | #define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) | 6487 | #define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) |
6400 | 6488 | ||
6401 | #define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118) | 6489 | #define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118) |
6402 | #define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918) | 6490 | #define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918) |
6403 | #define MIPI_READ_DATA_RETURN(pipe, n) \ | 6491 | #define MIPI_READ_DATA_RETURN(tc, n) \ |
6404 | (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */ | 6492 | (_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \ |
6493 | + 4 * (n)) /* n: 0...7 */ | ||
6405 | 6494 | ||
6406 | #define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138) | 6495 | #define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138) |
6407 | #define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938) | 6496 | #define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938) |
6408 | #define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID) | 6497 | #define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \ |
6498 | _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID) | ||
6409 | #define READ_DATA_VALID(n) (1 << (n)) | 6499 | #define READ_DATA_VALID(n) (1 << (n)) |
6410 | 6500 | ||
6411 | /* For UMS only (deprecated): */ | 6501 | /* For UMS only (deprecated): */ |
6412 | #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000) | 6502 | #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000) |
6413 | #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) | 6503 | #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) |
6414 | #define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014) | ||
6415 | #define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018) | ||
6416 | #define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c) | ||
6417 | #define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020) | ||
6418 | 6504 | ||
6419 | #endif /* _I915_REG_H_ */ | 6505 | #endif /* _I915_REG_H_ */ |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index b17b9c7c769f..ded60139820e 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -76,12 +76,12 @@ static const u32 bdw_ddi_translations_edp[] = { | |||
76 | 0x00FFFFFF, 0x00000012, /* eDP parameters */ | 76 | 0x00FFFFFF, 0x00000012, /* eDP parameters */ |
77 | 0x00EBAFFF, 0x00020011, | 77 | 0x00EBAFFF, 0x00020011, |
78 | 0x00C71FFF, 0x0006000F, | 78 | 0x00C71FFF, 0x0006000F, |
79 | 0x00AAAFFF, 0x000E000A, | ||
79 | 0x00FFFFFF, 0x00020011, | 80 | 0x00FFFFFF, 0x00020011, |
80 | 0x00DB6FFF, 0x0005000F, | 81 | 0x00DB6FFF, 0x0005000F, |
81 | 0x00BEEFFF, 0x000A000C, | 82 | 0x00BEEFFF, 0x000A000C, |
82 | 0x00FFFFFF, 0x0005000F, | 83 | 0x00FFFFFF, 0x0005000F, |
83 | 0x00DB6FFF, 0x000A000C, | 84 | 0x00DB6FFF, 0x000A000C, |
84 | 0x00FFFFFF, 0x000A000C, | ||
85 | 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ | 85 | 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ |
86 | }; | 86 | }; |
87 | 87 | ||
@@ -89,12 +89,12 @@ static const u32 bdw_ddi_translations_dp[] = { | |||
89 | 0x00FFFFFF, 0x0007000E, /* DP parameters */ | 89 | 0x00FFFFFF, 0x0007000E, /* DP parameters */ |
90 | 0x00D75FFF, 0x000E000A, | 90 | 0x00D75FFF, 0x000E000A, |
91 | 0x00BEFFFF, 0x00140006, | 91 | 0x00BEFFFF, 0x00140006, |
92 | 0x80B2CFFF, 0x001B0002, | ||
92 | 0x00FFFFFF, 0x000E000A, | 93 | 0x00FFFFFF, 0x000E000A, |
93 | 0x00D75FFF, 0x00180004, | 94 | 0x00D75FFF, 0x00180004, |
94 | 0x80CB2FFF, 0x001B0002, | 95 | 0x80CB2FFF, 0x001B0002, |
95 | 0x00F7DFFF, 0x00180004, | 96 | 0x00F7DFFF, 0x00180004, |
96 | 0x80D75FFF, 0x001B0002, | 97 | 0x80D75FFF, 0x001B0002, |
97 | 0x80FFFFFF, 0x001B0002, | ||
98 | 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ | 98 | 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ |
99 | }; | 99 | }; |
100 | 100 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1112d9ecc226..927d2476f60a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -39,12 +39,45 @@ | |||
39 | #include "i915_trace.h" | 39 | #include "i915_trace.h" |
40 | #include <drm/drm_dp_helper.h> | 40 | #include <drm/drm_dp_helper.h> |
41 | #include <drm/drm_crtc_helper.h> | 41 | #include <drm/drm_crtc_helper.h> |
42 | #include <drm/drm_plane_helper.h> | ||
43 | #include <drm/drm_rect.h> | ||
42 | #include <linux/dma_remapping.h> | 44 | #include <linux/dma_remapping.h> |
43 | 45 | ||
46 | /* Primary plane formats supported by all gen */ | ||
47 | #define COMMON_PRIMARY_FORMATS \ | ||
48 | DRM_FORMAT_C8, \ | ||
49 | DRM_FORMAT_RGB565, \ | ||
50 | DRM_FORMAT_XRGB8888, \ | ||
51 | DRM_FORMAT_ARGB8888 | ||
52 | |||
53 | /* Primary plane formats for gen <= 3 */ | ||
54 | static const uint32_t intel_primary_formats_gen2[] = { | ||
55 | COMMON_PRIMARY_FORMATS, | ||
56 | DRM_FORMAT_XRGB1555, | ||
57 | DRM_FORMAT_ARGB1555, | ||
58 | }; | ||
59 | |||
60 | /* Primary plane formats for gen >= 4 */ | ||
61 | static const uint32_t intel_primary_formats_gen4[] = { | ||
62 | COMMON_PRIMARY_FORMATS, \ | ||
63 | DRM_FORMAT_XBGR8888, | ||
64 | DRM_FORMAT_ABGR8888, | ||
65 | DRM_FORMAT_XRGB2101010, | ||
66 | DRM_FORMAT_ARGB2101010, | ||
67 | DRM_FORMAT_XBGR2101010, | ||
68 | DRM_FORMAT_ABGR2101010, | ||
69 | }; | ||
70 | |||
71 | /* Cursor formats */ | ||
72 | static const uint32_t intel_cursor_formats[] = { | ||
73 | DRM_FORMAT_ARGB8888, | ||
74 | }; | ||
75 | |||
44 | #define DIV_ROUND_CLOSEST_ULL(ll, d) \ | 76 | #define DIV_ROUND_CLOSEST_ULL(ll, d) \ |
45 | ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; }) | 77 | ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; }) |
46 | 78 | ||
47 | static void intel_increase_pllclock(struct drm_crtc *crtc); | 79 | static void intel_increase_pllclock(struct drm_device *dev, |
80 | enum pipe pipe); | ||
48 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); | 81 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); |
49 | 82 | ||
50 | static void i9xx_crtc_clock_get(struct intel_crtc *crtc, | 83 | static void i9xx_crtc_clock_get(struct intel_crtc *crtc, |
@@ -1712,6 +1745,17 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | |||
1712 | val &= ~DPIO_DCLKP_EN; | 1745 | val &= ~DPIO_DCLKP_EN; |
1713 | vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); | 1746 | vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); |
1714 | 1747 | ||
1748 | /* disable left/right clock distribution */ | ||
1749 | if (pipe != PIPE_B) { | ||
1750 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); | ||
1751 | val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); | ||
1752 | vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); | ||
1753 | } else { | ||
1754 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); | ||
1755 | val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); | ||
1756 | vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); | ||
1757 | } | ||
1758 | |||
1715 | mutex_unlock(&dev_priv->dpio_lock); | 1759 | mutex_unlock(&dev_priv->dpio_lock); |
1716 | } | 1760 | } |
1717 | 1761 | ||
@@ -1749,6 +1793,9 @@ static void intel_prepare_shared_dpll(struct intel_crtc *crtc) | |||
1749 | struct drm_i915_private *dev_priv = dev->dev_private; | 1793 | struct drm_i915_private *dev_priv = dev->dev_private; |
1750 | struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); | 1794 | struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); |
1751 | 1795 | ||
1796 | if (WARN_ON(pll == NULL)) | ||
1797 | return; | ||
1798 | |||
1752 | WARN_ON(!pll->refcount); | 1799 | WARN_ON(!pll->refcount); |
1753 | if (pll->active == 0) { | 1800 | if (pll->active == 0) { |
1754 | DRM_DEBUG_DRIVER("setting up %s\n", pll->name); | 1801 | DRM_DEBUG_DRIVER("setting up %s\n", pll->name); |
@@ -2314,6 +2361,7 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc, | |||
2314 | goto out_unref_obj; | 2361 | goto out_unref_obj; |
2315 | } | 2362 | } |
2316 | 2363 | ||
2364 | obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe); | ||
2317 | mutex_unlock(&dev->struct_mutex); | 2365 | mutex_unlock(&dev->struct_mutex); |
2318 | 2366 | ||
2319 | DRM_DEBUG_KMS("plane fb obj %p\n", obj); | 2367 | DRM_DEBUG_KMS("plane fb obj %p\n", obj); |
@@ -2359,6 +2407,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc, | |||
2359 | if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) { | 2407 | if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) { |
2360 | drm_framebuffer_reference(c->primary->fb); | 2408 | drm_framebuffer_reference(c->primary->fb); |
2361 | intel_crtc->base.primary->fb = c->primary->fb; | 2409 | intel_crtc->base.primary->fb = c->primary->fb; |
2410 | fb->obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); | ||
2362 | break; | 2411 | break; |
2363 | } | 2412 | } |
2364 | } | 2413 | } |
@@ -2546,7 +2595,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
2546 | 2595 | ||
2547 | if (dev_priv->display.disable_fbc) | 2596 | if (dev_priv->display.disable_fbc) |
2548 | dev_priv->display.disable_fbc(dev); | 2597 | dev_priv->display.disable_fbc(dev); |
2549 | intel_increase_pllclock(crtc); | 2598 | intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe); |
2550 | 2599 | ||
2551 | dev_priv->display.update_primary_plane(crtc, fb, x, y); | 2600 | dev_priv->display.update_primary_plane(crtc, fb, x, y); |
2552 | 2601 | ||
@@ -2647,7 +2696,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2647 | struct drm_device *dev = crtc->dev; | 2696 | struct drm_device *dev = crtc->dev; |
2648 | struct drm_i915_private *dev_priv = dev->dev_private; | 2697 | struct drm_i915_private *dev_priv = dev->dev_private; |
2649 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2698 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2699 | enum pipe pipe = intel_crtc->pipe; | ||
2650 | struct drm_framebuffer *old_fb; | 2700 | struct drm_framebuffer *old_fb; |
2701 | struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; | ||
2651 | int ret; | 2702 | int ret; |
2652 | 2703 | ||
2653 | if (intel_crtc_has_pending_flip(crtc)) { | 2704 | if (intel_crtc_has_pending_flip(crtc)) { |
@@ -2668,10 +2719,13 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2668 | return -EINVAL; | 2719 | return -EINVAL; |
2669 | } | 2720 | } |
2670 | 2721 | ||
2722 | old_fb = crtc->primary->fb; | ||
2723 | |||
2671 | mutex_lock(&dev->struct_mutex); | 2724 | mutex_lock(&dev->struct_mutex); |
2672 | ret = intel_pin_and_fence_fb_obj(dev, | 2725 | ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); |
2673 | to_intel_framebuffer(fb)->obj, | 2726 | if (ret == 0) |
2674 | NULL); | 2727 | i915_gem_track_fb(to_intel_framebuffer(old_fb)->obj, obj, |
2728 | INTEL_FRONTBUFFER_PRIMARY(pipe)); | ||
2675 | mutex_unlock(&dev->struct_mutex); | 2729 | mutex_unlock(&dev->struct_mutex); |
2676 | if (ret != 0) { | 2730 | if (ret != 0) { |
2677 | DRM_ERROR("pin & fence failed\n"); | 2731 | DRM_ERROR("pin & fence failed\n"); |
@@ -2711,7 +2765,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2711 | 2765 | ||
2712 | dev_priv->display.update_primary_plane(crtc, fb, x, y); | 2766 | dev_priv->display.update_primary_plane(crtc, fb, x, y); |
2713 | 2767 | ||
2714 | old_fb = crtc->primary->fb; | 2768 | if (intel_crtc->active) |
2769 | intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); | ||
2770 | |||
2715 | crtc->primary->fb = fb; | 2771 | crtc->primary->fb = fb; |
2716 | crtc->x = x; | 2772 | crtc->x = x; |
2717 | crtc->y = y; | 2773 | crtc->y = y; |
@@ -2726,7 +2782,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2726 | 2782 | ||
2727 | mutex_lock(&dev->struct_mutex); | 2783 | mutex_lock(&dev->struct_mutex); |
2728 | intel_update_fbc(dev); | 2784 | intel_update_fbc(dev); |
2729 | intel_edp_psr_update(dev); | ||
2730 | mutex_unlock(&dev->struct_mutex); | 2785 | mutex_unlock(&dev->struct_mutex); |
2731 | 2786 | ||
2732 | return 0; | 2787 | return 0; |
@@ -3892,6 +3947,8 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc) | |||
3892 | int pipe = intel_crtc->pipe; | 3947 | int pipe = intel_crtc->pipe; |
3893 | int plane = intel_crtc->plane; | 3948 | int plane = intel_crtc->plane; |
3894 | 3949 | ||
3950 | drm_vblank_on(dev, pipe); | ||
3951 | |||
3895 | intel_enable_primary_hw_plane(dev_priv, plane, pipe); | 3952 | intel_enable_primary_hw_plane(dev_priv, plane, pipe); |
3896 | intel_enable_planes(crtc); | 3953 | intel_enable_planes(crtc); |
3897 | /* The fixup needs to happen before cursor is enabled */ | 3954 | /* The fixup needs to happen before cursor is enabled */ |
@@ -3904,8 +3961,14 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc) | |||
3904 | 3961 | ||
3905 | mutex_lock(&dev->struct_mutex); | 3962 | mutex_lock(&dev->struct_mutex); |
3906 | intel_update_fbc(dev); | 3963 | intel_update_fbc(dev); |
3907 | intel_edp_psr_update(dev); | ||
3908 | mutex_unlock(&dev->struct_mutex); | 3964 | mutex_unlock(&dev->struct_mutex); |
3965 | |||
3966 | /* | ||
3967 | * FIXME: Once we grow proper nuclear flip support out of this we need | ||
3968 | * to compute the mask of flip planes precisely. For the time being | ||
3969 | * consider this a flip from a NULL plane. | ||
3970 | */ | ||
3971 | intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); | ||
3909 | } | 3972 | } |
3910 | 3973 | ||
3911 | static void intel_crtc_disable_planes(struct drm_crtc *crtc) | 3974 | static void intel_crtc_disable_planes(struct drm_crtc *crtc) |
@@ -3917,7 +3980,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc) | |||
3917 | int plane = intel_crtc->plane; | 3980 | int plane = intel_crtc->plane; |
3918 | 3981 | ||
3919 | intel_crtc_wait_for_pending_flips(crtc); | 3982 | intel_crtc_wait_for_pending_flips(crtc); |
3920 | drm_crtc_vblank_off(crtc); | ||
3921 | 3983 | ||
3922 | if (dev_priv->fbc.plane == plane) | 3984 | if (dev_priv->fbc.plane == plane) |
3923 | intel_disable_fbc(dev); | 3985 | intel_disable_fbc(dev); |
@@ -3928,6 +3990,15 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc) | |||
3928 | intel_crtc_update_cursor(crtc, false); | 3990 | intel_crtc_update_cursor(crtc, false); |
3929 | intel_disable_planes(crtc); | 3991 | intel_disable_planes(crtc); |
3930 | intel_disable_primary_hw_plane(dev_priv, plane, pipe); | 3992 | intel_disable_primary_hw_plane(dev_priv, plane, pipe); |
3993 | |||
3994 | /* | ||
3995 | * FIXME: Once we grow proper nuclear flip support out of this we need | ||
3996 | * to compute the mask of flip planes precisely. For the time being | ||
3997 | * consider this a flip to a NULL plane. | ||
3998 | */ | ||
3999 | intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); | ||
4000 | |||
4001 | drm_vblank_off(dev, pipe); | ||
3931 | } | 4002 | } |
3932 | 4003 | ||
3933 | static void ironlake_crtc_enable(struct drm_crtc *crtc) | 4004 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
@@ -4006,8 +4077,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
4006 | cpt_verify_modeset(dev, intel_crtc->pipe); | 4077 | cpt_verify_modeset(dev, intel_crtc->pipe); |
4007 | 4078 | ||
4008 | intel_crtc_enable_planes(crtc); | 4079 | intel_crtc_enable_planes(crtc); |
4009 | |||
4010 | drm_crtc_vblank_on(crtc); | ||
4011 | } | 4080 | } |
4012 | 4081 | ||
4013 | /* IPS only exists on ULT machines and is tied to pipe A. */ | 4082 | /* IPS only exists on ULT machines and is tied to pipe A. */ |
@@ -4121,8 +4190,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) | |||
4121 | * to change the workaround. */ | 4190 | * to change the workaround. */ |
4122 | haswell_mode_set_planes_workaround(intel_crtc); | 4191 | haswell_mode_set_planes_workaround(intel_crtc); |
4123 | intel_crtc_enable_planes(crtc); | 4192 | intel_crtc_enable_planes(crtc); |
4124 | |||
4125 | drm_crtc_vblank_on(crtc); | ||
4126 | } | 4193 | } |
4127 | 4194 | ||
4128 | static void ironlake_pfit_disable(struct intel_crtc *crtc) | 4195 | static void ironlake_pfit_disable(struct intel_crtc *crtc) |
@@ -4200,7 +4267,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
4200 | 4267 | ||
4201 | mutex_lock(&dev->struct_mutex); | 4268 | mutex_lock(&dev->struct_mutex); |
4202 | intel_update_fbc(dev); | 4269 | intel_update_fbc(dev); |
4203 | intel_edp_psr_update(dev); | ||
4204 | mutex_unlock(&dev->struct_mutex); | 4270 | mutex_unlock(&dev->struct_mutex); |
4205 | } | 4271 | } |
4206 | 4272 | ||
@@ -4248,7 +4314,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) | |||
4248 | 4314 | ||
4249 | mutex_lock(&dev->struct_mutex); | 4315 | mutex_lock(&dev->struct_mutex); |
4250 | intel_update_fbc(dev); | 4316 | intel_update_fbc(dev); |
4251 | intel_edp_psr_update(dev); | ||
4252 | mutex_unlock(&dev->struct_mutex); | 4317 | mutex_unlock(&dev->struct_mutex); |
4253 | } | 4318 | } |
4254 | 4319 | ||
@@ -4633,8 +4698,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) | |||
4633 | 4698 | ||
4634 | intel_crtc_enable_planes(crtc); | 4699 | intel_crtc_enable_planes(crtc); |
4635 | 4700 | ||
4636 | drm_crtc_vblank_on(crtc); | ||
4637 | |||
4638 | /* Underruns don't raise interrupts, so check manually. */ | 4701 | /* Underruns don't raise interrupts, so check manually. */ |
4639 | i9xx_check_fifo_underruns(dev); | 4702 | i9xx_check_fifo_underruns(dev); |
4640 | } | 4703 | } |
@@ -4727,8 +4790,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) | |||
4727 | if (IS_GEN2(dev)) | 4790 | if (IS_GEN2(dev)) |
4728 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); | 4791 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); |
4729 | 4792 | ||
4730 | drm_crtc_vblank_on(crtc); | ||
4731 | |||
4732 | /* Underruns don't raise interrupts, so check manually. */ | 4793 | /* Underruns don't raise interrupts, so check manually. */ |
4733 | i9xx_check_fifo_underruns(dev); | 4794 | i9xx_check_fifo_underruns(dev); |
4734 | } | 4795 | } |
@@ -4805,7 +4866,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) | |||
4805 | 4866 | ||
4806 | mutex_lock(&dev->struct_mutex); | 4867 | mutex_lock(&dev->struct_mutex); |
4807 | intel_update_fbc(dev); | 4868 | intel_update_fbc(dev); |
4808 | intel_edp_psr_update(dev); | ||
4809 | mutex_unlock(&dev->struct_mutex); | 4869 | mutex_unlock(&dev->struct_mutex); |
4810 | } | 4870 | } |
4811 | 4871 | ||
@@ -4850,16 +4910,43 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc) | |||
4850 | { | 4910 | { |
4851 | struct drm_device *dev = crtc->dev; | 4911 | struct drm_device *dev = crtc->dev; |
4852 | struct drm_i915_private *dev_priv = dev->dev_private; | 4912 | struct drm_i915_private *dev_priv = dev->dev_private; |
4913 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
4853 | struct intel_encoder *intel_encoder; | 4914 | struct intel_encoder *intel_encoder; |
4915 | enum intel_display_power_domain domain; | ||
4916 | unsigned long domains; | ||
4854 | bool enable = false; | 4917 | bool enable = false; |
4855 | 4918 | ||
4856 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) | 4919 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) |
4857 | enable |= intel_encoder->connectors_active; | 4920 | enable |= intel_encoder->connectors_active; |
4858 | 4921 | ||
4859 | if (enable) | 4922 | if (enable) { |
4860 | dev_priv->display.crtc_enable(crtc); | 4923 | if (!intel_crtc->active) { |
4861 | else | 4924 | /* |
4862 | dev_priv->display.crtc_disable(crtc); | 4925 | * FIXME: DDI plls and relevant code isn't converted |
4926 | * yet, so do runtime PM for DPMS only for all other | ||
4927 | * platforms for now. | ||
4928 | */ | ||
4929 | if (!HAS_DDI(dev)) { | ||
4930 | domains = get_crtc_power_domains(crtc); | ||
4931 | for_each_power_domain(domain, domains) | ||
4932 | intel_display_power_get(dev_priv, domain); | ||
4933 | intel_crtc->enabled_power_domains = domains; | ||
4934 | } | ||
4935 | |||
4936 | dev_priv->display.crtc_enable(crtc); | ||
4937 | } | ||
4938 | } else { | ||
4939 | if (intel_crtc->active) { | ||
4940 | dev_priv->display.crtc_disable(crtc); | ||
4941 | |||
4942 | if (!HAS_DDI(dev)) { | ||
4943 | domains = intel_crtc->enabled_power_domains; | ||
4944 | for_each_power_domain(domain, domains) | ||
4945 | intel_display_power_put(dev_priv, domain); | ||
4946 | intel_crtc->enabled_power_domains = 0; | ||
4947 | } | ||
4948 | } | ||
4949 | } | ||
4863 | 4950 | ||
4864 | intel_crtc_update_sarea(crtc, enable); | 4951 | intel_crtc_update_sarea(crtc, enable); |
4865 | } | 4952 | } |
@@ -4869,6 +4956,8 @@ static void intel_crtc_disable(struct drm_crtc *crtc) | |||
4869 | struct drm_device *dev = crtc->dev; | 4956 | struct drm_device *dev = crtc->dev; |
4870 | struct drm_connector *connector; | 4957 | struct drm_connector *connector; |
4871 | struct drm_i915_private *dev_priv = dev->dev_private; | 4958 | struct drm_i915_private *dev_priv = dev->dev_private; |
4959 | struct drm_i915_gem_object *old_obj; | ||
4960 | enum pipe pipe = to_intel_crtc(crtc)->pipe; | ||
4872 | 4961 | ||
4873 | /* crtc should still be enabled when we disable it. */ | 4962 | /* crtc should still be enabled when we disable it. */ |
4874 | WARN_ON(!crtc->enabled); | 4963 | WARN_ON(!crtc->enabled); |
@@ -4878,12 +4967,15 @@ static void intel_crtc_disable(struct drm_crtc *crtc) | |||
4878 | dev_priv->display.off(crtc); | 4967 | dev_priv->display.off(crtc); |
4879 | 4968 | ||
4880 | assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); | 4969 | assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); |
4881 | assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe); | 4970 | assert_cursor_disabled(dev_priv, pipe); |
4882 | assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); | 4971 | assert_pipe_disabled(dev->dev_private, pipe); |
4883 | 4972 | ||
4884 | if (crtc->primary->fb) { | 4973 | if (crtc->primary->fb) { |
4974 | old_obj = to_intel_framebuffer(crtc->primary->fb)->obj; | ||
4885 | mutex_lock(&dev->struct_mutex); | 4975 | mutex_lock(&dev->struct_mutex); |
4886 | intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj); | 4976 | intel_unpin_fb_obj(old_obj); |
4977 | i915_gem_track_fb(old_obj, NULL, | ||
4978 | INTEL_FRONTBUFFER_PRIMARY(pipe)); | ||
4887 | mutex_unlock(&dev->struct_mutex); | 4979 | mutex_unlock(&dev->struct_mutex); |
4888 | crtc->primary->fb = NULL; | 4980 | crtc->primary->fb = NULL; |
4889 | } | 4981 | } |
@@ -7991,8 +8083,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
7991 | struct drm_i915_private *dev_priv = dev->dev_private; | 8083 | struct drm_i915_private *dev_priv = dev->dev_private; |
7992 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 8084 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
7993 | int pipe = intel_crtc->pipe; | 8085 | int pipe = intel_crtc->pipe; |
7994 | int x = intel_crtc->cursor_x; | 8086 | int x = crtc->cursor_x; |
7995 | int y = intel_crtc->cursor_y; | 8087 | int y = crtc->cursor_y; |
7996 | u32 base = 0, pos = 0; | 8088 | u32 base = 0, pos = 0; |
7997 | 8089 | ||
7998 | if (on) | 8090 | if (on) |
@@ -8036,21 +8128,27 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
8036 | intel_crtc->cursor_base = base; | 8128 | intel_crtc->cursor_base = base; |
8037 | } | 8129 | } |
8038 | 8130 | ||
8039 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, | 8131 | /* |
8040 | struct drm_file *file, | 8132 | * intel_crtc_cursor_set_obj - Set cursor to specified GEM object |
8041 | uint32_t handle, | 8133 | * |
8042 | uint32_t width, uint32_t height) | 8134 | * Note that the object's reference will be consumed if the update fails. If |
8135 | * the update succeeds, the reference of the old object (if any) will be | ||
8136 | * consumed. | ||
8137 | */ | ||
8138 | static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, | ||
8139 | struct drm_i915_gem_object *obj, | ||
8140 | uint32_t width, uint32_t height) | ||
8043 | { | 8141 | { |
8044 | struct drm_device *dev = crtc->dev; | 8142 | struct drm_device *dev = crtc->dev; |
8045 | struct drm_i915_private *dev_priv = dev->dev_private; | 8143 | struct drm_i915_private *dev_priv = dev->dev_private; |
8046 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 8144 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
8047 | struct drm_i915_gem_object *obj; | 8145 | enum pipe pipe = intel_crtc->pipe; |
8048 | unsigned old_width; | 8146 | unsigned old_width; |
8049 | uint32_t addr; | 8147 | uint32_t addr; |
8050 | int ret; | 8148 | int ret; |
8051 | 8149 | ||
8052 | /* if we want to turn off the cursor ignore width and height */ | 8150 | /* if we want to turn off the cursor ignore width and height */ |
8053 | if (!handle) { | 8151 | if (!obj) { |
8054 | DRM_DEBUG_KMS("cursor off\n"); | 8152 | DRM_DEBUG_KMS("cursor off\n"); |
8055 | addr = 0; | 8153 | addr = 0; |
8056 | obj = NULL; | 8154 | obj = NULL; |
@@ -8066,12 +8164,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
8066 | return -EINVAL; | 8164 | return -EINVAL; |
8067 | } | 8165 | } |
8068 | 8166 | ||
8069 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); | ||
8070 | if (&obj->base == NULL) | ||
8071 | return -ENOENT; | ||
8072 | |||
8073 | if (obj->base.size < width * height * 4) { | 8167 | if (obj->base.size < width * height * 4) { |
8074 | DRM_DEBUG_KMS("buffer is to small\n"); | 8168 | DRM_DEBUG_KMS("buffer is too small\n"); |
8075 | ret = -ENOMEM; | 8169 | ret = -ENOMEM; |
8076 | goto fail; | 8170 | goto fail; |
8077 | } | 8171 | } |
@@ -8126,9 +8220,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
8126 | if (intel_crtc->cursor_bo) { | 8220 | if (intel_crtc->cursor_bo) { |
8127 | if (!INTEL_INFO(dev)->cursor_needs_physical) | 8221 | if (!INTEL_INFO(dev)->cursor_needs_physical) |
8128 | i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); | 8222 | i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); |
8129 | drm_gem_object_unreference(&intel_crtc->cursor_bo->base); | ||
8130 | } | 8223 | } |
8131 | 8224 | ||
8225 | i915_gem_track_fb(intel_crtc->cursor_bo, obj, | ||
8226 | INTEL_FRONTBUFFER_CURSOR(pipe)); | ||
8132 | mutex_unlock(&dev->struct_mutex); | 8227 | mutex_unlock(&dev->struct_mutex); |
8133 | 8228 | ||
8134 | old_width = intel_crtc->cursor_width; | 8229 | old_width = intel_crtc->cursor_width; |
@@ -8144,6 +8239,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
8144 | intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); | 8239 | intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); |
8145 | } | 8240 | } |
8146 | 8241 | ||
8242 | intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe)); | ||
8243 | |||
8147 | return 0; | 8244 | return 0; |
8148 | fail_unpin: | 8245 | fail_unpin: |
8149 | i915_gem_object_unpin_from_display_plane(obj); | 8246 | i915_gem_object_unpin_from_display_plane(obj); |
@@ -8154,19 +8251,6 @@ fail: | |||
8154 | return ret; | 8251 | return ret; |
8155 | } | 8252 | } |
8156 | 8253 | ||
8157 | static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | ||
8158 | { | ||
8159 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
8160 | |||
8161 | intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX); | ||
8162 | intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX); | ||
8163 | |||
8164 | if (intel_crtc->active) | ||
8165 | intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); | ||
8166 | |||
8167 | return 0; | ||
8168 | } | ||
8169 | |||
8170 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | 8254 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
8171 | u16 *blue, uint32_t start, uint32_t size) | 8255 | u16 *blue, uint32_t start, uint32_t size) |
8172 | { | 8256 | { |
@@ -8667,12 +8751,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
8667 | return mode; | 8751 | return mode; |
8668 | } | 8752 | } |
8669 | 8753 | ||
8670 | static void intel_increase_pllclock(struct drm_crtc *crtc) | 8754 | static void intel_increase_pllclock(struct drm_device *dev, |
8755 | enum pipe pipe) | ||
8671 | { | 8756 | { |
8672 | struct drm_device *dev = crtc->dev; | ||
8673 | struct drm_i915_private *dev_priv = dev->dev_private; | 8757 | struct drm_i915_private *dev_priv = dev->dev_private; |
8674 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
8675 | int pipe = intel_crtc->pipe; | ||
8676 | int dpll_reg = DPLL(pipe); | 8758 | int dpll_reg = DPLL(pipe); |
8677 | int dpll; | 8759 | int dpll; |
8678 | 8760 | ||
@@ -8773,28 +8855,179 @@ out: | |||
8773 | intel_runtime_pm_put(dev_priv); | 8855 | intel_runtime_pm_put(dev_priv); |
8774 | } | 8856 | } |
8775 | 8857 | ||
8776 | void intel_mark_fb_busy(struct drm_i915_gem_object *obj, | 8858 | |
8777 | struct intel_engine_cs *ring) | 8859 | /** |
8860 | * intel_mark_fb_busy - mark given planes as busy | ||
8861 | * @dev: DRM device | ||
8862 | * @frontbuffer_bits: bits for the affected planes | ||
8863 | * @ring: optional ring for asynchronous commands | ||
8864 | * | ||
8865 | * This function gets called every time the screen contents change. It can be | ||
8866 | * used to keep e.g. the update rate at the nominal refresh rate with DRRS. | ||
8867 | */ | ||
8868 | static void intel_mark_fb_busy(struct drm_device *dev, | ||
8869 | unsigned frontbuffer_bits, | ||
8870 | struct intel_engine_cs *ring) | ||
8778 | { | 8871 | { |
8779 | struct drm_device *dev = obj->base.dev; | 8872 | enum pipe pipe; |
8780 | struct drm_crtc *crtc; | ||
8781 | 8873 | ||
8782 | if (!i915.powersave) | 8874 | if (!i915.powersave) |
8783 | return; | 8875 | return; |
8784 | 8876 | ||
8785 | for_each_crtc(dev, crtc) { | 8877 | for_each_pipe(pipe) { |
8786 | if (!crtc->primary->fb) | 8878 | if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe))) |
8787 | continue; | ||
8788 | |||
8789 | if (to_intel_framebuffer(crtc->primary->fb)->obj != obj) | ||
8790 | continue; | 8879 | continue; |
8791 | 8880 | ||
8792 | intel_increase_pllclock(crtc); | 8881 | intel_increase_pllclock(dev, pipe); |
8793 | if (ring && intel_fbc_enabled(dev)) | 8882 | if (ring && intel_fbc_enabled(dev)) |
8794 | ring->fbc_dirty = true; | 8883 | ring->fbc_dirty = true; |
8795 | } | 8884 | } |
8796 | } | 8885 | } |
8797 | 8886 | ||
8887 | /** | ||
8888 | * intel_fb_obj_invalidate - invalidate frontbuffer object | ||
8889 | * @obj: GEM object to invalidate | ||
8890 | * @ring: set for asynchronous rendering | ||
8891 | * | ||
8892 | * This function gets called every time rendering on the given object starts and | ||
8893 | * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must | ||
8894 | * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed | ||
8895 | * until the rendering completes or a flip on this frontbuffer plane is | ||
8896 | * scheduled. | ||
8897 | */ | ||
8898 | void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, | ||
8899 | struct intel_engine_cs *ring) | ||
8900 | { | ||
8901 | struct drm_device *dev = obj->base.dev; | ||
8902 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
8903 | |||
8904 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
8905 | |||
8906 | if (!obj->frontbuffer_bits) | ||
8907 | return; | ||
8908 | |||
8909 | if (ring) { | ||
8910 | mutex_lock(&dev_priv->fb_tracking.lock); | ||
8911 | dev_priv->fb_tracking.busy_bits | ||
8912 | |= obj->frontbuffer_bits; | ||
8913 | dev_priv->fb_tracking.flip_bits | ||
8914 | &= ~obj->frontbuffer_bits; | ||
8915 | mutex_unlock(&dev_priv->fb_tracking.lock); | ||
8916 | } | ||
8917 | |||
8918 | intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring); | ||
8919 | |||
8920 | intel_edp_psr_exit(dev); | ||
8921 | } | ||
8922 | |||
8923 | /** | ||
8924 | * intel_frontbuffer_flush - flush frontbuffer | ||
8925 | * @dev: DRM device | ||
8926 | * @frontbuffer_bits: frontbuffer plane tracking bits | ||
8927 | * | ||
8928 | * This function gets called every time rendering on the given planes has | ||
8929 | * completed and frontbuffer caching can be started again. Flushes will get | ||
8930 | * delayed if they're blocked by some outstanding asynchronous rendering. | ||
8931 | * | ||
8932 | * Can be called without any locks held. | ||
8933 | */ | ||
8934 | void intel_frontbuffer_flush(struct drm_device *dev, | ||
8935 | unsigned frontbuffer_bits) | ||
8936 | { | ||
8937 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
8938 | |||
8939 | /* Delay flushing when rings are still busy.*/ | ||
8940 | mutex_lock(&dev_priv->fb_tracking.lock); | ||
8941 | frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits; | ||
8942 | mutex_unlock(&dev_priv->fb_tracking.lock); | ||
8943 | |||
8944 | intel_mark_fb_busy(dev, frontbuffer_bits, NULL); | ||
8945 | |||
8946 | intel_edp_psr_exit(dev); | ||
8947 | } | ||
8948 | |||
8949 | /** | ||
8950 | * intel_fb_obj_flush - flush frontbuffer object | ||
8951 | * @obj: GEM object to flush | ||
8952 | * @retire: set when retiring asynchronous rendering | ||
8953 | * | ||
8954 | * This function gets called every time rendering on the given object has | ||
8955 | * completed and frontbuffer caching can be started again. If @retire is true | ||
8956 | * then any delayed flushes will be unblocked. | ||
8957 | */ | ||
8958 | void intel_fb_obj_flush(struct drm_i915_gem_object *obj, | ||
8959 | bool retire) | ||
8960 | { | ||
8961 | struct drm_device *dev = obj->base.dev; | ||
8962 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
8963 | unsigned frontbuffer_bits; | ||
8964 | |||
8965 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
8966 | |||
8967 | if (!obj->frontbuffer_bits) | ||
8968 | return; | ||
8969 | |||
8970 | frontbuffer_bits = obj->frontbuffer_bits; | ||
8971 | |||
8972 | if (retire) { | ||
8973 | mutex_lock(&dev_priv->fb_tracking.lock); | ||
8974 | /* Filter out new bits since rendering started. */ | ||
8975 | frontbuffer_bits &= dev_priv->fb_tracking.busy_bits; | ||
8976 | |||
8977 | dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; | ||
8978 | mutex_unlock(&dev_priv->fb_tracking.lock); | ||
8979 | } | ||
8980 | |||
8981 | intel_frontbuffer_flush(dev, frontbuffer_bits); | ||
8982 | } | ||
8983 | |||
8984 | /** | ||
8985 | * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip | ||
8986 | * @dev: DRM device | ||
8987 | * @frontbuffer_bits: frontbuffer plane tracking bits | ||
8988 | * | ||
8989 | * This function gets called after scheduling a flip on @obj. The actual | ||
8990 | * frontbuffer flushing will be delayed until completion is signalled with | ||
8991 | * intel_frontbuffer_flip_complete. If an invalidate happens in between this | ||
8992 | * flush will be cancelled. | ||
8993 | * | ||
8994 | * Can be called without any locks held. | ||
8995 | */ | ||
8996 | void intel_frontbuffer_flip_prepare(struct drm_device *dev, | ||
8997 | unsigned frontbuffer_bits) | ||
8998 | { | ||
8999 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
9000 | |||
9001 | mutex_lock(&dev_priv->fb_tracking.lock); | ||
9002 | dev_priv->fb_tracking.flip_bits | ||
9003 | |= frontbuffer_bits; | ||
9004 | mutex_unlock(&dev_priv->fb_tracking.lock); | ||
9005 | } | ||
9006 | |||
9007 | /** | ||
9008 | * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush | ||
9009 | * @dev: DRM device | ||
9010 | * @frontbuffer_bits: frontbuffer plane tracking bits | ||
9011 | * | ||
9012 | * This function gets called after the flip has been latched and will complete | ||
9013 | * on the next vblank. It will execute the flush if it hasn't been cancelled yet. | ||
9014 | * | ||
9015 | * Can be called without any locks held. | ||
9016 | */ | ||
9017 | void intel_frontbuffer_flip_complete(struct drm_device *dev, | ||
9018 | unsigned frontbuffer_bits) | ||
9019 | { | ||
9020 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
9021 | |||
9022 | mutex_lock(&dev_priv->fb_tracking.lock); | ||
9023 | /* Mask any cancelled flips. */ | ||
9024 | frontbuffer_bits &= dev_priv->fb_tracking.flip_bits; | ||
9025 | dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits; | ||
9026 | mutex_unlock(&dev_priv->fb_tracking.lock); | ||
9027 | |||
9028 | intel_frontbuffer_flush(dev, frontbuffer_bits); | ||
9029 | } | ||
9030 | |||
8798 | static void intel_crtc_destroy(struct drm_crtc *crtc) | 9031 | static void intel_crtc_destroy(struct drm_crtc *crtc) |
8799 | { | 9032 | { |
8800 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 9033 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
@@ -8812,8 +9045,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) | |||
8812 | kfree(work); | 9045 | kfree(work); |
8813 | } | 9046 | } |
8814 | 9047 | ||
8815 | intel_crtc_cursor_set(crtc, NULL, 0, 0, 0); | ||
8816 | |||
8817 | drm_crtc_cleanup(crtc); | 9048 | drm_crtc_cleanup(crtc); |
8818 | 9049 | ||
8819 | kfree(intel_crtc); | 9050 | kfree(intel_crtc); |
@@ -8824,6 +9055,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
8824 | struct intel_unpin_work *work = | 9055 | struct intel_unpin_work *work = |
8825 | container_of(__work, struct intel_unpin_work, work); | 9056 | container_of(__work, struct intel_unpin_work, work); |
8826 | struct drm_device *dev = work->crtc->dev; | 9057 | struct drm_device *dev = work->crtc->dev; |
9058 | enum pipe pipe = to_intel_crtc(work->crtc)->pipe; | ||
8827 | 9059 | ||
8828 | mutex_lock(&dev->struct_mutex); | 9060 | mutex_lock(&dev->struct_mutex); |
8829 | intel_unpin_fb_obj(work->old_fb_obj); | 9061 | intel_unpin_fb_obj(work->old_fb_obj); |
@@ -8833,6 +9065,8 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
8833 | intel_update_fbc(dev); | 9065 | intel_update_fbc(dev); |
8834 | mutex_unlock(&dev->struct_mutex); | 9066 | mutex_unlock(&dev->struct_mutex); |
8835 | 9067 | ||
9068 | intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); | ||
9069 | |||
8836 | BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); | 9070 | BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); |
8837 | atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); | 9071 | atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); |
8838 | 9072 | ||
@@ -9202,6 +9436,147 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
9202 | return 0; | 9436 | return 0; |
9203 | } | 9437 | } |
9204 | 9438 | ||
9439 | static bool use_mmio_flip(struct intel_engine_cs *ring, | ||
9440 | struct drm_i915_gem_object *obj) | ||
9441 | { | ||
9442 | /* | ||
9443 | * This is not being used for older platforms, because | ||
9444 | * non-availability of flip done interrupt forces us to use | ||
9445 | * CS flips. Older platforms derive flip done using some clever | ||
9446 | * tricks involving the flip_pending status bits and vblank irqs. | ||
9447 | * So using MMIO flips there would disrupt this mechanism. | ||
9448 | */ | ||
9449 | |||
9450 | if (INTEL_INFO(ring->dev)->gen < 5) | ||
9451 | return false; | ||
9452 | |||
9453 | if (i915.use_mmio_flip < 0) | ||
9454 | return false; | ||
9455 | else if (i915.use_mmio_flip > 0) | ||
9456 | return true; | ||
9457 | else | ||
9458 | return ring != obj->ring; | ||
9459 | } | ||
9460 | |||
9461 | static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) | ||
9462 | { | ||
9463 | struct drm_device *dev = intel_crtc->base.dev; | ||
9464 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
9465 | struct intel_framebuffer *intel_fb = | ||
9466 | to_intel_framebuffer(intel_crtc->base.primary->fb); | ||
9467 | struct drm_i915_gem_object *obj = intel_fb->obj; | ||
9468 | u32 dspcntr; | ||
9469 | u32 reg; | ||
9470 | |||
9471 | intel_mark_page_flip_active(intel_crtc); | ||
9472 | |||
9473 | reg = DSPCNTR(intel_crtc->plane); | ||
9474 | dspcntr = I915_READ(reg); | ||
9475 | |||
9476 | if (INTEL_INFO(dev)->gen >= 4) { | ||
9477 | if (obj->tiling_mode != I915_TILING_NONE) | ||
9478 | dspcntr |= DISPPLANE_TILED; | ||
9479 | else | ||
9480 | dspcntr &= ~DISPPLANE_TILED; | ||
9481 | } | ||
9482 | I915_WRITE(reg, dspcntr); | ||
9483 | |||
9484 | I915_WRITE(DSPSURF(intel_crtc->plane), | ||
9485 | intel_crtc->unpin_work->gtt_offset); | ||
9486 | POSTING_READ(DSPSURF(intel_crtc->plane)); | ||
9487 | } | ||
9488 | |||
9489 | static int intel_postpone_flip(struct drm_i915_gem_object *obj) | ||
9490 | { | ||
9491 | struct intel_engine_cs *ring; | ||
9492 | int ret; | ||
9493 | |||
9494 | lockdep_assert_held(&obj->base.dev->struct_mutex); | ||
9495 | |||
9496 | if (!obj->last_write_seqno) | ||
9497 | return 0; | ||
9498 | |||
9499 | ring = obj->ring; | ||
9500 | |||
9501 | if (i915_seqno_passed(ring->get_seqno(ring, true), | ||
9502 | obj->last_write_seqno)) | ||
9503 | return 0; | ||
9504 | |||
9505 | ret = i915_gem_check_olr(ring, obj->last_write_seqno); | ||
9506 | if (ret) | ||
9507 | return ret; | ||
9508 | |||
9509 | if (WARN_ON(!ring->irq_get(ring))) | ||
9510 | return 0; | ||
9511 | |||
9512 | return 1; | ||
9513 | } | ||
9514 | |||
9515 | void intel_notify_mmio_flip(struct intel_engine_cs *ring) | ||
9516 | { | ||
9517 | struct drm_i915_private *dev_priv = to_i915(ring->dev); | ||
9518 | struct intel_crtc *intel_crtc; | ||
9519 | unsigned long irq_flags; | ||
9520 | u32 seqno; | ||
9521 | |||
9522 | seqno = ring->get_seqno(ring, false); | ||
9523 | |||
9524 | spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags); | ||
9525 | for_each_intel_crtc(ring->dev, intel_crtc) { | ||
9526 | struct intel_mmio_flip *mmio_flip; | ||
9527 | |||
9528 | mmio_flip = &intel_crtc->mmio_flip; | ||
9529 | if (mmio_flip->seqno == 0) | ||
9530 | continue; | ||
9531 | |||
9532 | if (ring->id != mmio_flip->ring_id) | ||
9533 | continue; | ||
9534 | |||
9535 | if (i915_seqno_passed(seqno, mmio_flip->seqno)) { | ||
9536 | intel_do_mmio_flip(intel_crtc); | ||
9537 | mmio_flip->seqno = 0; | ||
9538 | ring->irq_put(ring); | ||
9539 | } | ||
9540 | } | ||
9541 | spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags); | ||
9542 | } | ||
9543 | |||
9544 | static int intel_queue_mmio_flip(struct drm_device *dev, | ||
9545 | struct drm_crtc *crtc, | ||
9546 | struct drm_framebuffer *fb, | ||
9547 | struct drm_i915_gem_object *obj, | ||
9548 | struct intel_engine_cs *ring, | ||
9549 | uint32_t flags) | ||
9550 | { | ||
9551 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
9552 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
9553 | unsigned long irq_flags; | ||
9554 | int ret; | ||
9555 | |||
9556 | if (WARN_ON(intel_crtc->mmio_flip.seqno)) | ||
9557 | return -EBUSY; | ||
9558 | |||
9559 | ret = intel_postpone_flip(obj); | ||
9560 | if (ret < 0) | ||
9561 | return ret; | ||
9562 | if (ret == 0) { | ||
9563 | intel_do_mmio_flip(intel_crtc); | ||
9564 | return 0; | ||
9565 | } | ||
9566 | |||
9567 | spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags); | ||
9568 | intel_crtc->mmio_flip.seqno = obj->last_write_seqno; | ||
9569 | intel_crtc->mmio_flip.ring_id = obj->ring->id; | ||
9570 | spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags); | ||
9571 | |||
9572 | /* | ||
9573 | * Double check to catch cases where irq fired before | ||
9574 | * mmio flip data was ready | ||
9575 | */ | ||
9576 | intel_notify_mmio_flip(obj->ring); | ||
9577 | return 0; | ||
9578 | } | ||
9579 | |||
9205 | static int intel_default_queue_flip(struct drm_device *dev, | 9580 | static int intel_default_queue_flip(struct drm_device *dev, |
9206 | struct drm_crtc *crtc, | 9581 | struct drm_crtc *crtc, |
9207 | struct drm_framebuffer *fb, | 9582 | struct drm_framebuffer *fb, |
@@ -9222,6 +9597,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
9222 | struct drm_framebuffer *old_fb = crtc->primary->fb; | 9597 | struct drm_framebuffer *old_fb = crtc->primary->fb; |
9223 | struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; | 9598 | struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; |
9224 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 9599 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
9600 | enum pipe pipe = intel_crtc->pipe; | ||
9225 | struct intel_unpin_work *work; | 9601 | struct intel_unpin_work *work; |
9226 | struct intel_engine_cs *ring; | 9602 | struct intel_engine_cs *ring; |
9227 | unsigned long flags; | 9603 | unsigned long flags; |
@@ -9290,7 +9666,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
9290 | intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | 9666 | intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
9291 | 9667 | ||
9292 | if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) | 9668 | if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) |
9293 | work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1; | 9669 | work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1; |
9294 | 9670 | ||
9295 | if (IS_VALLEYVIEW(dev)) { | 9671 | if (IS_VALLEYVIEW(dev)) { |
9296 | ring = &dev_priv->ring[BCS]; | 9672 | ring = &dev_priv->ring[BCS]; |
@@ -9309,12 +9685,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
9309 | work->gtt_offset = | 9685 | work->gtt_offset = |
9310 | i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset; | 9686 | i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset; |
9311 | 9687 | ||
9312 | ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags); | 9688 | if (use_mmio_flip(ring, obj)) |
9689 | ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, | ||
9690 | page_flip_flags); | ||
9691 | else | ||
9692 | ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, | ||
9693 | page_flip_flags); | ||
9313 | if (ret) | 9694 | if (ret) |
9314 | goto cleanup_unpin; | 9695 | goto cleanup_unpin; |
9315 | 9696 | ||
9697 | i915_gem_track_fb(work->old_fb_obj, obj, | ||
9698 | INTEL_FRONTBUFFER_PRIMARY(pipe)); | ||
9699 | |||
9316 | intel_disable_fbc(dev); | 9700 | intel_disable_fbc(dev); |
9317 | intel_mark_fb_busy(obj, NULL); | 9701 | intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); |
9318 | mutex_unlock(&dev->struct_mutex); | 9702 | mutex_unlock(&dev->struct_mutex); |
9319 | 9703 | ||
9320 | trace_i915_flip_request(intel_crtc->plane, obj); | 9704 | trace_i915_flip_request(intel_crtc->plane, obj); |
@@ -9344,7 +9728,7 @@ out_hang: | |||
9344 | intel_crtc_wait_for_pending_flips(crtc); | 9728 | intel_crtc_wait_for_pending_flips(crtc); |
9345 | ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb); | 9729 | ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb); |
9346 | if (ret == 0 && event) | 9730 | if (ret == 0 && event) |
9347 | drm_send_vblank_event(dev, intel_crtc->pipe, event); | 9731 | drm_send_vblank_event(dev, pipe, event); |
9348 | } | 9732 | } |
9349 | return ret; | 9733 | return ret; |
9350 | } | 9734 | } |
@@ -10379,10 +10763,13 @@ static int __intel_set_mode(struct drm_crtc *crtc, | |||
10379 | */ | 10763 | */ |
10380 | for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { | 10764 | for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { |
10381 | struct drm_framebuffer *old_fb; | 10765 | struct drm_framebuffer *old_fb; |
10766 | struct drm_i915_gem_object *old_obj = NULL; | ||
10767 | struct drm_i915_gem_object *obj = | ||
10768 | to_intel_framebuffer(fb)->obj; | ||
10382 | 10769 | ||
10383 | mutex_lock(&dev->struct_mutex); | 10770 | mutex_lock(&dev->struct_mutex); |
10384 | ret = intel_pin_and_fence_fb_obj(dev, | 10771 | ret = intel_pin_and_fence_fb_obj(dev, |
10385 | to_intel_framebuffer(fb)->obj, | 10772 | obj, |
10386 | NULL); | 10773 | NULL); |
10387 | if (ret != 0) { | 10774 | if (ret != 0) { |
10388 | DRM_ERROR("pin & fence failed\n"); | 10775 | DRM_ERROR("pin & fence failed\n"); |
@@ -10390,8 +10777,12 @@ static int __intel_set_mode(struct drm_crtc *crtc, | |||
10390 | goto done; | 10777 | goto done; |
10391 | } | 10778 | } |
10392 | old_fb = crtc->primary->fb; | 10779 | old_fb = crtc->primary->fb; |
10393 | if (old_fb) | 10780 | if (old_fb) { |
10394 | intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); | 10781 | old_obj = to_intel_framebuffer(old_fb)->obj; |
10782 | intel_unpin_fb_obj(old_obj); | ||
10783 | } | ||
10784 | i915_gem_track_fb(old_obj, obj, | ||
10785 | INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); | ||
10395 | mutex_unlock(&dev->struct_mutex); | 10786 | mutex_unlock(&dev->struct_mutex); |
10396 | 10787 | ||
10397 | crtc->primary->fb = fb; | 10788 | crtc->primary->fb = fb; |
@@ -10563,12 +10954,17 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, | |||
10563 | if (is_crtc_connector_off(set)) { | 10954 | if (is_crtc_connector_off(set)) { |
10564 | config->mode_changed = true; | 10955 | config->mode_changed = true; |
10565 | } else if (set->crtc->primary->fb != set->fb) { | 10956 | } else if (set->crtc->primary->fb != set->fb) { |
10566 | /* If we have no fb then treat it as a full mode set */ | 10957 | /* |
10958 | * If we have no fb, we can only flip as long as the crtc is | ||
10959 | * active, otherwise we need a full mode set. The crtc may | ||
10960 | * be active if we've only disabled the primary plane, or | ||
10961 | * in fastboot situations. | ||
10962 | */ | ||
10567 | if (set->crtc->primary->fb == NULL) { | 10963 | if (set->crtc->primary->fb == NULL) { |
10568 | struct intel_crtc *intel_crtc = | 10964 | struct intel_crtc *intel_crtc = |
10569 | to_intel_crtc(set->crtc); | 10965 | to_intel_crtc(set->crtc); |
10570 | 10966 | ||
10571 | if (intel_crtc->active && i915.fastboot) { | 10967 | if (intel_crtc->active) { |
10572 | DRM_DEBUG_KMS("crtc has no fb, will flip\n"); | 10968 | DRM_DEBUG_KMS("crtc has no fb, will flip\n"); |
10573 | config->fb_changed = true; | 10969 | config->fb_changed = true; |
10574 | } else { | 10970 | } else { |
@@ -10806,10 +11202,24 @@ static int intel_crtc_set_config(struct drm_mode_set *set) | |||
10806 | ret = intel_set_mode(set->crtc, set->mode, | 11202 | ret = intel_set_mode(set->crtc, set->mode, |
10807 | set->x, set->y, set->fb); | 11203 | set->x, set->y, set->fb); |
10808 | } else if (config->fb_changed) { | 11204 | } else if (config->fb_changed) { |
11205 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
11206 | struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); | ||
11207 | |||
10809 | intel_crtc_wait_for_pending_flips(set->crtc); | 11208 | intel_crtc_wait_for_pending_flips(set->crtc); |
10810 | 11209 | ||
10811 | ret = intel_pipe_set_base(set->crtc, | 11210 | ret = intel_pipe_set_base(set->crtc, |
10812 | set->x, set->y, set->fb); | 11211 | set->x, set->y, set->fb); |
11212 | |||
11213 | /* | ||
11214 | * We need to make sure the primary plane is re-enabled if it | ||
11215 | * has previously been turned off. | ||
11216 | */ | ||
11217 | if (!intel_crtc->primary_enabled && ret == 0) { | ||
11218 | WARN_ON(!intel_crtc->active); | ||
11219 | intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane, | ||
11220 | intel_crtc->pipe); | ||
11221 | } | ||
11222 | |||
10813 | /* | 11223 | /* |
10814 | * In the fastboot case this may be our only check of the | 11224 | * In the fastboot case this may be our only check of the |
10815 | * state after boot. It would be better to only do it on | 11225 | * state after boot. It would be better to only do it on |
@@ -10850,8 +11260,6 @@ out_config: | |||
10850 | } | 11260 | } |
10851 | 11261 | ||
10852 | static const struct drm_crtc_funcs intel_crtc_funcs = { | 11262 | static const struct drm_crtc_funcs intel_crtc_funcs = { |
10853 | .cursor_set = intel_crtc_cursor_set, | ||
10854 | .cursor_move = intel_crtc_cursor_move, | ||
10855 | .gamma_set = intel_crtc_gamma_set, | 11263 | .gamma_set = intel_crtc_gamma_set, |
10856 | .set_config = intel_crtc_set_config, | 11264 | .set_config = intel_crtc_set_config, |
10857 | .destroy = intel_crtc_destroy, | 11265 | .destroy = intel_crtc_destroy, |
@@ -10959,17 +11367,318 @@ static void intel_shared_dpll_init(struct drm_device *dev) | |||
10959 | BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); | 11367 | BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); |
10960 | } | 11368 | } |
10961 | 11369 | ||
11370 | static int | ||
11371 | intel_primary_plane_disable(struct drm_plane *plane) | ||
11372 | { | ||
11373 | struct drm_device *dev = plane->dev; | ||
11374 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
11375 | struct intel_plane *intel_plane = to_intel_plane(plane); | ||
11376 | struct intel_crtc *intel_crtc; | ||
11377 | |||
11378 | if (!plane->fb) | ||
11379 | return 0; | ||
11380 | |||
11381 | BUG_ON(!plane->crtc); | ||
11382 | |||
11383 | intel_crtc = to_intel_crtc(plane->crtc); | ||
11384 | |||
11385 | /* | ||
11386 | * Even though we checked plane->fb above, it's still possible that | ||
11387 | * the primary plane has been implicitly disabled because the crtc | ||
11388 | * coordinates given weren't visible, or because we detected | ||
11389 | * that it was 100% covered by a sprite plane. Or, the CRTC may be | ||
11390 | * off and we've set a fb, but haven't actually turned on the CRTC yet. | ||
11391 | * In either case, we need to unpin the FB and let the fb pointer get | ||
11392 | * updated, but otherwise we don't need to touch the hardware. | ||
11393 | */ | ||
11394 | if (!intel_crtc->primary_enabled) | ||
11395 | goto disable_unpin; | ||
11396 | |||
11397 | intel_crtc_wait_for_pending_flips(plane->crtc); | ||
11398 | intel_disable_primary_hw_plane(dev_priv, intel_plane->plane, | ||
11399 | intel_plane->pipe); | ||
11400 | disable_unpin: | ||
11401 | i915_gem_track_fb(to_intel_framebuffer(plane->fb)->obj, NULL, | ||
11402 | INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); | ||
11403 | intel_unpin_fb_obj(to_intel_framebuffer(plane->fb)->obj); | ||
11404 | plane->fb = NULL; | ||
11405 | |||
11406 | return 0; | ||
11407 | } | ||
11408 | |||
11409 | static int | ||
11410 | intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc, | ||
11411 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, | ||
11412 | unsigned int crtc_w, unsigned int crtc_h, | ||
11413 | uint32_t src_x, uint32_t src_y, | ||
11414 | uint32_t src_w, uint32_t src_h) | ||
11415 | { | ||
11416 | struct drm_device *dev = crtc->dev; | ||
11417 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
11418 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
11419 | struct intel_plane *intel_plane = to_intel_plane(plane); | ||
11420 | struct drm_i915_gem_object *obj, *old_obj = NULL; | ||
11421 | struct drm_rect dest = { | ||
11422 | /* integer pixels */ | ||
11423 | .x1 = crtc_x, | ||
11424 | .y1 = crtc_y, | ||
11425 | .x2 = crtc_x + crtc_w, | ||
11426 | .y2 = crtc_y + crtc_h, | ||
11427 | }; | ||
11428 | struct drm_rect src = { | ||
11429 | /* 16.16 fixed point */ | ||
11430 | .x1 = src_x, | ||
11431 | .y1 = src_y, | ||
11432 | .x2 = src_x + src_w, | ||
11433 | .y2 = src_y + src_h, | ||
11434 | }; | ||
11435 | const struct drm_rect clip = { | ||
11436 | /* integer pixels */ | ||
11437 | .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, | ||
11438 | .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0, | ||
11439 | }; | ||
11440 | bool visible; | ||
11441 | int ret; | ||
11442 | |||
11443 | ret = drm_plane_helper_check_update(plane, crtc, fb, | ||
11444 | &src, &dest, &clip, | ||
11445 | DRM_PLANE_HELPER_NO_SCALING, | ||
11446 | DRM_PLANE_HELPER_NO_SCALING, | ||
11447 | false, true, &visible); | ||
11448 | |||
11449 | if (ret) | ||
11450 | return ret; | ||
11451 | |||
11452 | if (plane->fb) | ||
11453 | old_obj = to_intel_framebuffer(plane->fb)->obj; | ||
11454 | obj = to_intel_framebuffer(fb)->obj; | ||
11455 | |||
11456 | /* | ||
11457 | * If the CRTC isn't enabled, we're just pinning the framebuffer, | ||
11458 | * updating the fb pointer, and returning without touching the | ||
11459 | * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to | ||
11460 | * turn on the display with all planes setup as desired. | ||
11461 | */ | ||
11462 | if (!crtc->enabled) { | ||
11463 | /* | ||
11464 | * If we already called setplane while the crtc was disabled, | ||
11465 | * we may have an fb pinned; unpin it. | ||
11466 | */ | ||
11467 | if (plane->fb) | ||
11468 | intel_unpin_fb_obj(old_obj); | ||
11469 | |||
11470 | i915_gem_track_fb(old_obj, obj, | ||
11471 | INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); | ||
11472 | |||
11473 | /* Pin and return without programming hardware */ | ||
11474 | return intel_pin_and_fence_fb_obj(dev, obj, NULL); | ||
11475 | } | ||
11476 | |||
11477 | intel_crtc_wait_for_pending_flips(crtc); | ||
11478 | |||
11479 | /* | ||
11480 | * If clipping results in a non-visible primary plane, we'll disable | ||
11481 | * the primary plane. Note that this is a bit different than what | ||
11482 | * happens if userspace explicitly disables the plane by passing fb=0 | ||
11483 | * because plane->fb still gets set and pinned. | ||
11484 | */ | ||
11485 | if (!visible) { | ||
11486 | /* | ||
11487 | * Try to pin the new fb first so that we can bail out if we | ||
11488 | * fail. | ||
11489 | */ | ||
11490 | if (plane->fb != fb) { | ||
11491 | ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); | ||
11492 | if (ret) | ||
11493 | return ret; | ||
11494 | } | ||
11495 | |||
11496 | i915_gem_track_fb(old_obj, obj, | ||
11497 | INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); | ||
11498 | |||
11499 | if (intel_crtc->primary_enabled) | ||
11500 | intel_disable_primary_hw_plane(dev_priv, | ||
11501 | intel_plane->plane, | ||
11502 | intel_plane->pipe); | ||
11503 | |||
11504 | |||
11505 | if (plane->fb != fb) | ||
11506 | if (plane->fb) | ||
11507 | intel_unpin_fb_obj(old_obj); | ||
11508 | |||
11509 | return 0; | ||
11510 | } | ||
11511 | |||
11512 | ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb); | ||
11513 | if (ret) | ||
11514 | return ret; | ||
11515 | |||
11516 | if (!intel_crtc->primary_enabled) | ||
11517 | intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane, | ||
11518 | intel_crtc->pipe); | ||
11519 | |||
11520 | return 0; | ||
11521 | } | ||
11522 | |||
11523 | /* Common destruction function for both primary and cursor planes */ | ||
11524 | static void intel_plane_destroy(struct drm_plane *plane) | ||
11525 | { | ||
11526 | struct intel_plane *intel_plane = to_intel_plane(plane); | ||
11527 | drm_plane_cleanup(plane); | ||
11528 | kfree(intel_plane); | ||
11529 | } | ||
11530 | |||
11531 | static const struct drm_plane_funcs intel_primary_plane_funcs = { | ||
11532 | .update_plane = intel_primary_plane_setplane, | ||
11533 | .disable_plane = intel_primary_plane_disable, | ||
11534 | .destroy = intel_plane_destroy, | ||
11535 | }; | ||
11536 | |||
11537 | static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, | ||
11538 | int pipe) | ||
11539 | { | ||
11540 | struct intel_plane *primary; | ||
11541 | const uint32_t *intel_primary_formats; | ||
11542 | int num_formats; | ||
11543 | |||
11544 | primary = kzalloc(sizeof(*primary), GFP_KERNEL); | ||
11545 | if (primary == NULL) | ||
11546 | return NULL; | ||
11547 | |||
11548 | primary->can_scale = false; | ||
11549 | primary->max_downscale = 1; | ||
11550 | primary->pipe = pipe; | ||
11551 | primary->plane = pipe; | ||
11552 | if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) | ||
11553 | primary->plane = !pipe; | ||
11554 | |||
11555 | if (INTEL_INFO(dev)->gen <= 3) { | ||
11556 | intel_primary_formats = intel_primary_formats_gen2; | ||
11557 | num_formats = ARRAY_SIZE(intel_primary_formats_gen2); | ||
11558 | } else { | ||
11559 | intel_primary_formats = intel_primary_formats_gen4; | ||
11560 | num_formats = ARRAY_SIZE(intel_primary_formats_gen4); | ||
11561 | } | ||
11562 | |||
11563 | drm_universal_plane_init(dev, &primary->base, 0, | ||
11564 | &intel_primary_plane_funcs, | ||
11565 | intel_primary_formats, num_formats, | ||
11566 | DRM_PLANE_TYPE_PRIMARY); | ||
11567 | return &primary->base; | ||
11568 | } | ||
11569 | |||
11570 | static int | ||
11571 | intel_cursor_plane_disable(struct drm_plane *plane) | ||
11572 | { | ||
11573 | if (!plane->fb) | ||
11574 | return 0; | ||
11575 | |||
11576 | BUG_ON(!plane->crtc); | ||
11577 | |||
11578 | return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0); | ||
11579 | } | ||
11580 | |||
11581 | static int | ||
11582 | intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, | ||
11583 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, | ||
11584 | unsigned int crtc_w, unsigned int crtc_h, | ||
11585 | uint32_t src_x, uint32_t src_y, | ||
11586 | uint32_t src_w, uint32_t src_h) | ||
11587 | { | ||
11588 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
11589 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | ||
11590 | struct drm_i915_gem_object *obj = intel_fb->obj; | ||
11591 | struct drm_rect dest = { | ||
11592 | /* integer pixels */ | ||
11593 | .x1 = crtc_x, | ||
11594 | .y1 = crtc_y, | ||
11595 | .x2 = crtc_x + crtc_w, | ||
11596 | .y2 = crtc_y + crtc_h, | ||
11597 | }; | ||
11598 | struct drm_rect src = { | ||
11599 | /* 16.16 fixed point */ | ||
11600 | .x1 = src_x, | ||
11601 | .y1 = src_y, | ||
11602 | .x2 = src_x + src_w, | ||
11603 | .y2 = src_y + src_h, | ||
11604 | }; | ||
11605 | const struct drm_rect clip = { | ||
11606 | /* integer pixels */ | ||
11607 | .x2 = intel_crtc->config.pipe_src_w, | ||
11608 | .y2 = intel_crtc->config.pipe_src_h, | ||
11609 | }; | ||
11610 | bool visible; | ||
11611 | int ret; | ||
11612 | |||
11613 | ret = drm_plane_helper_check_update(plane, crtc, fb, | ||
11614 | &src, &dest, &clip, | ||
11615 | DRM_PLANE_HELPER_NO_SCALING, | ||
11616 | DRM_PLANE_HELPER_NO_SCALING, | ||
11617 | true, true, &visible); | ||
11618 | if (ret) | ||
11619 | return ret; | ||
11620 | |||
11621 | crtc->cursor_x = crtc_x; | ||
11622 | crtc->cursor_y = crtc_y; | ||
11623 | if (fb != crtc->cursor->fb) { | ||
11624 | return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h); | ||
11625 | } else { | ||
11626 | intel_crtc_update_cursor(crtc, visible); | ||
11627 | return 0; | ||
11628 | } | ||
11629 | } | ||
11630 | static const struct drm_plane_funcs intel_cursor_plane_funcs = { | ||
11631 | .update_plane = intel_cursor_plane_update, | ||
11632 | .disable_plane = intel_cursor_plane_disable, | ||
11633 | .destroy = intel_plane_destroy, | ||
11634 | }; | ||
11635 | |||
11636 | static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, | ||
11637 | int pipe) | ||
11638 | { | ||
11639 | struct intel_plane *cursor; | ||
11640 | |||
11641 | cursor = kzalloc(sizeof(*cursor), GFP_KERNEL); | ||
11642 | if (cursor == NULL) | ||
11643 | return NULL; | ||
11644 | |||
11645 | cursor->can_scale = false; | ||
11646 | cursor->max_downscale = 1; | ||
11647 | cursor->pipe = pipe; | ||
11648 | cursor->plane = pipe; | ||
11649 | |||
11650 | drm_universal_plane_init(dev, &cursor->base, 0, | ||
11651 | &intel_cursor_plane_funcs, | ||
11652 | intel_cursor_formats, | ||
11653 | ARRAY_SIZE(intel_cursor_formats), | ||
11654 | DRM_PLANE_TYPE_CURSOR); | ||
11655 | return &cursor->base; | ||
11656 | } | ||
11657 | |||
10962 | static void intel_crtc_init(struct drm_device *dev, int pipe) | 11658 | static void intel_crtc_init(struct drm_device *dev, int pipe) |
10963 | { | 11659 | { |
10964 | struct drm_i915_private *dev_priv = dev->dev_private; | 11660 | struct drm_i915_private *dev_priv = dev->dev_private; |
10965 | struct intel_crtc *intel_crtc; | 11661 | struct intel_crtc *intel_crtc; |
10966 | int i; | 11662 | struct drm_plane *primary = NULL; |
11663 | struct drm_plane *cursor = NULL; | ||
11664 | int i, ret; | ||
10967 | 11665 | ||
10968 | intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); | 11666 | intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); |
10969 | if (intel_crtc == NULL) | 11667 | if (intel_crtc == NULL) |
10970 | return; | 11668 | return; |
10971 | 11669 | ||
10972 | drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); | 11670 | primary = intel_primary_plane_create(dev, pipe); |
11671 | if (!primary) | ||
11672 | goto fail; | ||
11673 | |||
11674 | cursor = intel_cursor_plane_create(dev, pipe); | ||
11675 | if (!cursor) | ||
11676 | goto fail; | ||
11677 | |||
11678 | ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, | ||
11679 | cursor, &intel_crtc_funcs); | ||
11680 | if (ret) | ||
11681 | goto fail; | ||
10973 | 11682 | ||
10974 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); | 11683 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); |
10975 | for (i = 0; i < 256; i++) { | 11684 | for (i = 0; i < 256; i++) { |
@@ -10980,7 +11689,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
10980 | 11689 | ||
10981 | /* | 11690 | /* |
10982 | * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port | 11691 | * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port |
10983 | * is hooked to plane B. Hence we want plane A feeding pipe B. | 11692 | * is hooked to pipe B. Hence we want plane A feeding pipe B. |
10984 | */ | 11693 | */ |
10985 | intel_crtc->pipe = pipe; | 11694 | intel_crtc->pipe = pipe; |
10986 | intel_crtc->plane = pipe; | 11695 | intel_crtc->plane = pipe; |
@@ -11002,6 +11711,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
11002 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); | 11711 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); |
11003 | 11712 | ||
11004 | WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); | 11713 | WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); |
11714 | return; | ||
11715 | |||
11716 | fail: | ||
11717 | if (primary) | ||
11718 | drm_plane_cleanup(primary); | ||
11719 | if (cursor) | ||
11720 | drm_plane_cleanup(cursor); | ||
11721 | kfree(intel_crtc); | ||
11005 | } | 11722 | } |
11006 | 11723 | ||
11007 | enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) | 11724 | enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) |
@@ -11236,6 +11953,8 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
11236 | if (SUPPORTS_TV(dev)) | 11953 | if (SUPPORTS_TV(dev)) |
11237 | intel_tv_init(dev); | 11954 | intel_tv_init(dev); |
11238 | 11955 | ||
11956 | intel_edp_psr_init(dev); | ||
11957 | |||
11239 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { | 11958 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
11240 | encoder->base.possible_crtcs = encoder->crtc_mask; | 11959 | encoder->base.possible_crtcs = encoder->crtc_mask; |
11241 | encoder->base.possible_clones = | 11960 | encoder->base.possible_clones = |
@@ -11249,11 +11968,14 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
11249 | 11968 | ||
11250 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | 11969 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) |
11251 | { | 11970 | { |
11971 | struct drm_device *dev = fb->dev; | ||
11252 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 11972 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
11253 | 11973 | ||
11254 | drm_framebuffer_cleanup(fb); | 11974 | drm_framebuffer_cleanup(fb); |
11975 | mutex_lock(&dev->struct_mutex); | ||
11255 | WARN_ON(!intel_fb->obj->framebuffer_references--); | 11976 | WARN_ON(!intel_fb->obj->framebuffer_references--); |
11256 | drm_gem_object_unreference_unlocked(&intel_fb->obj->base); | 11977 | drm_gem_object_unreference(&intel_fb->obj->base); |
11978 | mutex_unlock(&dev->struct_mutex); | ||
11257 | kfree(intel_fb); | 11979 | kfree(intel_fb); |
11258 | } | 11980 | } |
11259 | 11981 | ||
@@ -12266,7 +12988,6 @@ void intel_connector_unregister(struct intel_connector *intel_connector) | |||
12266 | void intel_modeset_cleanup(struct drm_device *dev) | 12988 | void intel_modeset_cleanup(struct drm_device *dev) |
12267 | { | 12989 | { |
12268 | struct drm_i915_private *dev_priv = dev->dev_private; | 12990 | struct drm_i915_private *dev_priv = dev->dev_private; |
12269 | struct drm_crtc *crtc; | ||
12270 | struct drm_connector *connector; | 12991 | struct drm_connector *connector; |
12271 | 12992 | ||
12272 | /* | 12993 | /* |
@@ -12286,14 +13007,6 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
12286 | 13007 | ||
12287 | intel_unregister_dsm_handler(); | 13008 | intel_unregister_dsm_handler(); |
12288 | 13009 | ||
12289 | for_each_crtc(dev, crtc) { | ||
12290 | /* Skip inactive CRTCs */ | ||
12291 | if (!crtc->primary->fb) | ||
12292 | continue; | ||
12293 | |||
12294 | intel_increase_pllclock(crtc); | ||
12295 | } | ||
12296 | |||
12297 | intel_disable_fbc(dev); | 13010 | intel_disable_fbc(dev); |
12298 | 13011 | ||
12299 | intel_disable_gt_powersave(dev); | 13012 | intel_disable_gt_powersave(dev); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 99f033f69189..b5ec48913b47 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1613,11 +1613,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder, | |||
1613 | } | 1613 | } |
1614 | } | 1614 | } |
1615 | 1615 | ||
1616 | static bool is_edp_psr(struct drm_device *dev) | 1616 | static bool is_edp_psr(struct intel_dp *intel_dp) |
1617 | { | 1617 | { |
1618 | struct drm_i915_private *dev_priv = dev->dev_private; | 1618 | return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; |
1619 | |||
1620 | return dev_priv->psr.sink_support; | ||
1621 | } | 1619 | } |
1622 | 1620 | ||
1623 | static bool intel_edp_is_psr_enabled(struct drm_device *dev) | 1621 | static bool intel_edp_is_psr_enabled(struct drm_device *dev) |
@@ -1665,7 +1663,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp) | |||
1665 | struct drm_i915_private *dev_priv = dev->dev_private; | 1663 | struct drm_i915_private *dev_priv = dev->dev_private; |
1666 | struct edp_vsc_psr psr_vsc; | 1664 | struct edp_vsc_psr psr_vsc; |
1667 | 1665 | ||
1668 | if (intel_dp->psr_setup_done) | 1666 | if (dev_priv->psr.setup_done) |
1669 | return; | 1667 | return; |
1670 | 1668 | ||
1671 | /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ | 1669 | /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ |
@@ -1680,21 +1678,26 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp) | |||
1680 | I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | | 1678 | I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | |
1681 | EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); | 1679 | EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); |
1682 | 1680 | ||
1683 | intel_dp->psr_setup_done = true; | 1681 | dev_priv->psr.setup_done = true; |
1684 | } | 1682 | } |
1685 | 1683 | ||
1686 | static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) | 1684 | static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) |
1687 | { | 1685 | { |
1688 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 1686 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
1687 | struct drm_device *dev = dig_port->base.base.dev; | ||
1689 | struct drm_i915_private *dev_priv = dev->dev_private; | 1688 | struct drm_i915_private *dev_priv = dev->dev_private; |
1690 | uint32_t aux_clock_divider; | 1689 | uint32_t aux_clock_divider; |
1691 | int precharge = 0x3; | 1690 | int precharge = 0x3; |
1692 | int msg_size = 5; /* Header(4) + Message(1) */ | 1691 | int msg_size = 5; /* Header(4) + Message(1) */ |
1692 | bool only_standby = false; | ||
1693 | 1693 | ||
1694 | aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); | 1694 | aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); |
1695 | 1695 | ||
1696 | if (IS_BROADWELL(dev) && dig_port->port != PORT_A) | ||
1697 | only_standby = true; | ||
1698 | |||
1696 | /* Enable PSR in sink */ | 1699 | /* Enable PSR in sink */ |
1697 | if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) | 1700 | if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) |
1698 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, | 1701 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, |
1699 | DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE); | 1702 | DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE); |
1700 | else | 1703 | else |
@@ -1713,18 +1716,24 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) | |||
1713 | 1716 | ||
1714 | static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) | 1717 | static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) |
1715 | { | 1718 | { |
1716 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 1719 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
1720 | struct drm_device *dev = dig_port->base.base.dev; | ||
1717 | struct drm_i915_private *dev_priv = dev->dev_private; | 1721 | struct drm_i915_private *dev_priv = dev->dev_private; |
1718 | uint32_t max_sleep_time = 0x1f; | 1722 | uint32_t max_sleep_time = 0x1f; |
1719 | uint32_t idle_frames = 1; | 1723 | uint32_t idle_frames = 1; |
1720 | uint32_t val = 0x0; | 1724 | uint32_t val = 0x0; |
1721 | const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; | 1725 | const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; |
1726 | bool only_standby = false; | ||
1727 | |||
1728 | if (IS_BROADWELL(dev) && dig_port->port != PORT_A) | ||
1729 | only_standby = true; | ||
1722 | 1730 | ||
1723 | if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { | 1731 | if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) { |
1724 | val |= EDP_PSR_LINK_STANDBY; | 1732 | val |= EDP_PSR_LINK_STANDBY; |
1725 | val |= EDP_PSR_TP2_TP3_TIME_0us; | 1733 | val |= EDP_PSR_TP2_TP3_TIME_0us; |
1726 | val |= EDP_PSR_TP1_TIME_0us; | 1734 | val |= EDP_PSR_TP1_TIME_0us; |
1727 | val |= EDP_PSR_SKIP_AUX_EXIT; | 1735 | val |= EDP_PSR_SKIP_AUX_EXIT; |
1736 | val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0; | ||
1728 | } else | 1737 | } else |
1729 | val |= EDP_PSR_LINK_DISABLE; | 1738 | val |= EDP_PSR_LINK_DISABLE; |
1730 | 1739 | ||
@@ -1752,8 +1761,8 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) | |||
1752 | return false; | 1761 | return false; |
1753 | } | 1762 | } |
1754 | 1763 | ||
1755 | if ((intel_encoder->type != INTEL_OUTPUT_EDP) || | 1764 | if (IS_HASWELL(dev) && (intel_encoder->type != INTEL_OUTPUT_EDP || |
1756 | (dig_port->port != PORT_A)) { | 1765 | dig_port->port != PORT_A)) { |
1757 | DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); | 1766 | DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); |
1758 | return false; | 1767 | return false; |
1759 | } | 1768 | } |
@@ -1782,6 +1791,10 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) | |||
1782 | return false; | 1791 | return false; |
1783 | } | 1792 | } |
1784 | 1793 | ||
1794 | /* Below limitations aren't valid for Broadwell */ | ||
1795 | if (IS_BROADWELL(dev)) | ||
1796 | goto out; | ||
1797 | |||
1785 | if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { | 1798 | if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { |
1786 | DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); | 1799 | DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); |
1787 | return false; | 1800 | return false; |
@@ -1798,34 +1811,48 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) | |||
1798 | return false; | 1811 | return false; |
1799 | } | 1812 | } |
1800 | 1813 | ||
1814 | out: | ||
1801 | dev_priv->psr.source_ok = true; | 1815 | dev_priv->psr.source_ok = true; |
1802 | return true; | 1816 | return true; |
1803 | } | 1817 | } |
1804 | 1818 | ||
1805 | static void intel_edp_psr_do_enable(struct intel_dp *intel_dp) | 1819 | static void intel_edp_psr_do_enable(struct intel_dp *intel_dp) |
1806 | { | 1820 | { |
1807 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 1821 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1822 | struct drm_device *dev = intel_dig_port->base.base.dev; | ||
1823 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1808 | 1824 | ||
1809 | if (!intel_edp_psr_match_conditions(intel_dp) || | 1825 | if (intel_edp_is_psr_enabled(dev)) |
1810 | intel_edp_is_psr_enabled(dev)) | ||
1811 | return; | 1826 | return; |
1812 | 1827 | ||
1813 | /* Setup PSR once */ | ||
1814 | intel_edp_psr_setup(intel_dp); | ||
1815 | |||
1816 | /* Enable PSR on the panel */ | 1828 | /* Enable PSR on the panel */ |
1817 | intel_edp_psr_enable_sink(intel_dp); | 1829 | intel_edp_psr_enable_sink(intel_dp); |
1818 | 1830 | ||
1819 | /* Enable PSR on the host */ | 1831 | /* Enable PSR on the host */ |
1820 | intel_edp_psr_enable_source(intel_dp); | 1832 | intel_edp_psr_enable_source(intel_dp); |
1833 | |||
1834 | dev_priv->psr.enabled = true; | ||
1835 | dev_priv->psr.active = true; | ||
1821 | } | 1836 | } |
1822 | 1837 | ||
1823 | void intel_edp_psr_enable(struct intel_dp *intel_dp) | 1838 | void intel_edp_psr_enable(struct intel_dp *intel_dp) |
1824 | { | 1839 | { |
1825 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 1840 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1826 | 1841 | ||
1827 | if (intel_edp_psr_match_conditions(intel_dp) && | 1842 | if (!HAS_PSR(dev)) { |
1828 | !intel_edp_is_psr_enabled(dev)) | 1843 | DRM_DEBUG_KMS("PSR not supported on this platform\n"); |
1844 | return; | ||
1845 | } | ||
1846 | |||
1847 | if (!is_edp_psr(intel_dp)) { | ||
1848 | DRM_DEBUG_KMS("PSR not supported by this panel\n"); | ||
1849 | return; | ||
1850 | } | ||
1851 | |||
1852 | /* Setup PSR once */ | ||
1853 | intel_edp_psr_setup(intel_dp); | ||
1854 | |||
1855 | if (intel_edp_psr_match_conditions(intel_dp)) | ||
1829 | intel_edp_psr_do_enable(intel_dp); | 1856 | intel_edp_psr_do_enable(intel_dp); |
1830 | } | 1857 | } |
1831 | 1858 | ||
@@ -1834,7 +1861,7 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp) | |||
1834 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 1861 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1835 | struct drm_i915_private *dev_priv = dev->dev_private; | 1862 | struct drm_i915_private *dev_priv = dev->dev_private; |
1836 | 1863 | ||
1837 | if (!intel_edp_is_psr_enabled(dev)) | 1864 | if (!dev_priv->psr.enabled) |
1838 | return; | 1865 | return; |
1839 | 1866 | ||
1840 | I915_WRITE(EDP_PSR_CTL(dev), | 1867 | I915_WRITE(EDP_PSR_CTL(dev), |
@@ -1844,10 +1871,15 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp) | |||
1844 | if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) & | 1871 | if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) & |
1845 | EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) | 1872 | EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) |
1846 | DRM_ERROR("Timed out waiting for PSR Idle State\n"); | 1873 | DRM_ERROR("Timed out waiting for PSR Idle State\n"); |
1874 | |||
1875 | dev_priv->psr.enabled = false; | ||
1847 | } | 1876 | } |
1848 | 1877 | ||
1849 | void intel_edp_psr_update(struct drm_device *dev) | 1878 | static void intel_edp_psr_work(struct work_struct *work) |
1850 | { | 1879 | { |
1880 | struct drm_i915_private *dev_priv = | ||
1881 | container_of(work, typeof(*dev_priv), psr.work.work); | ||
1882 | struct drm_device *dev = dev_priv->dev; | ||
1851 | struct intel_encoder *encoder; | 1883 | struct intel_encoder *encoder; |
1852 | struct intel_dp *intel_dp = NULL; | 1884 | struct intel_dp *intel_dp = NULL; |
1853 | 1885 | ||
@@ -1855,17 +1887,52 @@ void intel_edp_psr_update(struct drm_device *dev) | |||
1855 | if (encoder->type == INTEL_OUTPUT_EDP) { | 1887 | if (encoder->type == INTEL_OUTPUT_EDP) { |
1856 | intel_dp = enc_to_intel_dp(&encoder->base); | 1888 | intel_dp = enc_to_intel_dp(&encoder->base); |
1857 | 1889 | ||
1858 | if (!is_edp_psr(dev)) | ||
1859 | return; | ||
1860 | |||
1861 | if (!intel_edp_psr_match_conditions(intel_dp)) | 1890 | if (!intel_edp_psr_match_conditions(intel_dp)) |
1862 | intel_edp_psr_disable(intel_dp); | 1891 | intel_edp_psr_disable(intel_dp); |
1863 | else | 1892 | else |
1864 | if (!intel_edp_is_psr_enabled(dev)) | 1893 | intel_edp_psr_do_enable(intel_dp); |
1865 | intel_edp_psr_do_enable(intel_dp); | ||
1866 | } | 1894 | } |
1867 | } | 1895 | } |
1868 | 1896 | ||
1897 | static void intel_edp_psr_inactivate(struct drm_device *dev) | ||
1898 | { | ||
1899 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1900 | |||
1901 | dev_priv->psr.active = false; | ||
1902 | |||
1903 | I915_WRITE(EDP_PSR_CTL(dev), I915_READ(EDP_PSR_CTL(dev)) | ||
1904 | & ~EDP_PSR_ENABLE); | ||
1905 | } | ||
1906 | |||
1907 | void intel_edp_psr_exit(struct drm_device *dev) | ||
1908 | { | ||
1909 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1910 | |||
1911 | if (!HAS_PSR(dev)) | ||
1912 | return; | ||
1913 | |||
1914 | if (!dev_priv->psr.setup_done) | ||
1915 | return; | ||
1916 | |||
1917 | cancel_delayed_work_sync(&dev_priv->psr.work); | ||
1918 | |||
1919 | if (dev_priv->psr.active) | ||
1920 | intel_edp_psr_inactivate(dev); | ||
1921 | |||
1922 | schedule_delayed_work(&dev_priv->psr.work, | ||
1923 | msecs_to_jiffies(100)); | ||
1924 | } | ||
1925 | |||
1926 | void intel_edp_psr_init(struct drm_device *dev) | ||
1927 | { | ||
1928 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1929 | |||
1930 | if (!HAS_PSR(dev)) | ||
1931 | return; | ||
1932 | |||
1933 | INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work); | ||
1934 | } | ||
1935 | |||
1869 | static void intel_disable_dp(struct intel_encoder *encoder) | 1936 | static void intel_disable_dp(struct intel_encoder *encoder) |
1870 | { | 1937 | { |
1871 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 1938 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
@@ -2119,6 +2186,70 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder) | |||
2119 | vlv_wait_port_ready(dev_priv, dport); | 2186 | vlv_wait_port_ready(dev_priv, dport); |
2120 | } | 2187 | } |
2121 | 2188 | ||
2189 | static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) | ||
2190 | { | ||
2191 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); | ||
2192 | struct drm_device *dev = encoder->base.dev; | ||
2193 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2194 | struct intel_crtc *intel_crtc = | ||
2195 | to_intel_crtc(encoder->base.crtc); | ||
2196 | enum dpio_channel ch = vlv_dport_to_channel(dport); | ||
2197 | enum pipe pipe = intel_crtc->pipe; | ||
2198 | u32 val; | ||
2199 | |||
2200 | mutex_lock(&dev_priv->dpio_lock); | ||
2201 | |||
2202 | /* program left/right clock distribution */ | ||
2203 | if (pipe != PIPE_B) { | ||
2204 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); | ||
2205 | val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); | ||
2206 | if (ch == DPIO_CH0) | ||
2207 | val |= CHV_BUFLEFTENA1_FORCE; | ||
2208 | if (ch == DPIO_CH1) | ||
2209 | val |= CHV_BUFRIGHTENA1_FORCE; | ||
2210 | vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); | ||
2211 | } else { | ||
2212 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); | ||
2213 | val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); | ||
2214 | if (ch == DPIO_CH0) | ||
2215 | val |= CHV_BUFLEFTENA2_FORCE; | ||
2216 | if (ch == DPIO_CH1) | ||
2217 | val |= CHV_BUFRIGHTENA2_FORCE; | ||
2218 | vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); | ||
2219 | } | ||
2220 | |||
2221 | /* program clock channel usage */ | ||
2222 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); | ||
2223 | val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; | ||
2224 | if (pipe != PIPE_B) | ||
2225 | val &= ~CHV_PCS_USEDCLKCHANNEL; | ||
2226 | else | ||
2227 | val |= CHV_PCS_USEDCLKCHANNEL; | ||
2228 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); | ||
2229 | |||
2230 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); | ||
2231 | val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; | ||
2232 | if (pipe != PIPE_B) | ||
2233 | val &= ~CHV_PCS_USEDCLKCHANNEL; | ||
2234 | else | ||
2235 | val |= CHV_PCS_USEDCLKCHANNEL; | ||
2236 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); | ||
2237 | |||
2238 | /* | ||
2239 | * This a a bit weird since generally CL | ||
2240 | * matches the pipe, but here we need to | ||
2241 | * pick the CL based on the port. | ||
2242 | */ | ||
2243 | val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); | ||
2244 | if (pipe != PIPE_B) | ||
2245 | val &= ~CHV_CMN_USEDCLKCHANNEL; | ||
2246 | else | ||
2247 | val |= CHV_CMN_USEDCLKCHANNEL; | ||
2248 | vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); | ||
2249 | |||
2250 | mutex_unlock(&dev_priv->dpio_lock); | ||
2251 | } | ||
2252 | |||
2122 | /* | 2253 | /* |
2123 | * Native read with retry for link status and receiver capability reads for | 2254 | * Native read with retry for link status and receiver capability reads for |
2124 | * cases where the sink may still be asleep. | 2255 | * cases where the sink may still be asleep. |
@@ -2156,18 +2287,14 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ | |||
2156 | DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; | 2287 | DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; |
2157 | } | 2288 | } |
2158 | 2289 | ||
2159 | /* | 2290 | /* These are source-specific values. */ |
2160 | * These are source-specific values; current Intel hardware supports | ||
2161 | * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB | ||
2162 | */ | ||
2163 | |||
2164 | static uint8_t | 2291 | static uint8_t |
2165 | intel_dp_voltage_max(struct intel_dp *intel_dp) | 2292 | intel_dp_voltage_max(struct intel_dp *intel_dp) |
2166 | { | 2293 | { |
2167 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 2294 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
2168 | enum port port = dp_to_dig_port(intel_dp)->port; | 2295 | enum port port = dp_to_dig_port(intel_dp)->port; |
2169 | 2296 | ||
2170 | if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev)) | 2297 | if (IS_VALLEYVIEW(dev)) |
2171 | return DP_TRAIN_VOLTAGE_SWING_1200; | 2298 | return DP_TRAIN_VOLTAGE_SWING_1200; |
2172 | else if (IS_GEN7(dev) && port == PORT_A) | 2299 | else if (IS_GEN7(dev) && port == PORT_A) |
2173 | return DP_TRAIN_VOLTAGE_SWING_800; | 2300 | return DP_TRAIN_VOLTAGE_SWING_800; |
@@ -2183,18 +2310,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) | |||
2183 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 2310 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
2184 | enum port port = dp_to_dig_port(intel_dp)->port; | 2311 | enum port port = dp_to_dig_port(intel_dp)->port; |
2185 | 2312 | ||
2186 | if (IS_BROADWELL(dev)) { | 2313 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { |
2187 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { | ||
2188 | case DP_TRAIN_VOLTAGE_SWING_400: | ||
2189 | case DP_TRAIN_VOLTAGE_SWING_600: | ||
2190 | return DP_TRAIN_PRE_EMPHASIS_6; | ||
2191 | case DP_TRAIN_VOLTAGE_SWING_800: | ||
2192 | return DP_TRAIN_PRE_EMPHASIS_3_5; | ||
2193 | case DP_TRAIN_VOLTAGE_SWING_1200: | ||
2194 | default: | ||
2195 | return DP_TRAIN_PRE_EMPHASIS_0; | ||
2196 | } | ||
2197 | } else if (IS_HASWELL(dev)) { | ||
2198 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { | 2314 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { |
2199 | case DP_TRAIN_VOLTAGE_SWING_400: | 2315 | case DP_TRAIN_VOLTAGE_SWING_400: |
2200 | return DP_TRAIN_PRE_EMPHASIS_9_5; | 2316 | return DP_TRAIN_PRE_EMPHASIS_9_5; |
@@ -2666,41 +2782,6 @@ intel_hsw_signal_levels(uint8_t train_set) | |||
2666 | } | 2782 | } |
2667 | } | 2783 | } |
2668 | 2784 | ||
2669 | static uint32_t | ||
2670 | intel_bdw_signal_levels(uint8_t train_set) | ||
2671 | { | ||
2672 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | | ||
2673 | DP_TRAIN_PRE_EMPHASIS_MASK); | ||
2674 | switch (signal_levels) { | ||
2675 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: | ||
2676 | return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */ | ||
2677 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: | ||
2678 | return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */ | ||
2679 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: | ||
2680 | return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */ | ||
2681 | |||
2682 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: | ||
2683 | return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */ | ||
2684 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: | ||
2685 | return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */ | ||
2686 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: | ||
2687 | return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */ | ||
2688 | |||
2689 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: | ||
2690 | return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */ | ||
2691 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: | ||
2692 | return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */ | ||
2693 | |||
2694 | case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: | ||
2695 | return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */ | ||
2696 | |||
2697 | default: | ||
2698 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" | ||
2699 | "0x%x\n", signal_levels); | ||
2700 | return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */ | ||
2701 | } | ||
2702 | } | ||
2703 | |||
2704 | /* Properly updates "DP" with the correct signal levels. */ | 2785 | /* Properly updates "DP" with the correct signal levels. */ |
2705 | static void | 2786 | static void |
2706 | intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) | 2787 | intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) |
@@ -2711,10 +2792,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) | |||
2711 | uint32_t signal_levels, mask; | 2792 | uint32_t signal_levels, mask; |
2712 | uint8_t train_set = intel_dp->train_set[0]; | 2793 | uint8_t train_set = intel_dp->train_set[0]; |
2713 | 2794 | ||
2714 | if (IS_BROADWELL(dev)) { | 2795 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { |
2715 | signal_levels = intel_bdw_signal_levels(train_set); | ||
2716 | mask = DDI_BUF_EMP_MASK; | ||
2717 | } else if (IS_HASWELL(dev)) { | ||
2718 | signal_levels = intel_hsw_signal_levels(train_set); | 2796 | signal_levels = intel_hsw_signal_levels(train_set); |
2719 | mask = DDI_BUF_EMP_MASK; | 2797 | mask = DDI_BUF_EMP_MASK; |
2720 | } else if (IS_CHERRYVIEW(dev)) { | 2798 | } else if (IS_CHERRYVIEW(dev)) { |
@@ -4279,8 +4357,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
4279 | 4357 | ||
4280 | intel_dp_aux_init(intel_dp, intel_connector); | 4358 | intel_dp_aux_init(intel_dp, intel_connector); |
4281 | 4359 | ||
4282 | intel_dp->psr_setup_done = false; | ||
4283 | |||
4284 | if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { | 4360 | if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { |
4285 | drm_dp_aux_unregister(&intel_dp->aux); | 4361 | drm_dp_aux_unregister(&intel_dp->aux); |
4286 | if (is_edp(intel_dp)) { | 4362 | if (is_edp(intel_dp)) { |
@@ -4337,6 +4413,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) | |||
4337 | intel_encoder->get_hw_state = intel_dp_get_hw_state; | 4413 | intel_encoder->get_hw_state = intel_dp_get_hw_state; |
4338 | intel_encoder->get_config = intel_dp_get_config; | 4414 | intel_encoder->get_config = intel_dp_get_config; |
4339 | if (IS_CHERRYVIEW(dev)) { | 4415 | if (IS_CHERRYVIEW(dev)) { |
4416 | intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; | ||
4340 | intel_encoder->pre_enable = chv_pre_enable_dp; | 4417 | intel_encoder->pre_enable = chv_pre_enable_dp; |
4341 | intel_encoder->enable = vlv_enable_dp; | 4418 | intel_encoder->enable = vlv_enable_dp; |
4342 | intel_encoder->post_disable = chv_post_disable_dp; | 4419 | intel_encoder->post_disable = chv_post_disable_dp; |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index eaa27ee9e367..5f7c7bd94d90 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -358,6 +358,11 @@ struct intel_pipe_wm { | |||
358 | bool sprites_scaled; | 358 | bool sprites_scaled; |
359 | }; | 359 | }; |
360 | 360 | ||
361 | struct intel_mmio_flip { | ||
362 | u32 seqno; | ||
363 | u32 ring_id; | ||
364 | }; | ||
365 | |||
361 | struct intel_crtc { | 366 | struct intel_crtc { |
362 | struct drm_crtc base; | 367 | struct drm_crtc base; |
363 | enum pipe pipe; | 368 | enum pipe pipe; |
@@ -384,7 +389,6 @@ struct intel_crtc { | |||
384 | 389 | ||
385 | struct drm_i915_gem_object *cursor_bo; | 390 | struct drm_i915_gem_object *cursor_bo; |
386 | uint32_t cursor_addr; | 391 | uint32_t cursor_addr; |
387 | int16_t cursor_x, cursor_y; | ||
388 | int16_t cursor_width, cursor_height; | 392 | int16_t cursor_width, cursor_height; |
389 | uint32_t cursor_cntl; | 393 | uint32_t cursor_cntl; |
390 | uint32_t cursor_base; | 394 | uint32_t cursor_base; |
@@ -412,6 +416,7 @@ struct intel_crtc { | |||
412 | wait_queue_head_t vbl_wait; | 416 | wait_queue_head_t vbl_wait; |
413 | 417 | ||
414 | int scanline_offset; | 418 | int scanline_offset; |
419 | struct intel_mmio_flip mmio_flip; | ||
415 | }; | 420 | }; |
416 | 421 | ||
417 | struct intel_plane_wm_parameters { | 422 | struct intel_plane_wm_parameters { |
@@ -428,7 +433,6 @@ struct intel_plane { | |||
428 | struct drm_i915_gem_object *obj; | 433 | struct drm_i915_gem_object *obj; |
429 | bool can_scale; | 434 | bool can_scale; |
430 | int max_downscale; | 435 | int max_downscale; |
431 | u32 lut_r[1024], lut_g[1024], lut_b[1024]; | ||
432 | int crtc_x, crtc_y; | 436 | int crtc_x, crtc_y; |
433 | unsigned int crtc_w, crtc_h; | 437 | unsigned int crtc_w, crtc_h; |
434 | uint32_t src_x, src_y; | 438 | uint32_t src_x, src_y; |
@@ -537,7 +541,6 @@ struct intel_dp { | |||
537 | unsigned long last_power_cycle; | 541 | unsigned long last_power_cycle; |
538 | unsigned long last_power_on; | 542 | unsigned long last_power_on; |
539 | unsigned long last_backlight_off; | 543 | unsigned long last_backlight_off; |
540 | bool psr_setup_done; | ||
541 | bool use_tps3; | 544 | bool use_tps3; |
542 | struct intel_connector *attached_connector; | 545 | struct intel_connector *attached_connector; |
543 | 546 | ||
@@ -721,8 +724,33 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev); | |||
721 | int intel_pch_rawclk(struct drm_device *dev); | 724 | int intel_pch_rawclk(struct drm_device *dev); |
722 | int valleyview_cur_cdclk(struct drm_i915_private *dev_priv); | 725 | int valleyview_cur_cdclk(struct drm_i915_private *dev_priv); |
723 | void intel_mark_busy(struct drm_device *dev); | 726 | void intel_mark_busy(struct drm_device *dev); |
724 | void intel_mark_fb_busy(struct drm_i915_gem_object *obj, | 727 | void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, |
725 | struct intel_engine_cs *ring); | 728 | struct intel_engine_cs *ring); |
729 | void intel_frontbuffer_flip_prepare(struct drm_device *dev, | ||
730 | unsigned frontbuffer_bits); | ||
731 | void intel_frontbuffer_flip_complete(struct drm_device *dev, | ||
732 | unsigned frontbuffer_bits); | ||
733 | void intel_frontbuffer_flush(struct drm_device *dev, | ||
734 | unsigned frontbuffer_bits); | ||
735 | /** | ||
736 | * intel_frontbuffer_flip - prepare frontbuffer flip | ||
737 | * @dev: DRM device | ||
738 | * @frontbuffer_bits: frontbuffer plane tracking bits | ||
739 | * | ||
740 | * This function gets called after scheduling a flip on @obj. This is for | ||
741 | * synchronous plane updates which will happen on the next vblank and which will | ||
742 | * not get delayed by pending gpu rendering. | ||
743 | * | ||
744 | * Can be called without any locks held. | ||
745 | */ | ||
746 | static inline | ||
747 | void intel_frontbuffer_flip(struct drm_device *dev, | ||
748 | unsigned frontbuffer_bits) | ||
749 | { | ||
750 | intel_frontbuffer_flush(dev, frontbuffer_bits); | ||
751 | } | ||
752 | |||
753 | void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire); | ||
726 | void intel_mark_idle(struct drm_device *dev); | 754 | void intel_mark_idle(struct drm_device *dev); |
727 | void intel_crtc_restore_mode(struct drm_crtc *crtc); | 755 | void intel_crtc_restore_mode(struct drm_crtc *crtc); |
728 | void intel_crtc_update_dpms(struct drm_crtc *crtc); | 756 | void intel_crtc_update_dpms(struct drm_crtc *crtc); |
@@ -831,11 +859,13 @@ void intel_edp_panel_on(struct intel_dp *intel_dp); | |||
831 | void intel_edp_panel_off(struct intel_dp *intel_dp); | 859 | void intel_edp_panel_off(struct intel_dp *intel_dp); |
832 | void intel_edp_psr_enable(struct intel_dp *intel_dp); | 860 | void intel_edp_psr_enable(struct intel_dp *intel_dp); |
833 | void intel_edp_psr_disable(struct intel_dp *intel_dp); | 861 | void intel_edp_psr_disable(struct intel_dp *intel_dp); |
834 | void intel_edp_psr_update(struct drm_device *dev); | ||
835 | void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate); | 862 | void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate); |
863 | void intel_edp_psr_exit(struct drm_device *dev); | ||
864 | void intel_edp_psr_init(struct drm_device *dev); | ||
865 | |||
836 | 866 | ||
837 | /* intel_dsi.c */ | 867 | /* intel_dsi.c */ |
838 | bool intel_dsi_init(struct drm_device *dev); | 868 | void intel_dsi_init(struct drm_device *dev); |
839 | 869 | ||
840 | 870 | ||
841 | /* intel_dvo.c */ | 871 | /* intel_dvo.c */ |
@@ -961,6 +991,7 @@ void intel_init_gt_powersave(struct drm_device *dev); | |||
961 | void intel_cleanup_gt_powersave(struct drm_device *dev); | 991 | void intel_cleanup_gt_powersave(struct drm_device *dev); |
962 | void intel_enable_gt_powersave(struct drm_device *dev); | 992 | void intel_enable_gt_powersave(struct drm_device *dev); |
963 | void intel_disable_gt_powersave(struct drm_device *dev); | 993 | void intel_disable_gt_powersave(struct drm_device *dev); |
994 | void intel_suspend_gt_powersave(struct drm_device *dev); | ||
964 | void intel_reset_gt_powersave(struct drm_device *dev); | 995 | void intel_reset_gt_powersave(struct drm_device *dev); |
965 | void ironlake_teardown_rc6(struct drm_device *dev); | 996 | void ironlake_teardown_rc6(struct drm_device *dev); |
966 | void gen6_update_ring_freq(struct drm_device *dev); | 997 | void gen6_update_ring_freq(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 7c07ee07a8ee..2ee1722c0af4 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
@@ -657,7 +657,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = { | |||
657 | .fill_modes = drm_helper_probe_single_connector_modes, | 657 | .fill_modes = drm_helper_probe_single_connector_modes, |
658 | }; | 658 | }; |
659 | 659 | ||
660 | bool intel_dsi_init(struct drm_device *dev) | 660 | void intel_dsi_init(struct drm_device *dev) |
661 | { | 661 | { |
662 | struct intel_dsi *intel_dsi; | 662 | struct intel_dsi *intel_dsi; |
663 | struct intel_encoder *intel_encoder; | 663 | struct intel_encoder *intel_encoder; |
@@ -673,29 +673,29 @@ bool intel_dsi_init(struct drm_device *dev) | |||
673 | 673 | ||
674 | /* There is no detection method for MIPI so rely on VBT */ | 674 | /* There is no detection method for MIPI so rely on VBT */ |
675 | if (!dev_priv->vbt.has_mipi) | 675 | if (!dev_priv->vbt.has_mipi) |
676 | return false; | 676 | return; |
677 | |||
678 | if (IS_VALLEYVIEW(dev)) { | ||
679 | dev_priv->mipi_mmio_base = VLV_MIPI_BASE; | ||
680 | } else { | ||
681 | DRM_ERROR("Unsupported Mipi device to reg base"); | ||
682 | return; | ||
683 | } | ||
677 | 684 | ||
678 | intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); | 685 | intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); |
679 | if (!intel_dsi) | 686 | if (!intel_dsi) |
680 | return false; | 687 | return; |
681 | 688 | ||
682 | intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL); | 689 | intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL); |
683 | if (!intel_connector) { | 690 | if (!intel_connector) { |
684 | kfree(intel_dsi); | 691 | kfree(intel_dsi); |
685 | return false; | 692 | return; |
686 | } | 693 | } |
687 | 694 | ||
688 | intel_encoder = &intel_dsi->base; | 695 | intel_encoder = &intel_dsi->base; |
689 | encoder = &intel_encoder->base; | 696 | encoder = &intel_encoder->base; |
690 | intel_dsi->attached_connector = intel_connector; | 697 | intel_dsi->attached_connector = intel_connector; |
691 | 698 | ||
692 | if (IS_VALLEYVIEW(dev)) { | ||
693 | dev_priv->mipi_mmio_base = VLV_MIPI_BASE; | ||
694 | } else { | ||
695 | DRM_ERROR("Unsupported Mipi device to reg base"); | ||
696 | return false; | ||
697 | } | ||
698 | |||
699 | connector = &intel_connector->base; | 699 | connector = &intel_connector->base; |
700 | 700 | ||
701 | drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI); | 701 | drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI); |
@@ -753,12 +753,10 @@ bool intel_dsi_init(struct drm_device *dev) | |||
753 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; | 753 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; |
754 | intel_panel_init(&intel_connector->panel, fixed_mode, NULL); | 754 | intel_panel_init(&intel_connector->panel, fixed_mode, NULL); |
755 | 755 | ||
756 | return true; | 756 | return; |
757 | 757 | ||
758 | err: | 758 | err: |
759 | drm_encoder_cleanup(&intel_encoder->base); | 759 | drm_encoder_cleanup(&intel_encoder->base); |
760 | kfree(intel_dsi); | 760 | kfree(intel_dsi); |
761 | kfree(intel_connector); | 761 | kfree(intel_connector); |
762 | |||
763 | return false; | ||
764 | } | 762 | } |
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index 21a0d348cedc..47c7584a4aa0 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | |||
@@ -143,7 +143,7 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data) | |||
143 | case MIPI_DSI_DCS_LONG_WRITE: | 143 | case MIPI_DSI_DCS_LONG_WRITE: |
144 | dsi_vc_dcs_write(intel_dsi, vc, data, len); | 144 | dsi_vc_dcs_write(intel_dsi, vc, data, len); |
145 | break; | 145 | break; |
146 | }; | 146 | } |
147 | 147 | ||
148 | data += len; | 148 | data += len; |
149 | 149 | ||
@@ -294,7 +294,8 @@ static bool generic_init(struct intel_dsi_device *dsi) | |||
294 | intel_dsi->rst_timer_val = mipi_config->device_reset_timer; | 294 | intel_dsi->rst_timer_val = mipi_config->device_reset_timer; |
295 | intel_dsi->init_count = mipi_config->master_init_timer; | 295 | intel_dsi->init_count = mipi_config->master_init_timer; |
296 | intel_dsi->bw_timer = mipi_config->dbi_bw_timer; | 296 | intel_dsi->bw_timer = mipi_config->dbi_bw_timer; |
297 | intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0; | 297 | intel_dsi->video_frmt_cfg_bits = |
298 | mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0; | ||
298 | 299 | ||
299 | switch (intel_dsi->escape_clk_div) { | 300 | switch (intel_dsi->escape_clk_div) { |
300 | case 0: | 301 | case 0: |
@@ -351,7 +352,8 @@ static bool generic_init(struct intel_dsi_device *dsi) | |||
351 | * | 352 | * |
352 | * prepare count | 353 | * prepare count |
353 | */ | 354 | */ |
354 | ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare); | 355 | ths_prepare_ns = max(mipi_config->ths_prepare, |
356 | mipi_config->tclk_prepare); | ||
355 | prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2); | 357 | prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2); |
356 | 358 | ||
357 | /* exit zero count */ | 359 | /* exit zero count */ |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 3fb71a04e14f..56b47d2ffaf7 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -112,7 +112,15 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) | |||
112 | 112 | ||
113 | static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) | 113 | static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) |
114 | { | 114 | { |
115 | struct drm_device *dev = connector->base.dev; | ||
116 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
115 | struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base); | 117 | struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base); |
118 | u32 tmp; | ||
119 | |||
120 | tmp = I915_READ(intel_dvo->dev.dvo_reg); | ||
121 | |||
122 | if (!(tmp & DVO_ENABLE)) | ||
123 | return false; | ||
116 | 124 | ||
117 | return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev); | 125 | return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev); |
118 | } | 126 | } |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 347d16220cd0..44e17fd781b8 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -43,10 +43,36 @@ | |||
43 | #include <drm/i915_drm.h> | 43 | #include <drm/i915_drm.h> |
44 | #include "i915_drv.h" | 44 | #include "i915_drv.h" |
45 | 45 | ||
46 | static int intel_fbdev_set_par(struct fb_info *info) | ||
47 | { | ||
48 | struct drm_fb_helper *fb_helper = info->par; | ||
49 | struct intel_fbdev *ifbdev = | ||
50 | container_of(fb_helper, struct intel_fbdev, helper); | ||
51 | int ret; | ||
52 | |||
53 | ret = drm_fb_helper_set_par(info); | ||
54 | |||
55 | if (ret == 0) { | ||
56 | /* | ||
57 | * FIXME: fbdev presumes that all callbacks also work from | ||
58 | * atomic contexts and relies on that for emergency oops | ||
59 | * printing. KMS totally doesn't do that and the locking here is | ||
60 | * by far not the only place this goes wrong. Ignore this for | ||
61 | * now until we solve this for real. | ||
62 | */ | ||
63 | mutex_lock(&fb_helper->dev->struct_mutex); | ||
64 | ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj, | ||
65 | true); | ||
66 | mutex_unlock(&fb_helper->dev->struct_mutex); | ||
67 | } | ||
68 | |||
69 | return ret; | ||
70 | } | ||
71 | |||
46 | static struct fb_ops intelfb_ops = { | 72 | static struct fb_ops intelfb_ops = { |
47 | .owner = THIS_MODULE, | 73 | .owner = THIS_MODULE, |
48 | .fb_check_var = drm_fb_helper_check_var, | 74 | .fb_check_var = drm_fb_helper_check_var, |
49 | .fb_set_par = drm_fb_helper_set_par, | 75 | .fb_set_par = intel_fbdev_set_par, |
50 | .fb_fillrect = cfb_fillrect, | 76 | .fb_fillrect = cfb_fillrect, |
51 | .fb_copyarea = cfb_copyarea, | 77 | .fb_copyarea = cfb_copyarea, |
52 | .fb_imageblit = cfb_imageblit, | 78 | .fb_imageblit = cfb_imageblit, |
@@ -417,7 +443,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | |||
417 | } | 443 | } |
418 | crtcs[i] = new_crtc; | 444 | crtcs[i] = new_crtc; |
419 | 445 | ||
420 | DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n", | 446 | DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n", |
421 | connector->name, | 447 | connector->name, |
422 | pipe_name(to_intel_crtc(encoder->crtc)->pipe), | 448 | pipe_name(to_intel_crtc(encoder->crtc)->pipe), |
423 | encoder->crtc->base.id, | 449 | encoder->crtc->base.id, |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 0b603102cb3b..24224131ebf1 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -1229,6 +1229,70 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) | |||
1229 | mutex_unlock(&dev_priv->dpio_lock); | 1229 | mutex_unlock(&dev_priv->dpio_lock); |
1230 | } | 1230 | } |
1231 | 1231 | ||
1232 | static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) | ||
1233 | { | ||
1234 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); | ||
1235 | struct drm_device *dev = encoder->base.dev; | ||
1236 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1237 | struct intel_crtc *intel_crtc = | ||
1238 | to_intel_crtc(encoder->base.crtc); | ||
1239 | enum dpio_channel ch = vlv_dport_to_channel(dport); | ||
1240 | enum pipe pipe = intel_crtc->pipe; | ||
1241 | u32 val; | ||
1242 | |||
1243 | mutex_lock(&dev_priv->dpio_lock); | ||
1244 | |||
1245 | /* program left/right clock distribution */ | ||
1246 | if (pipe != PIPE_B) { | ||
1247 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); | ||
1248 | val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); | ||
1249 | if (ch == DPIO_CH0) | ||
1250 | val |= CHV_BUFLEFTENA1_FORCE; | ||
1251 | if (ch == DPIO_CH1) | ||
1252 | val |= CHV_BUFRIGHTENA1_FORCE; | ||
1253 | vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); | ||
1254 | } else { | ||
1255 | val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); | ||
1256 | val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); | ||
1257 | if (ch == DPIO_CH0) | ||
1258 | val |= CHV_BUFLEFTENA2_FORCE; | ||
1259 | if (ch == DPIO_CH1) | ||
1260 | val |= CHV_BUFRIGHTENA2_FORCE; | ||
1261 | vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); | ||
1262 | } | ||
1263 | |||
1264 | /* program clock channel usage */ | ||
1265 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); | ||
1266 | val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; | ||
1267 | if (pipe != PIPE_B) | ||
1268 | val &= ~CHV_PCS_USEDCLKCHANNEL; | ||
1269 | else | ||
1270 | val |= CHV_PCS_USEDCLKCHANNEL; | ||
1271 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); | ||
1272 | |||
1273 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); | ||
1274 | val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; | ||
1275 | if (pipe != PIPE_B) | ||
1276 | val &= ~CHV_PCS_USEDCLKCHANNEL; | ||
1277 | else | ||
1278 | val |= CHV_PCS_USEDCLKCHANNEL; | ||
1279 | vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); | ||
1280 | |||
1281 | /* | ||
1282 | * This a a bit weird since generally CL | ||
1283 | * matches the pipe, but here we need to | ||
1284 | * pick the CL based on the port. | ||
1285 | */ | ||
1286 | val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); | ||
1287 | if (pipe != PIPE_B) | ||
1288 | val &= ~CHV_CMN_USEDCLKCHANNEL; | ||
1289 | else | ||
1290 | val |= CHV_CMN_USEDCLKCHANNEL; | ||
1291 | vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); | ||
1292 | |||
1293 | mutex_unlock(&dev_priv->dpio_lock); | ||
1294 | } | ||
1295 | |||
1232 | static void vlv_hdmi_post_disable(struct intel_encoder *encoder) | 1296 | static void vlv_hdmi_post_disable(struct intel_encoder *encoder) |
1233 | { | 1297 | { |
1234 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); | 1298 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); |
@@ -1528,6 +1592,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) | |||
1528 | intel_encoder->get_hw_state = intel_hdmi_get_hw_state; | 1592 | intel_encoder->get_hw_state = intel_hdmi_get_hw_state; |
1529 | intel_encoder->get_config = intel_hdmi_get_config; | 1593 | intel_encoder->get_config = intel_hdmi_get_config; |
1530 | if (IS_CHERRYVIEW(dev)) { | 1594 | if (IS_CHERRYVIEW(dev)) { |
1595 | intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable; | ||
1531 | intel_encoder->pre_enable = chv_hdmi_pre_enable; | 1596 | intel_encoder->pre_enable = chv_hdmi_pre_enable; |
1532 | intel_encoder->enable = vlv_enable_hdmi; | 1597 | intel_encoder->enable = vlv_enable_hdmi; |
1533 | intel_encoder->post_disable = chv_hdmi_post_disable; | 1598 | intel_encoder->post_disable = chv_hdmi_post_disable; |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index daa118978eec..307c2f1842b7 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -415,6 +415,10 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) | |||
415 | } | 415 | } |
416 | 416 | ||
417 | intel_overlay_release_old_vid_tail(overlay); | 417 | intel_overlay_release_old_vid_tail(overlay); |
418 | |||
419 | |||
420 | i915_gem_track_fb(overlay->old_vid_bo, NULL, | ||
421 | INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); | ||
418 | return 0; | 422 | return 0; |
419 | } | 423 | } |
420 | 424 | ||
@@ -686,6 +690,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
686 | bool scale_changed = false; | 690 | bool scale_changed = false; |
687 | struct drm_device *dev = overlay->dev; | 691 | struct drm_device *dev = overlay->dev; |
688 | u32 swidth, swidthsw, sheight, ostride; | 692 | u32 swidth, swidthsw, sheight, ostride; |
693 | enum pipe pipe = overlay->crtc->pipe; | ||
689 | 694 | ||
690 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 695 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
691 | BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); | 696 | BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); |
@@ -713,7 +718,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
713 | oconfig = OCONF_CC_OUT_8BIT; | 718 | oconfig = OCONF_CC_OUT_8BIT; |
714 | if (IS_GEN4(overlay->dev)) | 719 | if (IS_GEN4(overlay->dev)) |
715 | oconfig |= OCONF_CSC_MODE_BT709; | 720 | oconfig |= OCONF_CSC_MODE_BT709; |
716 | oconfig |= overlay->crtc->pipe == 0 ? | 721 | oconfig |= pipe == 0 ? |
717 | OCONF_PIPE_A : OCONF_PIPE_B; | 722 | OCONF_PIPE_A : OCONF_PIPE_B; |
718 | iowrite32(oconfig, ®s->OCONFIG); | 723 | iowrite32(oconfig, ®s->OCONFIG); |
719 | intel_overlay_unmap_regs(overlay, regs); | 724 | intel_overlay_unmap_regs(overlay, regs); |
@@ -776,9 +781,15 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
776 | if (ret) | 781 | if (ret) |
777 | goto out_unpin; | 782 | goto out_unpin; |
778 | 783 | ||
784 | i915_gem_track_fb(overlay->vid_bo, new_bo, | ||
785 | INTEL_FRONTBUFFER_OVERLAY(pipe)); | ||
786 | |||
779 | overlay->old_vid_bo = overlay->vid_bo; | 787 | overlay->old_vid_bo = overlay->vid_bo; |
780 | overlay->vid_bo = new_bo; | 788 | overlay->vid_bo = new_bo; |
781 | 789 | ||
790 | intel_frontbuffer_flip(dev, | ||
791 | INTEL_FRONTBUFFER_OVERLAY(pipe)); | ||
792 | |||
782 | return 0; | 793 | return 0; |
783 | 794 | ||
784 | out_unpin: | 795 | out_unpin: |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ee72807069e4..b6e09f226230 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -529,7 +529,10 @@ void intel_update_fbc(struct drm_device *dev) | |||
529 | goto out_disable; | 529 | goto out_disable; |
530 | } | 530 | } |
531 | 531 | ||
532 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { | 532 | if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) { |
533 | max_width = 4096; | ||
534 | max_height = 4096; | ||
535 | } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { | ||
533 | max_width = 4096; | 536 | max_width = 4096; |
534 | max_height = 2048; | 537 | max_height = 2048; |
535 | } else { | 538 | } else { |
@@ -864,95 +867,95 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane) | |||
864 | 867 | ||
865 | /* Pineview has different values for various configs */ | 868 | /* Pineview has different values for various configs */ |
866 | static const struct intel_watermark_params pineview_display_wm = { | 869 | static const struct intel_watermark_params pineview_display_wm = { |
867 | PINEVIEW_DISPLAY_FIFO, | 870 | .fifo_size = PINEVIEW_DISPLAY_FIFO, |
868 | PINEVIEW_MAX_WM, | 871 | .max_wm = PINEVIEW_MAX_WM, |
869 | PINEVIEW_DFT_WM, | 872 | .default_wm = PINEVIEW_DFT_WM, |
870 | PINEVIEW_GUARD_WM, | 873 | .guard_size = PINEVIEW_GUARD_WM, |
871 | PINEVIEW_FIFO_LINE_SIZE | 874 | .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, |
872 | }; | 875 | }; |
873 | static const struct intel_watermark_params pineview_display_hplloff_wm = { | 876 | static const struct intel_watermark_params pineview_display_hplloff_wm = { |
874 | PINEVIEW_DISPLAY_FIFO, | 877 | .fifo_size = PINEVIEW_DISPLAY_FIFO, |
875 | PINEVIEW_MAX_WM, | 878 | .max_wm = PINEVIEW_MAX_WM, |
876 | PINEVIEW_DFT_HPLLOFF_WM, | 879 | .default_wm = PINEVIEW_DFT_HPLLOFF_WM, |
877 | PINEVIEW_GUARD_WM, | 880 | .guard_size = PINEVIEW_GUARD_WM, |
878 | PINEVIEW_FIFO_LINE_SIZE | 881 | .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, |
879 | }; | 882 | }; |
880 | static const struct intel_watermark_params pineview_cursor_wm = { | 883 | static const struct intel_watermark_params pineview_cursor_wm = { |
881 | PINEVIEW_CURSOR_FIFO, | 884 | .fifo_size = PINEVIEW_CURSOR_FIFO, |
882 | PINEVIEW_CURSOR_MAX_WM, | 885 | .max_wm = PINEVIEW_CURSOR_MAX_WM, |
883 | PINEVIEW_CURSOR_DFT_WM, | 886 | .default_wm = PINEVIEW_CURSOR_DFT_WM, |
884 | PINEVIEW_CURSOR_GUARD_WM, | 887 | .guard_size = PINEVIEW_CURSOR_GUARD_WM, |
885 | PINEVIEW_FIFO_LINE_SIZE, | 888 | .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, |
886 | }; | 889 | }; |
887 | static const struct intel_watermark_params pineview_cursor_hplloff_wm = { | 890 | static const struct intel_watermark_params pineview_cursor_hplloff_wm = { |
888 | PINEVIEW_CURSOR_FIFO, | 891 | .fifo_size = PINEVIEW_CURSOR_FIFO, |
889 | PINEVIEW_CURSOR_MAX_WM, | 892 | .max_wm = PINEVIEW_CURSOR_MAX_WM, |
890 | PINEVIEW_CURSOR_DFT_WM, | 893 | .default_wm = PINEVIEW_CURSOR_DFT_WM, |
891 | PINEVIEW_CURSOR_GUARD_WM, | 894 | .guard_size = PINEVIEW_CURSOR_GUARD_WM, |
892 | PINEVIEW_FIFO_LINE_SIZE | 895 | .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, |
893 | }; | 896 | }; |
894 | static const struct intel_watermark_params g4x_wm_info = { | 897 | static const struct intel_watermark_params g4x_wm_info = { |
895 | G4X_FIFO_SIZE, | 898 | .fifo_size = G4X_FIFO_SIZE, |
896 | G4X_MAX_WM, | 899 | .max_wm = G4X_MAX_WM, |
897 | G4X_MAX_WM, | 900 | .default_wm = G4X_MAX_WM, |
898 | 2, | 901 | .guard_size = 2, |
899 | G4X_FIFO_LINE_SIZE, | 902 | .cacheline_size = G4X_FIFO_LINE_SIZE, |
900 | }; | 903 | }; |
901 | static const struct intel_watermark_params g4x_cursor_wm_info = { | 904 | static const struct intel_watermark_params g4x_cursor_wm_info = { |
902 | I965_CURSOR_FIFO, | 905 | .fifo_size = I965_CURSOR_FIFO, |
903 | I965_CURSOR_MAX_WM, | 906 | .max_wm = I965_CURSOR_MAX_WM, |
904 | I965_CURSOR_DFT_WM, | 907 | .default_wm = I965_CURSOR_DFT_WM, |
905 | 2, | 908 | .guard_size = 2, |
906 | G4X_FIFO_LINE_SIZE, | 909 | .cacheline_size = G4X_FIFO_LINE_SIZE, |
907 | }; | 910 | }; |
908 | static const struct intel_watermark_params valleyview_wm_info = { | 911 | static const struct intel_watermark_params valleyview_wm_info = { |
909 | VALLEYVIEW_FIFO_SIZE, | 912 | .fifo_size = VALLEYVIEW_FIFO_SIZE, |
910 | VALLEYVIEW_MAX_WM, | 913 | .max_wm = VALLEYVIEW_MAX_WM, |
911 | VALLEYVIEW_MAX_WM, | 914 | .default_wm = VALLEYVIEW_MAX_WM, |
912 | 2, | 915 | .guard_size = 2, |
913 | G4X_FIFO_LINE_SIZE, | 916 | .cacheline_size = G4X_FIFO_LINE_SIZE, |
914 | }; | 917 | }; |
915 | static const struct intel_watermark_params valleyview_cursor_wm_info = { | 918 | static const struct intel_watermark_params valleyview_cursor_wm_info = { |
916 | I965_CURSOR_FIFO, | 919 | .fifo_size = I965_CURSOR_FIFO, |
917 | VALLEYVIEW_CURSOR_MAX_WM, | 920 | .max_wm = VALLEYVIEW_CURSOR_MAX_WM, |
918 | I965_CURSOR_DFT_WM, | 921 | .default_wm = I965_CURSOR_DFT_WM, |
919 | 2, | 922 | .guard_size = 2, |
920 | G4X_FIFO_LINE_SIZE, | 923 | .cacheline_size = G4X_FIFO_LINE_SIZE, |
921 | }; | 924 | }; |
922 | static const struct intel_watermark_params i965_cursor_wm_info = { | 925 | static const struct intel_watermark_params i965_cursor_wm_info = { |
923 | I965_CURSOR_FIFO, | 926 | .fifo_size = I965_CURSOR_FIFO, |
924 | I965_CURSOR_MAX_WM, | 927 | .max_wm = I965_CURSOR_MAX_WM, |
925 | I965_CURSOR_DFT_WM, | 928 | .default_wm = I965_CURSOR_DFT_WM, |
926 | 2, | 929 | .guard_size = 2, |
927 | I915_FIFO_LINE_SIZE, | 930 | .cacheline_size = I915_FIFO_LINE_SIZE, |
928 | }; | 931 | }; |
929 | static const struct intel_watermark_params i945_wm_info = { | 932 | static const struct intel_watermark_params i945_wm_info = { |
930 | I945_FIFO_SIZE, | 933 | .fifo_size = I945_FIFO_SIZE, |
931 | I915_MAX_WM, | 934 | .max_wm = I915_MAX_WM, |
932 | 1, | 935 | .default_wm = 1, |
933 | 2, | 936 | .guard_size = 2, |
934 | I915_FIFO_LINE_SIZE | 937 | .cacheline_size = I915_FIFO_LINE_SIZE, |
935 | }; | 938 | }; |
936 | static const struct intel_watermark_params i915_wm_info = { | 939 | static const struct intel_watermark_params i915_wm_info = { |
937 | I915_FIFO_SIZE, | 940 | .fifo_size = I915_FIFO_SIZE, |
938 | I915_MAX_WM, | 941 | .max_wm = I915_MAX_WM, |
939 | 1, | 942 | .default_wm = 1, |
940 | 2, | 943 | .guard_size = 2, |
941 | I915_FIFO_LINE_SIZE | 944 | .cacheline_size = I915_FIFO_LINE_SIZE, |
942 | }; | 945 | }; |
943 | static const struct intel_watermark_params i830_wm_info = { | 946 | static const struct intel_watermark_params i830_wm_info = { |
944 | I855GM_FIFO_SIZE, | 947 | .fifo_size = I855GM_FIFO_SIZE, |
945 | I915_MAX_WM, | 948 | .max_wm = I915_MAX_WM, |
946 | 1, | 949 | .default_wm = 1, |
947 | 2, | 950 | .guard_size = 2, |
948 | I830_FIFO_LINE_SIZE | 951 | .cacheline_size = I830_FIFO_LINE_SIZE, |
949 | }; | 952 | }; |
950 | static const struct intel_watermark_params i845_wm_info = { | 953 | static const struct intel_watermark_params i845_wm_info = { |
951 | I830_FIFO_SIZE, | 954 | .fifo_size = I830_FIFO_SIZE, |
952 | I915_MAX_WM, | 955 | .max_wm = I915_MAX_WM, |
953 | 1, | 956 | .default_wm = 1, |
954 | 2, | 957 | .guard_size = 2, |
955 | I830_FIFO_LINE_SIZE | 958 | .cacheline_size = I830_FIFO_LINE_SIZE, |
956 | }; | 959 | }; |
957 | 960 | ||
958 | /** | 961 | /** |
@@ -3348,6 +3351,13 @@ static void gen6_disable_rps(struct drm_device *dev) | |||
3348 | gen6_disable_rps_interrupts(dev); | 3351 | gen6_disable_rps_interrupts(dev); |
3349 | } | 3352 | } |
3350 | 3353 | ||
3354 | static void cherryview_disable_rps(struct drm_device *dev) | ||
3355 | { | ||
3356 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3357 | |||
3358 | I915_WRITE(GEN6_RC_CONTROL, 0); | ||
3359 | } | ||
3360 | |||
3351 | static void valleyview_disable_rps(struct drm_device *dev) | 3361 | static void valleyview_disable_rps(struct drm_device *dev) |
3352 | { | 3362 | { |
3353 | struct drm_i915_private *dev_priv = dev->dev_private; | 3363 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -3727,6 +3737,35 @@ void gen6_update_ring_freq(struct drm_device *dev) | |||
3727 | mutex_unlock(&dev_priv->rps.hw_lock); | 3737 | mutex_unlock(&dev_priv->rps.hw_lock); |
3728 | } | 3738 | } |
3729 | 3739 | ||
3740 | int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) | ||
3741 | { | ||
3742 | u32 val, rp0; | ||
3743 | |||
3744 | val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG); | ||
3745 | rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK; | ||
3746 | |||
3747 | return rp0; | ||
3748 | } | ||
3749 | |||
3750 | static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv) | ||
3751 | { | ||
3752 | u32 val, rpe; | ||
3753 | |||
3754 | val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG); | ||
3755 | rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; | ||
3756 | |||
3757 | return rpe; | ||
3758 | } | ||
3759 | |||
3760 | int cherryview_rps_min_freq(struct drm_i915_private *dev_priv) | ||
3761 | { | ||
3762 | u32 val, rpn; | ||
3763 | |||
3764 | val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG); | ||
3765 | rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK; | ||
3766 | return rpn; | ||
3767 | } | ||
3768 | |||
3730 | int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) | 3769 | int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) |
3731 | { | 3770 | { |
3732 | u32 val, rp0; | 3771 | u32 val, rp0; |
@@ -3766,6 +3805,35 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv) | |||
3766 | dev_priv->vlv_pctx->stolen->start); | 3805 | dev_priv->vlv_pctx->stolen->start); |
3767 | } | 3806 | } |
3768 | 3807 | ||
3808 | |||
3809 | /* Check that the pcbr address is not empty. */ | ||
3810 | static void cherryview_check_pctx(struct drm_i915_private *dev_priv) | ||
3811 | { | ||
3812 | unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; | ||
3813 | |||
3814 | WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); | ||
3815 | } | ||
3816 | |||
3817 | static void cherryview_setup_pctx(struct drm_device *dev) | ||
3818 | { | ||
3819 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3820 | unsigned long pctx_paddr, paddr; | ||
3821 | struct i915_gtt *gtt = &dev_priv->gtt; | ||
3822 | u32 pcbr; | ||
3823 | int pctx_size = 32*1024; | ||
3824 | |||
3825 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
3826 | |||
3827 | pcbr = I915_READ(VLV_PCBR); | ||
3828 | if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { | ||
3829 | paddr = (dev_priv->mm.stolen_base + | ||
3830 | (gtt->stolen_size - pctx_size)); | ||
3831 | |||
3832 | pctx_paddr = (paddr & (~4095)); | ||
3833 | I915_WRITE(VLV_PCBR, pctx_paddr); | ||
3834 | } | ||
3835 | } | ||
3836 | |||
3769 | static void valleyview_setup_pctx(struct drm_device *dev) | 3837 | static void valleyview_setup_pctx(struct drm_device *dev) |
3770 | { | 3838 | { |
3771 | struct drm_i915_private *dev_priv = dev->dev_private; | 3839 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -3855,11 +3923,135 @@ static void valleyview_init_gt_powersave(struct drm_device *dev) | |||
3855 | mutex_unlock(&dev_priv->rps.hw_lock); | 3923 | mutex_unlock(&dev_priv->rps.hw_lock); |
3856 | } | 3924 | } |
3857 | 3925 | ||
3926 | static void cherryview_init_gt_powersave(struct drm_device *dev) | ||
3927 | { | ||
3928 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3929 | |||
3930 | cherryview_setup_pctx(dev); | ||
3931 | |||
3932 | mutex_lock(&dev_priv->rps.hw_lock); | ||
3933 | |||
3934 | dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); | ||
3935 | dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; | ||
3936 | DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", | ||
3937 | vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq), | ||
3938 | dev_priv->rps.max_freq); | ||
3939 | |||
3940 | dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv); | ||
3941 | DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", | ||
3942 | vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), | ||
3943 | dev_priv->rps.efficient_freq); | ||
3944 | |||
3945 | dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv); | ||
3946 | DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", | ||
3947 | vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq), | ||
3948 | dev_priv->rps.min_freq); | ||
3949 | |||
3950 | /* Preserve min/max settings in case of re-init */ | ||
3951 | if (dev_priv->rps.max_freq_softlimit == 0) | ||
3952 | dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; | ||
3953 | |||
3954 | if (dev_priv->rps.min_freq_softlimit == 0) | ||
3955 | dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; | ||
3956 | |||
3957 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
3958 | } | ||
3959 | |||
3858 | static void valleyview_cleanup_gt_powersave(struct drm_device *dev) | 3960 | static void valleyview_cleanup_gt_powersave(struct drm_device *dev) |
3859 | { | 3961 | { |
3860 | valleyview_cleanup_pctx(dev); | 3962 | valleyview_cleanup_pctx(dev); |
3861 | } | 3963 | } |
3862 | 3964 | ||
3965 | static void cherryview_enable_rps(struct drm_device *dev) | ||
3966 | { | ||
3967 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3968 | struct intel_engine_cs *ring; | ||
3969 | u32 gtfifodbg, val, rc6_mode = 0, pcbr; | ||
3970 | int i; | ||
3971 | |||
3972 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | ||
3973 | |||
3974 | gtfifodbg = I915_READ(GTFIFODBG); | ||
3975 | if (gtfifodbg) { | ||
3976 | DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", | ||
3977 | gtfifodbg); | ||
3978 | I915_WRITE(GTFIFODBG, gtfifodbg); | ||
3979 | } | ||
3980 | |||
3981 | cherryview_check_pctx(dev_priv); | ||
3982 | |||
3983 | /* 1a & 1b: Get forcewake during program sequence. Although the driver | ||
3984 | * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ | ||
3985 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); | ||
3986 | |||
3987 | /* 2a: Program RC6 thresholds.*/ | ||
3988 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); | ||
3989 | I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ | ||
3990 | I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ | ||
3991 | |||
3992 | for_each_ring(ring, dev_priv, i) | ||
3993 | I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); | ||
3994 | I915_WRITE(GEN6_RC_SLEEP, 0); | ||
3995 | |||
3996 | I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ | ||
3997 | |||
3998 | /* allows RC6 residency counter to work */ | ||
3999 | I915_WRITE(VLV_COUNTER_CONTROL, | ||
4000 | _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | | ||
4001 | VLV_MEDIA_RC6_COUNT_EN | | ||
4002 | VLV_RENDER_RC6_COUNT_EN)); | ||
4003 | |||
4004 | /* For now we assume BIOS is allocating and populating the PCBR */ | ||
4005 | pcbr = I915_READ(VLV_PCBR); | ||
4006 | |||
4007 | DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr); | ||
4008 | |||
4009 | /* 3: Enable RC6 */ | ||
4010 | if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && | ||
4011 | (pcbr >> VLV_PCBR_ADDR_SHIFT)) | ||
4012 | rc6_mode = GEN6_RC_CTL_EI_MODE(1); | ||
4013 | |||
4014 | I915_WRITE(GEN6_RC_CONTROL, rc6_mode); | ||
4015 | |||
4016 | /* 4 Program defaults and thresholds for RPS*/ | ||
4017 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); | ||
4018 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); | ||
4019 | I915_WRITE(GEN6_RP_UP_EI, 66000); | ||
4020 | I915_WRITE(GEN6_RP_DOWN_EI, 350000); | ||
4021 | |||
4022 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); | ||
4023 | |||
4024 | /* WaDisablePwrmtrEvent:chv (pre-production hw) */ | ||
4025 | I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff); | ||
4026 | I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00); | ||
4027 | |||
4028 | /* 5: Enable RPS */ | ||
4029 | I915_WRITE(GEN6_RP_CONTROL, | ||
4030 | GEN6_RP_MEDIA_HW_NORMAL_MODE | | ||
4031 | GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */ | ||
4032 | GEN6_RP_ENABLE | | ||
4033 | GEN6_RP_UP_BUSY_AVG | | ||
4034 | GEN6_RP_DOWN_IDLE_AVG); | ||
4035 | |||
4036 | val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | ||
4037 | |||
4038 | DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); | ||
4039 | DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); | ||
4040 | |||
4041 | dev_priv->rps.cur_freq = (val >> 8) & 0xff; | ||
4042 | DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", | ||
4043 | vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq), | ||
4044 | dev_priv->rps.cur_freq); | ||
4045 | |||
4046 | DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", | ||
4047 | vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), | ||
4048 | dev_priv->rps.efficient_freq); | ||
4049 | |||
4050 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); | ||
4051 | |||
4052 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); | ||
4053 | } | ||
4054 | |||
3863 | static void valleyview_enable_rps(struct drm_device *dev) | 4055 | static void valleyview_enable_rps(struct drm_device *dev) |
3864 | { | 4056 | { |
3865 | struct drm_i915_private *dev_priv = dev->dev_private; | 4057 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -4668,33 +4860,57 @@ void intel_init_gt_powersave(struct drm_device *dev) | |||
4668 | { | 4860 | { |
4669 | i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); | 4861 | i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); |
4670 | 4862 | ||
4671 | if (IS_VALLEYVIEW(dev)) | 4863 | if (IS_CHERRYVIEW(dev)) |
4864 | cherryview_init_gt_powersave(dev); | ||
4865 | else if (IS_VALLEYVIEW(dev)) | ||
4672 | valleyview_init_gt_powersave(dev); | 4866 | valleyview_init_gt_powersave(dev); |
4673 | } | 4867 | } |
4674 | 4868 | ||
4675 | void intel_cleanup_gt_powersave(struct drm_device *dev) | 4869 | void intel_cleanup_gt_powersave(struct drm_device *dev) |
4676 | { | 4870 | { |
4677 | if (IS_VALLEYVIEW(dev)) | 4871 | if (IS_CHERRYVIEW(dev)) |
4872 | return; | ||
4873 | else if (IS_VALLEYVIEW(dev)) | ||
4678 | valleyview_cleanup_gt_powersave(dev); | 4874 | valleyview_cleanup_gt_powersave(dev); |
4679 | } | 4875 | } |
4680 | 4876 | ||
4877 | /** | ||
4878 | * intel_suspend_gt_powersave - suspend PM work and helper threads | ||
4879 | * @dev: drm device | ||
4880 | * | ||
4881 | * We don't want to disable RC6 or other features here, we just want | ||
4882 | * to make sure any work we've queued has finished and won't bother | ||
4883 | * us while we're suspended. | ||
4884 | */ | ||
4885 | void intel_suspend_gt_powersave(struct drm_device *dev) | ||
4886 | { | ||
4887 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4888 | |||
4889 | /* Interrupts should be disabled already to avoid re-arming. */ | ||
4890 | WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled); | ||
4891 | |||
4892 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | ||
4893 | |||
4894 | cancel_work_sync(&dev_priv->rps.work); | ||
4895 | } | ||
4896 | |||
4681 | void intel_disable_gt_powersave(struct drm_device *dev) | 4897 | void intel_disable_gt_powersave(struct drm_device *dev) |
4682 | { | 4898 | { |
4683 | struct drm_i915_private *dev_priv = dev->dev_private; | 4899 | struct drm_i915_private *dev_priv = dev->dev_private; |
4684 | 4900 | ||
4685 | /* Interrupts should be disabled already to avoid re-arming. */ | 4901 | /* Interrupts should be disabled already to avoid re-arming. */ |
4686 | WARN_ON(dev->irq_enabled); | 4902 | WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled); |
4687 | 4903 | ||
4688 | if (IS_IRONLAKE_M(dev)) { | 4904 | if (IS_IRONLAKE_M(dev)) { |
4689 | ironlake_disable_drps(dev); | 4905 | ironlake_disable_drps(dev); |
4690 | ironlake_disable_rc6(dev); | 4906 | ironlake_disable_rc6(dev); |
4691 | } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) { | 4907 | } else if (INTEL_INFO(dev)->gen >= 6) { |
4692 | if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work)) | 4908 | intel_suspend_gt_powersave(dev); |
4693 | intel_runtime_pm_put(dev_priv); | ||
4694 | 4909 | ||
4695 | cancel_work_sync(&dev_priv->rps.work); | ||
4696 | mutex_lock(&dev_priv->rps.hw_lock); | 4910 | mutex_lock(&dev_priv->rps.hw_lock); |
4697 | if (IS_VALLEYVIEW(dev)) | 4911 | if (IS_CHERRYVIEW(dev)) |
4912 | cherryview_disable_rps(dev); | ||
4913 | else if (IS_VALLEYVIEW(dev)) | ||
4698 | valleyview_disable_rps(dev); | 4914 | valleyview_disable_rps(dev); |
4699 | else | 4915 | else |
4700 | gen6_disable_rps(dev); | 4916 | gen6_disable_rps(dev); |
@@ -4712,7 +4928,9 @@ static void intel_gen6_powersave_work(struct work_struct *work) | |||
4712 | 4928 | ||
4713 | mutex_lock(&dev_priv->rps.hw_lock); | 4929 | mutex_lock(&dev_priv->rps.hw_lock); |
4714 | 4930 | ||
4715 | if (IS_VALLEYVIEW(dev)) { | 4931 | if (IS_CHERRYVIEW(dev)) { |
4932 | cherryview_enable_rps(dev); | ||
4933 | } else if (IS_VALLEYVIEW(dev)) { | ||
4716 | valleyview_enable_rps(dev); | 4934 | valleyview_enable_rps(dev); |
4717 | } else if (IS_BROADWELL(dev)) { | 4935 | } else if (IS_BROADWELL(dev)) { |
4718 | gen8_enable_rps(dev); | 4936 | gen8_enable_rps(dev); |
@@ -4737,7 +4955,7 @@ void intel_enable_gt_powersave(struct drm_device *dev) | |||
4737 | ironlake_enable_rc6(dev); | 4955 | ironlake_enable_rc6(dev); |
4738 | intel_init_emon(dev); | 4956 | intel_init_emon(dev); |
4739 | mutex_unlock(&dev->struct_mutex); | 4957 | mutex_unlock(&dev->struct_mutex); |
4740 | } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) { | 4958 | } else if (INTEL_INFO(dev)->gen >= 6) { |
4741 | /* | 4959 | /* |
4742 | * PCU communication is slow and this doesn't need to be | 4960 | * PCU communication is slow and this doesn't need to be |
4743 | * done at any specific time, so do this out of our fast path | 4961 | * done at any specific time, so do this out of our fast path |
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h index a5e783a9928a..fd4f66231d30 100644 --- a/drivers/gpu/drm/i915/intel_renderstate.h +++ b/drivers/gpu/drm/i915/intel_renderstate.h | |||
@@ -28,7 +28,6 @@ | |||
28 | 28 | ||
29 | struct intel_renderstate_rodata { | 29 | struct intel_renderstate_rodata { |
30 | const u32 *reloc; | 30 | const u32 *reloc; |
31 | const u32 reloc_items; | ||
32 | const u32 *batch; | 31 | const u32 *batch; |
33 | const u32 batch_items; | 32 | const u32 batch_items; |
34 | }; | 33 | }; |
@@ -40,7 +39,6 @@ extern const struct intel_renderstate_rodata gen8_null_state; | |||
40 | #define RO_RENDERSTATE(_g) \ | 39 | #define RO_RENDERSTATE(_g) \ |
41 | const struct intel_renderstate_rodata gen ## _g ## _null_state = { \ | 40 | const struct intel_renderstate_rodata gen ## _g ## _null_state = { \ |
42 | .reloc = gen ## _g ## _null_state_relocs, \ | 41 | .reloc = gen ## _g ## _null_state_relocs, \ |
43 | .reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \ | ||
44 | .batch = gen ## _g ## _null_state_batch, \ | 42 | .batch = gen ## _g ## _null_state_batch, \ |
45 | .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \ | 43 | .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \ |
46 | } | 44 | } |
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c index 740538ad0977..56c1429d8a60 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c +++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c | |||
@@ -6,6 +6,7 @@ static const u32 gen6_null_state_relocs[] = { | |||
6 | 0x0000002c, | 6 | 0x0000002c, |
7 | 0x000001e0, | 7 | 0x000001e0, |
8 | 0x000001e4, | 8 | 0x000001e4, |
9 | -1, | ||
9 | }; | 10 | }; |
10 | 11 | ||
11 | static const u32 gen6_null_state_batch[] = { | 12 | static const u32 gen6_null_state_batch[] = { |
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c index 6fa7ff2a1298..419e35a7b0ff 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c +++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c | |||
@@ -5,6 +5,7 @@ static const u32 gen7_null_state_relocs[] = { | |||
5 | 0x00000010, | 5 | 0x00000010, |
6 | 0x00000018, | 6 | 0x00000018, |
7 | 0x000001ec, | 7 | 0x000001ec, |
8 | -1, | ||
8 | }; | 9 | }; |
9 | 10 | ||
10 | static const u32 gen7_null_state_batch[] = { | 11 | static const u32 gen7_null_state_batch[] = { |
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c index 5c875615d42a..75ef1b5de45c 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c +++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c | |||
@@ -5,6 +5,7 @@ static const u32 gen8_null_state_relocs[] = { | |||
5 | 0x00000050, | 5 | 0x00000050, |
6 | 0x00000060, | 6 | 0x00000060, |
7 | 0x000003ec, | 7 | 0x000003ec, |
8 | -1, | ||
8 | }; | 9 | }; |
9 | 10 | ||
10 | static const u32 gen8_null_state_batch[] = { | 11 | static const u32 gen8_null_state_batch[] = { |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 279488addf3f..2faef2605e97 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -604,6 +604,8 @@ static int init_render_ring(struct intel_engine_cs *ring) | |||
604 | struct drm_device *dev = ring->dev; | 604 | struct drm_device *dev = ring->dev; |
605 | struct drm_i915_private *dev_priv = dev->dev_private; | 605 | struct drm_i915_private *dev_priv = dev->dev_private; |
606 | int ret = init_ring_common(ring); | 606 | int ret = init_ring_common(ring); |
607 | if (ret) | ||
608 | return ret; | ||
607 | 609 | ||
608 | /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ | 610 | /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ |
609 | if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) | 611 | if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) |
@@ -1397,6 +1399,9 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring) | |||
1397 | if (obj == NULL) | 1399 | if (obj == NULL) |
1398 | return -ENOMEM; | 1400 | return -ENOMEM; |
1399 | 1401 | ||
1402 | /* mark ring buffers as read-only from GPU side by default */ | ||
1403 | obj->gt_ro = 1; | ||
1404 | |||
1400 | ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); | 1405 | ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); |
1401 | if (ret) | 1406 | if (ret) |
1402 | goto err_unref; | 1407 | goto err_unref; |
@@ -1746,14 +1751,15 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring) | |||
1746 | 1751 | ||
1747 | void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno) | 1752 | void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno) |
1748 | { | 1753 | { |
1749 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 1754 | struct drm_device *dev = ring->dev; |
1755 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1750 | 1756 | ||
1751 | BUG_ON(ring->outstanding_lazy_seqno); | 1757 | BUG_ON(ring->outstanding_lazy_seqno); |
1752 | 1758 | ||
1753 | if (INTEL_INFO(ring->dev)->gen >= 6) { | 1759 | if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) { |
1754 | I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); | 1760 | I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); |
1755 | I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); | 1761 | I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); |
1756 | if (HAS_VEBOX(ring->dev)) | 1762 | if (HAS_VEBOX(dev)) |
1757 | I915_WRITE(RING_SYNC_2(ring->mmio_base), 0); | 1763 | I915_WRITE(RING_SYNC_2(ring->mmio_base), 0); |
1758 | } | 1764 | } |
1759 | 1765 | ||
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 9a17b4e92ef4..985317eb1dc9 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -819,6 +819,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
819 | struct drm_device *dev = plane->dev; | 819 | struct drm_device *dev = plane->dev; |
820 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 820 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
821 | struct intel_plane *intel_plane = to_intel_plane(plane); | 821 | struct intel_plane *intel_plane = to_intel_plane(plane); |
822 | enum pipe pipe = intel_crtc->pipe; | ||
822 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 823 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
823 | struct drm_i915_gem_object *obj = intel_fb->obj; | 824 | struct drm_i915_gem_object *obj = intel_fb->obj; |
824 | struct drm_i915_gem_object *old_obj = intel_plane->obj; | 825 | struct drm_i915_gem_object *old_obj = intel_plane->obj; |
@@ -1006,6 +1007,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
1006 | */ | 1007 | */ |
1007 | ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); | 1008 | ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); |
1008 | 1009 | ||
1010 | i915_gem_track_fb(old_obj, obj, | ||
1011 | INTEL_FRONTBUFFER_SPRITE(pipe)); | ||
1009 | mutex_unlock(&dev->struct_mutex); | 1012 | mutex_unlock(&dev->struct_mutex); |
1010 | 1013 | ||
1011 | if (ret) | 1014 | if (ret) |
@@ -1039,6 +1042,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
1039 | else | 1042 | else |
1040 | intel_plane->disable_plane(plane, crtc); | 1043 | intel_plane->disable_plane(plane, crtc); |
1041 | 1044 | ||
1045 | intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe)); | ||
1046 | |||
1042 | if (!primary_was_enabled && primary_enabled) | 1047 | if (!primary_was_enabled && primary_enabled) |
1043 | intel_post_enable_primary(crtc); | 1048 | intel_post_enable_primary(crtc); |
1044 | } | 1049 | } |
@@ -1068,6 +1073,7 @@ intel_disable_plane(struct drm_plane *plane) | |||
1068 | struct drm_device *dev = plane->dev; | 1073 | struct drm_device *dev = plane->dev; |
1069 | struct intel_plane *intel_plane = to_intel_plane(plane); | 1074 | struct intel_plane *intel_plane = to_intel_plane(plane); |
1070 | struct intel_crtc *intel_crtc; | 1075 | struct intel_crtc *intel_crtc; |
1076 | enum pipe pipe; | ||
1071 | 1077 | ||
1072 | if (!plane->fb) | 1078 | if (!plane->fb) |
1073 | return 0; | 1079 | return 0; |
@@ -1076,6 +1082,7 @@ intel_disable_plane(struct drm_plane *plane) | |||
1076 | return -EINVAL; | 1082 | return -EINVAL; |
1077 | 1083 | ||
1078 | intel_crtc = to_intel_crtc(plane->crtc); | 1084 | intel_crtc = to_intel_crtc(plane->crtc); |
1085 | pipe = intel_crtc->pipe; | ||
1079 | 1086 | ||
1080 | if (intel_crtc->active) { | 1087 | if (intel_crtc->active) { |
1081 | bool primary_was_enabled = intel_crtc->primary_enabled; | 1088 | bool primary_was_enabled = intel_crtc->primary_enabled; |
@@ -1094,6 +1101,8 @@ intel_disable_plane(struct drm_plane *plane) | |||
1094 | 1101 | ||
1095 | mutex_lock(&dev->struct_mutex); | 1102 | mutex_lock(&dev->struct_mutex); |
1096 | intel_unpin_fb_obj(intel_plane->obj); | 1103 | intel_unpin_fb_obj(intel_plane->obj); |
1104 | i915_gem_track_fb(intel_plane->obj, NULL, | ||
1105 | INTEL_FRONTBUFFER_SPRITE(pipe)); | ||
1097 | mutex_unlock(&dev->struct_mutex); | 1106 | mutex_unlock(&dev->struct_mutex); |
1098 | 1107 | ||
1099 | intel_plane->obj = NULL; | 1108 | intel_plane->obj = NULL; |
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 4f6fef7ac069..29145df8ef64 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -231,8 +231,8 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv, | |||
231 | } | 231 | } |
232 | 232 | ||
233 | /* WaRsForcewakeWaitTC0:vlv */ | 233 | /* WaRsForcewakeWaitTC0:vlv */ |
234 | __gen6_gt_wait_for_thread_c0(dev_priv); | 234 | if (!IS_CHERRYVIEW(dev_priv->dev)) |
235 | 235 | __gen6_gt_wait_for_thread_c0(dev_priv); | |
236 | } | 236 | } |
237 | 237 | ||
238 | static void __vlv_force_wake_put(struct drm_i915_private *dev_priv, | 238 | static void __vlv_force_wake_put(struct drm_i915_private *dev_priv, |
@@ -250,9 +250,10 @@ static void __vlv_force_wake_put(struct drm_i915_private *dev_priv, | |||
250 | __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, | 250 | __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, |
251 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); | 251 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); |
252 | 252 | ||
253 | /* The below doubles as a POSTING_READ */ | 253 | /* something from same cacheline, but !FORCEWAKE_VLV */ |
254 | gen6_gt_check_fifodbg(dev_priv); | 254 | __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); |
255 | 255 | if (!IS_CHERRYVIEW(dev_priv->dev)) | |
256 | gen6_gt_check_fifodbg(dev_priv); | ||
256 | } | 257 | } |
257 | 258 | ||
258 | static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine) | 259 | static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine) |
@@ -315,7 +316,7 @@ static void gen6_force_wake_timer(unsigned long arg) | |||
315 | intel_runtime_pm_put(dev_priv); | 316 | intel_runtime_pm_put(dev_priv); |
316 | } | 317 | } |
317 | 318 | ||
318 | static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) | 319 | void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) |
319 | { | 320 | { |
320 | struct drm_i915_private *dev_priv = dev->dev_private; | 321 | struct drm_i915_private *dev_priv = dev->dev_private; |
321 | unsigned long irqflags; | 322 | unsigned long irqflags; |
@@ -357,16 +358,12 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) | |||
357 | dev_priv->uncore.fifo_count = | 358 | dev_priv->uncore.fifo_count = |
358 | __raw_i915_read32(dev_priv, GTFIFOCTL) & | 359 | __raw_i915_read32(dev_priv, GTFIFOCTL) & |
359 | GT_FIFO_FREE_ENTRIES_MASK; | 360 | GT_FIFO_FREE_ENTRIES_MASK; |
360 | } else { | ||
361 | dev_priv->uncore.forcewake_count = 0; | ||
362 | dev_priv->uncore.fw_rendercount = 0; | ||
363 | dev_priv->uncore.fw_mediacount = 0; | ||
364 | } | 361 | } |
365 | 362 | ||
366 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 363 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
367 | } | 364 | } |
368 | 365 | ||
369 | void intel_uncore_early_sanitize(struct drm_device *dev) | 366 | void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) |
370 | { | 367 | { |
371 | struct drm_i915_private *dev_priv = dev->dev_private; | 368 | struct drm_i915_private *dev_priv = dev->dev_private; |
372 | 369 | ||
@@ -389,7 +386,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev) | |||
389 | __raw_i915_write32(dev_priv, GTFIFODBG, | 386 | __raw_i915_write32(dev_priv, GTFIFODBG, |
390 | __raw_i915_read32(dev_priv, GTFIFODBG)); | 387 | __raw_i915_read32(dev_priv, GTFIFODBG)); |
391 | 388 | ||
392 | intel_uncore_forcewake_reset(dev, false); | 389 | intel_uncore_forcewake_reset(dev, restore_forcewake); |
393 | } | 390 | } |
394 | 391 | ||
395 | void intel_uncore_sanitize(struct drm_device *dev) | 392 | void intel_uncore_sanitize(struct drm_device *dev) |
@@ -469,16 +466,43 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv) | |||
469 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ | 466 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ |
470 | ((reg) < 0x40000 && (reg) != FORCEWAKE) | 467 | ((reg) < 0x40000 && (reg) != FORCEWAKE) |
471 | 468 | ||
472 | #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \ | 469 | #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end)) |
473 | (((reg) >= 0x2000 && (reg) < 0x4000) ||\ | ||
474 | ((reg) >= 0x5000 && (reg) < 0x8000) ||\ | ||
475 | ((reg) >= 0xB000 && (reg) < 0x12000) ||\ | ||
476 | ((reg) >= 0x2E000 && (reg) < 0x30000)) | ||
477 | 470 | ||
478 | #define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\ | 471 | #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \ |
479 | (((reg) >= 0x12000 && (reg) < 0x14000) ||\ | 472 | (REG_RANGE((reg), 0x2000, 0x4000) || \ |
480 | ((reg) >= 0x22000 && (reg) < 0x24000) ||\ | 473 | REG_RANGE((reg), 0x5000, 0x8000) || \ |
481 | ((reg) >= 0x30000 && (reg) < 0x40000)) | 474 | REG_RANGE((reg), 0xB000, 0x12000) || \ |
475 | REG_RANGE((reg), 0x2E000, 0x30000)) | ||
476 | |||
477 | #define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \ | ||
478 | (REG_RANGE((reg), 0x12000, 0x14000) || \ | ||
479 | REG_RANGE((reg), 0x22000, 0x24000) || \ | ||
480 | REG_RANGE((reg), 0x30000, 0x40000)) | ||
481 | |||
482 | #define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \ | ||
483 | (REG_RANGE((reg), 0x2000, 0x4000) || \ | ||
484 | REG_RANGE((reg), 0x5000, 0x8000) || \ | ||
485 | REG_RANGE((reg), 0x8300, 0x8500) || \ | ||
486 | REG_RANGE((reg), 0xB000, 0xC000) || \ | ||
487 | REG_RANGE((reg), 0xE000, 0xE800)) | ||
488 | |||
489 | #define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \ | ||
490 | (REG_RANGE((reg), 0x8800, 0x8900) || \ | ||
491 | REG_RANGE((reg), 0xD000, 0xD800) || \ | ||
492 | REG_RANGE((reg), 0x12000, 0x14000) || \ | ||
493 | REG_RANGE((reg), 0x1A000, 0x1C000) || \ | ||
494 | REG_RANGE((reg), 0x1E800, 0x1EA00) || \ | ||
495 | REG_RANGE((reg), 0x30000, 0x40000)) | ||
496 | |||
497 | #define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \ | ||
498 | (REG_RANGE((reg), 0x4000, 0x5000) || \ | ||
499 | REG_RANGE((reg), 0x8000, 0x8300) || \ | ||
500 | REG_RANGE((reg), 0x8500, 0x8600) || \ | ||
501 | REG_RANGE((reg), 0x9000, 0xB000) || \ | ||
502 | REG_RANGE((reg), 0xC000, 0xC800) || \ | ||
503 | REG_RANGE((reg), 0xF000, 0x10000) || \ | ||
504 | REG_RANGE((reg), 0x14000, 0x14400) || \ | ||
505 | REG_RANGE((reg), 0x22000, 0x24000)) | ||
482 | 506 | ||
483 | static void | 507 | static void |
484 | ilk_dummy_write(struct drm_i915_private *dev_priv) | 508 | ilk_dummy_write(struct drm_i915_private *dev_priv) |
@@ -573,7 +597,35 @@ vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ | |||
573 | REG_READ_FOOTER; \ | 597 | REG_READ_FOOTER; \ |
574 | } | 598 | } |
575 | 599 | ||
600 | #define __chv_read(x) \ | ||
601 | static u##x \ | ||
602 | chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ | ||
603 | unsigned fwengine = 0; \ | ||
604 | REG_READ_HEADER(x); \ | ||
605 | if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \ | ||
606 | if (dev_priv->uncore.fw_rendercount == 0) \ | ||
607 | fwengine = FORCEWAKE_RENDER; \ | ||
608 | } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \ | ||
609 | if (dev_priv->uncore.fw_mediacount == 0) \ | ||
610 | fwengine = FORCEWAKE_MEDIA; \ | ||
611 | } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \ | ||
612 | if (dev_priv->uncore.fw_rendercount == 0) \ | ||
613 | fwengine |= FORCEWAKE_RENDER; \ | ||
614 | if (dev_priv->uncore.fw_mediacount == 0) \ | ||
615 | fwengine |= FORCEWAKE_MEDIA; \ | ||
616 | } \ | ||
617 | if (fwengine) \ | ||
618 | dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \ | ||
619 | val = __raw_i915_read##x(dev_priv, reg); \ | ||
620 | if (fwengine) \ | ||
621 | dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \ | ||
622 | REG_READ_FOOTER; \ | ||
623 | } | ||
576 | 624 | ||
625 | __chv_read(8) | ||
626 | __chv_read(16) | ||
627 | __chv_read(32) | ||
628 | __chv_read(64) | ||
577 | __vlv_read(8) | 629 | __vlv_read(8) |
578 | __vlv_read(16) | 630 | __vlv_read(16) |
579 | __vlv_read(32) | 631 | __vlv_read(32) |
@@ -591,6 +643,7 @@ __gen4_read(16) | |||
591 | __gen4_read(32) | 643 | __gen4_read(32) |
592 | __gen4_read(64) | 644 | __gen4_read(64) |
593 | 645 | ||
646 | #undef __chv_read | ||
594 | #undef __vlv_read | 647 | #undef __vlv_read |
595 | #undef __gen6_read | 648 | #undef __gen6_read |
596 | #undef __gen5_read | 649 | #undef __gen5_read |
@@ -695,6 +748,38 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace | |||
695 | REG_WRITE_FOOTER; \ | 748 | REG_WRITE_FOOTER; \ |
696 | } | 749 | } |
697 | 750 | ||
751 | #define __chv_write(x) \ | ||
752 | static void \ | ||
753 | chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ | ||
754 | unsigned fwengine = 0; \ | ||
755 | bool shadowed = is_gen8_shadowed(dev_priv, reg); \ | ||
756 | REG_WRITE_HEADER; \ | ||
757 | if (!shadowed) { \ | ||
758 | if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \ | ||
759 | if (dev_priv->uncore.fw_rendercount == 0) \ | ||
760 | fwengine = FORCEWAKE_RENDER; \ | ||
761 | } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \ | ||
762 | if (dev_priv->uncore.fw_mediacount == 0) \ | ||
763 | fwengine = FORCEWAKE_MEDIA; \ | ||
764 | } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \ | ||
765 | if (dev_priv->uncore.fw_rendercount == 0) \ | ||
766 | fwengine |= FORCEWAKE_RENDER; \ | ||
767 | if (dev_priv->uncore.fw_mediacount == 0) \ | ||
768 | fwengine |= FORCEWAKE_MEDIA; \ | ||
769 | } \ | ||
770 | } \ | ||
771 | if (fwengine) \ | ||
772 | dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \ | ||
773 | __raw_i915_write##x(dev_priv, reg, val); \ | ||
774 | if (fwengine) \ | ||
775 | dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \ | ||
776 | REG_WRITE_FOOTER; \ | ||
777 | } | ||
778 | |||
779 | __chv_write(8) | ||
780 | __chv_write(16) | ||
781 | __chv_write(32) | ||
782 | __chv_write(64) | ||
698 | __gen8_write(8) | 783 | __gen8_write(8) |
699 | __gen8_write(16) | 784 | __gen8_write(16) |
700 | __gen8_write(32) | 785 | __gen8_write(32) |
@@ -716,6 +801,7 @@ __gen4_write(16) | |||
716 | __gen4_write(32) | 801 | __gen4_write(32) |
717 | __gen4_write(64) | 802 | __gen4_write(64) |
718 | 803 | ||
804 | #undef __chv_write | ||
719 | #undef __gen8_write | 805 | #undef __gen8_write |
720 | #undef __hsw_write | 806 | #undef __hsw_write |
721 | #undef __gen6_write | 807 | #undef __gen6_write |
@@ -731,7 +817,7 @@ void intel_uncore_init(struct drm_device *dev) | |||
731 | setup_timer(&dev_priv->uncore.force_wake_timer, | 817 | setup_timer(&dev_priv->uncore.force_wake_timer, |
732 | gen6_force_wake_timer, (unsigned long)dev_priv); | 818 | gen6_force_wake_timer, (unsigned long)dev_priv); |
733 | 819 | ||
734 | intel_uncore_early_sanitize(dev); | 820 | intel_uncore_early_sanitize(dev, false); |
735 | 821 | ||
736 | if (IS_VALLEYVIEW(dev)) { | 822 | if (IS_VALLEYVIEW(dev)) { |
737 | dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; | 823 | dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; |
@@ -779,14 +865,26 @@ void intel_uncore_init(struct drm_device *dev) | |||
779 | 865 | ||
780 | switch (INTEL_INFO(dev)->gen) { | 866 | switch (INTEL_INFO(dev)->gen) { |
781 | default: | 867 | default: |
782 | dev_priv->uncore.funcs.mmio_writeb = gen8_write8; | 868 | if (IS_CHERRYVIEW(dev)) { |
783 | dev_priv->uncore.funcs.mmio_writew = gen8_write16; | 869 | dev_priv->uncore.funcs.mmio_writeb = chv_write8; |
784 | dev_priv->uncore.funcs.mmio_writel = gen8_write32; | 870 | dev_priv->uncore.funcs.mmio_writew = chv_write16; |
785 | dev_priv->uncore.funcs.mmio_writeq = gen8_write64; | 871 | dev_priv->uncore.funcs.mmio_writel = chv_write32; |
786 | dev_priv->uncore.funcs.mmio_readb = gen6_read8; | 872 | dev_priv->uncore.funcs.mmio_writeq = chv_write64; |
787 | dev_priv->uncore.funcs.mmio_readw = gen6_read16; | 873 | dev_priv->uncore.funcs.mmio_readb = chv_read8; |
788 | dev_priv->uncore.funcs.mmio_readl = gen6_read32; | 874 | dev_priv->uncore.funcs.mmio_readw = chv_read16; |
789 | dev_priv->uncore.funcs.mmio_readq = gen6_read64; | 875 | dev_priv->uncore.funcs.mmio_readl = chv_read32; |
876 | dev_priv->uncore.funcs.mmio_readq = chv_read64; | ||
877 | |||
878 | } else { | ||
879 | dev_priv->uncore.funcs.mmio_writeb = gen8_write8; | ||
880 | dev_priv->uncore.funcs.mmio_writew = gen8_write16; | ||
881 | dev_priv->uncore.funcs.mmio_writel = gen8_write32; | ||
882 | dev_priv->uncore.funcs.mmio_writeq = gen8_write64; | ||
883 | dev_priv->uncore.funcs.mmio_readb = gen6_read8; | ||
884 | dev_priv->uncore.funcs.mmio_readw = gen6_read16; | ||
885 | dev_priv->uncore.funcs.mmio_readl = gen6_read32; | ||
886 | dev_priv->uncore.funcs.mmio_readq = gen6_read64; | ||
887 | } | ||
790 | break; | 888 | break; |
791 | case 7: | 889 | case 7: |
792 | case 6: | 890 | case 6: |
@@ -1053,18 +1151,16 @@ static int gen6_do_reset(struct drm_device *dev) | |||
1053 | 1151 | ||
1054 | int intel_gpu_reset(struct drm_device *dev) | 1152 | int intel_gpu_reset(struct drm_device *dev) |
1055 | { | 1153 | { |
1056 | switch (INTEL_INFO(dev)->gen) { | 1154 | if (INTEL_INFO(dev)->gen >= 6) |
1057 | case 8: | 1155 | return gen6_do_reset(dev); |
1058 | case 7: | 1156 | else if (IS_GEN5(dev)) |
1059 | case 6: return gen6_do_reset(dev); | 1157 | return ironlake_do_reset(dev); |
1060 | case 5: return ironlake_do_reset(dev); | 1158 | else if (IS_G4X(dev)) |
1061 | case 4: | 1159 | return g4x_do_reset(dev); |
1062 | if (IS_G4X(dev)) | 1160 | else if (IS_GEN4(dev)) |
1063 | return g4x_do_reset(dev); | 1161 | return i965_do_reset(dev); |
1064 | else | 1162 | else |
1065 | return i965_do_reset(dev); | 1163 | return -ENODEV; |
1066 | default: return -ENODEV; | ||
1067 | } | ||
1068 | } | 1164 | } |
1069 | 1165 | ||
1070 | void intel_uncore_check_errors(struct drm_device *dev) | 1166 | void intel_uncore_check_errors(struct drm_device *dev) |
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 4da62072701c..e529b68d5037 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
@@ -331,6 +331,10 @@ struct drm_crtc { | |||
331 | struct drm_plane *primary; | 331 | struct drm_plane *primary; |
332 | struct drm_plane *cursor; | 332 | struct drm_plane *cursor; |
333 | 333 | ||
334 | /* position of cursor plane on crtc */ | ||
335 | int cursor_x; | ||
336 | int cursor_y; | ||
337 | |||
334 | /* Temporary tracking of the old fb while a modeset is ongoing. Used | 338 | /* Temporary tracking of the old fb while a modeset is ongoing. Used |
335 | * by drm_mode_set_config_internal to implement correct refcounting. */ | 339 | * by drm_mode_set_config_internal to implement correct refcounting. */ |
336 | struct drm_framebuffer *old_fb; | 340 | struct drm_framebuffer *old_fb; |
@@ -858,7 +862,7 @@ struct drm_prop_enum_list { | |||
858 | extern int drm_crtc_init_with_planes(struct drm_device *dev, | 862 | extern int drm_crtc_init_with_planes(struct drm_device *dev, |
859 | struct drm_crtc *crtc, | 863 | struct drm_crtc *crtc, |
860 | struct drm_plane *primary, | 864 | struct drm_plane *primary, |
861 | void *cursor, | 865 | struct drm_plane *cursor, |
862 | const struct drm_crtc_funcs *funcs); | 866 | const struct drm_crtc_funcs *funcs); |
863 | extern int drm_crtc_init(struct drm_device *dev, | 867 | extern int drm_crtc_init(struct drm_device *dev, |
864 | struct drm_crtc *crtc, | 868 | struct drm_crtc *crtc, |