Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.h')
-rw-r--r--    drivers/gpu/drm/i915/i915_drv.h    517
1 file changed, 174 insertions(+), 343 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 21f939074abc..c36d17659ebe 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -61,6 +61,7 @@
 #include "i915_gem.h"
 #include "i915_gem_gtt.h"
 #include "i915_gem_render_state.h"
+#include "i915_gem_request.h"
 
 #include "intel_gvt.h"
 
@@ -69,7 +70,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20160711"
+#define DRIVER_DATE		"20160808"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -401,7 +402,7 @@ struct drm_i915_file_private {
 		unsigned boosts;
 	} rps;
 
-	unsigned int bsd_ring;
+	unsigned int bsd_engine;
 };
 
 /* Used by dp and fdi links */
@@ -431,8 +432,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
 #define DRIVER_MINOR		6
 #define DRIVER_PATCHLEVEL	0
 
-#define WATCH_LISTS	0
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -511,13 +510,13 @@ struct drm_i915_error_state {
 	struct intel_display_error_state *display;
 	struct drm_i915_error_object *semaphore_obj;
 
-	struct drm_i915_error_ring {
-		bool valid;
+	struct drm_i915_error_engine {
+		int engine_id;
 		/* Software tracked state */
 		bool waiting;
 		int num_waiters;
 		int hangcheck_score;
-		enum intel_ring_hangcheck_action hangcheck_action;
+		enum intel_engine_hangcheck_action hangcheck_action;
 		int num_requests;
 
 		/* our own tracking of ring head and tail */
@@ -577,7 +576,7 @@ struct drm_i915_error_state {
 
 		pid_t pid;
 		char comm[TASK_COMM_LEN];
-	} ring[I915_NUM_ENGINES];
+	} engine[I915_NUM_ENGINES];
 
 	struct drm_i915_error_buffer {
 		u32 size;
@@ -592,7 +591,7 @@ struct drm_i915_error_state {
 		u32 dirty:1;
 		u32 purgeable:1;
 		u32 userptr:1;
-		s32 ring:4;
+		s32 engine:4;
 		u32 cache_level:3;
 	} **active_bo, **pinned_bo;
 
@@ -893,7 +892,7 @@ struct i915_gem_context {
 
 	struct intel_context {
 		struct drm_i915_gem_object *state;
-		struct intel_ringbuffer *ringbuf;
+		struct intel_ring *ring;
 		struct i915_vma *lrc_vma;
 		uint32_t *lrc_reg_state;
 		u64 lrc_desc;
@@ -908,6 +907,7 @@ struct i915_gem_context {
 	struct list_head link;
 
 	u8 remap_slice;
+	bool closed:1;
 };
 
 enum fb_op_origin {
@@ -1173,6 +1173,7 @@ struct intel_gen6_power_mgmt {
 	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
 	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
 	u8 min_freq;		/* AKA RPn. Minimum frequency */
+	u8 boost_freq;		/* Frequency to request when wait boosting */
 	u8 idle_freq;		/* Frequency to request when we are idle */
 	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
 	u8 rp1_freq;		/* "less than" RP0 power/freqency */
@@ -1190,11 +1191,9 @@ struct intel_gen6_power_mgmt {
 	bool client_boost;
 
 	bool enabled;
-	struct delayed_work delayed_resume_work;
+	struct delayed_work autoenable_work;
 	unsigned boosts;
 
-	struct intel_rps_client semaphores, mmioflips;
-
 	/* manual wa residency calculations */
 	struct intel_rps_ei up_ei, down_ei;
 
@@ -1319,7 +1318,6 @@ struct i915_gem_mm {
 	struct notifier_block oom_notifier;
 	struct notifier_block vmap_notifier;
 	struct shrinker shrinker;
-	bool shrinker_no_lock_stealing;
 
 	/** LRU list of objects with fence regs on them. */
 	struct list_head fence_list;
@@ -1331,7 +1329,7 @@ struct i915_gem_mm {
 	bool interruptible;
 
 	/* the indicator for dispatch video commands on two BSD rings */
-	unsigned int bsd_ring_dispatch_index;
+	unsigned int bsd_engine_dispatch_index;
 
 	/** Bit 6 swizzling required for X tiling */
 	uint32_t bit_6_swizzle_x;
@@ -1670,7 +1668,7 @@ struct intel_pipe_crc {
 };
 
 struct i915_frontbuffer_tracking {
-	struct mutex lock;
+	spinlock_t lock;
 
 	/*
 	 * Tracking bits for delayed frontbuffer flushing du to gpu activity or
@@ -1705,18 +1703,6 @@ struct i915_virtual_gpu {
 	bool active;
 };
 
-struct i915_execbuffer_params {
-	struct drm_device *dev;
-	struct drm_file *file;
-	uint32_t dispatch_flags;
-	uint32_t args_batch_start_offset;
-	uint64_t batch_obj_vm_offset;
-	struct intel_engine_cs *engine;
-	struct drm_i915_gem_object *batch_obj;
-	struct i915_gem_context *ctx;
-	struct drm_i915_gem_request *request;
-};
-
 /* used in computing the new watermarks state */
 struct intel_wm_config {
 	unsigned int num_pipes_active;
@@ -1769,7 +1755,7 @@ struct drm_i915_private {
 	struct i915_gem_context *kernel_context;
 	struct intel_engine_cs engine[I915_NUM_ENGINES];
 	struct drm_i915_gem_object *semaphore_obj;
-	uint32_t last_seqno, next_seqno;
+	u32 next_seqno;
 
 	struct drm_dma_handle *status_page_dmah;
 	struct resource mch_res;
@@ -2016,12 +2002,7 @@ struct drm_i915_private {
 
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
 	struct {
-		int (*execbuf_submit)(struct i915_execbuffer_params *params,
-				      struct drm_i915_gem_execbuffer2 *args,
-				      struct list_head *vmas);
-		int (*init_engines)(struct drm_device *dev);
 		void (*cleanup_engine)(struct intel_engine_cs *engine);
-		void (*stop_engine)(struct intel_engine_cs *engine);
 
 		/**
 		 * Is the GPU currently considered idle, or busy executing
@@ -2144,8 +2125,6 @@ struct drm_i915_gem_object_ops {
  */
 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
-#define INTEL_FRONTBUFFER_BITS \
-	(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \
 	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
 #define INTEL_FRONTBUFFER_CURSOR(pipe) \
@@ -2169,18 +2148,21 @@ struct drm_i915_gem_object {
 	struct drm_mm_node *stolen;
 	struct list_head global_list;
 
-	struct list_head engine_list[I915_NUM_ENGINES];
 	/** Used in execbuf to temporarily hold a ref */
 	struct list_head obj_exec_link;
 
 	struct list_head batch_pool_link;
 
+	unsigned long flags;
 	/**
 	 * This is set if the object is on the active lists (has pending
 	 * rendering and so a non-zero seqno), and is not set if it is on
 	 * inactive (ready to be unbound) list.
 	 */
-	unsigned int active:I915_NUM_ENGINES;
+#define I915_BO_ACTIVE_SHIFT 0
+#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
+#define __I915_BO_ACTIVE(bo) \
+	((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
 
 	/**
 	 * This is set if the object has been written to since last bound
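Moving the per-engine active bits out of a C bitfield and into the low bits of obj->flags is what makes the __I915_BO_ACTIVE() macro above possible: READ_ONCE() gives a tearing-free snapshot of the whole word, so busyness can be sampled without taking struct_mutex. A minimal sketch of such a lockless query, assuming only the macro above (the helper name is ours, not from the patch):

	/* Snapshot-only check: the bits may change the moment after the
	 * read, so the answer is advisory unless struct_mutex is held.
	 */
	static inline bool
	sketch_obj_maybe_busy(const struct drm_i915_gem_object *obj)
	{
		return __I915_BO_ACTIVE(obj) != 0;
	}
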
@@ -2201,10 +2183,6 @@ struct drm_i915_gem_object {
 	unsigned int madv:2;
 
 	/**
-	 * Current tiling mode for the object.
-	 */
-	unsigned int tiling_mode:2;
-	/**
 	 * Whether the tiling parameters for the currently associated fence
 	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
@@ -2234,9 +2212,17 @@ struct drm_i915_gem_object {
 	unsigned int cache_level:3;
 	unsigned int cache_dirty:1;
 
-	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+	atomic_t frontbuffer_bits;
+
+	/** Current tiling stride for the object, if it's tiled. */
+	unsigned int tiling_and_stride;
+#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
+#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
+#define STRIDE_MASK (~TILING_MASK)
 
 	unsigned int has_wc_mmap;
+	/** Count of VMA actually bound by this object */
+	unsigned int bind_count;
 	unsigned int pin_display;
 
 	struct sg_table *pages;
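The new tiling_and_stride field packs two formerly separate fields into one word: any fenceable stride is a multiple of FENCE_MINIMUM_STRIDE (128), so the low seven bits of a valid stride are always zero and can carry the 2-bit tiling mode instead. A sketch of the encoding side, under a hypothetical helper name (the real setter lives in the tiling code, not this header):

	/* Pack stride and tiling into one word; stride & TILING_MASK == 0
	 * for any fenceable object, leaving the low bits free for tiling.
	 */
	static inline void
	sketch_set_tiling_and_stride(struct drm_i915_gem_object *obj,
				     unsigned int tiling, unsigned int stride)
	{
		WARN_ON(stride & TILING_MASK);	/* must be 128-byte aligned */
		WARN_ON(tiling > I915_TILING_Y);	/* fits in low bits */
		obj->tiling_and_stride = stride | tiling;
	}
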
@@ -2256,14 +2242,10 @@ struct drm_i915_gem_object {
 	 * requests on one ring where the write request is older than the
 	 * read request. This allows for the CPU to read from an active
 	 * buffer by only waiting for the write to complete.
-	 * */
-	struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES];
-	struct drm_i915_gem_request *last_write_req;
-	/** Breadcrumb of last fenced GPU access to the buffer. */
-	struct drm_i915_gem_request *last_fenced_req;
-
-	/** Current tiling stride for the object, if it's tiled. */
-	uint32_t stride;
+	 */
+	struct i915_gem_active last_read[I915_NUM_ENGINES];
+	struct i915_gem_active last_write;
+	struct i915_gem_active last_fence;
 
 	/** References from framebuffers, locks out tiling changes. */
 	unsigned long framebuffer_references;
@@ -2287,7 +2269,56 @@ struct drm_i915_gem_object {
 		} userptr;
 	};
 };
-#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+
+static inline struct drm_i915_gem_object *
+to_intel_bo(struct drm_gem_object *gem)
+{
+	/* Assert that to_intel_bo(NULL) == NULL */
+	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
+
+	return container_of(gem, struct drm_i915_gem_object, base);
+}
+
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup(struct drm_file *file, u32 handle)
+{
+	return to_intel_bo(drm_gem_object_lookup(file, handle));
+}
+
+__deprecated
+extern struct drm_gem_object *
+drm_gem_object_lookup(struct drm_file *file, u32 handle);
+
+__attribute__((nonnull))
+static inline struct drm_i915_gem_object *
+i915_gem_object_get(struct drm_i915_gem_object *obj)
+{
+	drm_gem_object_reference(&obj->base);
+	return obj;
+}
+
+__deprecated
+extern void drm_gem_object_reference(struct drm_gem_object *);
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put(struct drm_i915_gem_object *obj)
+{
+	drm_gem_object_unreference(&obj->base);
+}
+
+__deprecated
+extern void drm_gem_object_unreference(struct drm_gem_object *);
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
+{
+	drm_gem_object_unreference_unlocked(&obj->base);
+}
+
+__deprecated
+extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
 
 static inline bool
 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
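The __deprecated extern declarations are a deliberate trick: the header re-declares the raw drm_gem_object_* functions with the deprecated attribute, so any i915 code that still calls them directly, rather than through the new typed i915_gem_object_* wrappers, draws a compiler warning. The same pattern in isolation, with hypothetical names:

	struct widget;

	/* Blessed wrapper: callers should use this... */
	static inline void widget_get(struct widget *w)
	{
		raw_widget_ref(w);
	}

	/* ...because redeclaring the raw symbol as __deprecated makes any
	 * direct caller in a file including this header emit
	 * -Wdeprecated-declarations.
	 */
	__deprecated
	extern void raw_widget_ref(struct widget *w);
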
@@ -2295,6 +2326,55 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
 }
 
+static inline unsigned long
+i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
+{
+	return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
+}
+
+static inline bool
+i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+{
+	return i915_gem_object_get_active(obj);
+}
+
+static inline void
+i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
+{
+	obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline void
+i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
+{
+	obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline bool
+i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
+				  int engine)
+{
+	return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline unsigned int
+i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
+{
+	return obj->tiling_and_stride & TILING_MASK;
+}
+
+static inline bool
+i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
+{
+	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
+}
+
+static inline unsigned int
+i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
+{
+	return obj->tiling_and_stride & STRIDE_MASK;
+}
+
 /*
  * Optimised SGL iterator for GEM objects
  */
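Call sites that previously read obj->tiling_mode and obj->stride directly now unpack both halves of the single word through these accessors. Illustrative usage (the helper below is ours, not from the patch):

	/* Recover both halves of tiling_and_stride, e.g. at a fence
	 * setup site.
	 */
	static void sketch_report_tiling(struct drm_i915_gem_object *obj)
	{
		if (i915_gem_object_is_tiled(obj))
			DRM_DEBUG("tiling %u, stride %u\n",
				  i915_gem_object_get_tiling(obj),
				  i915_gem_object_get_stride(obj));
	}
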
@@ -2365,171 +2445,6 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
 	 (((__iter).curr += PAGE_SIZE) < (__iter).max) ||		\
 	 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
 
-/**
- * Request queue structure.
- *
- * The request queue allows us to note sequence numbers that have been emitted
- * and may be associated with active buffers to be retired.
- *
- * By keeping this list, we can avoid having to do questionable sequence
- * number comparisons on buffer last_read|write_seqno. It also allows an
- * emission time to be associated with the request for tracking how far ahead
- * of the GPU the submission is.
- *
- * The requests are reference counted, so upon creation they should have an
- * initial reference taken using kref_init
- */
-struct drm_i915_gem_request {
-	struct kref ref;
-
-	/** On Which ring this request was generated */
-	struct drm_i915_private *i915;
-	struct intel_engine_cs *engine;
-	struct intel_signal_node signaling;
-
-	 /** GEM sequence number associated with the previous request,
-	  * when the HWS breadcrumb is equal to this the GPU is processing
-	  * this request.
-	  */
-	u32 previous_seqno;
-
-	 /** GEM sequence number associated with this request,
-	  * when the HWS breadcrumb is equal or greater than this the GPU
-	  * has finished processing this request.
-	  */
-	u32 seqno;
-
-	/** Position in the ringbuffer of the start of the request */
-	u32 head;
-
-	/**
-	 * Position in the ringbuffer of the start of the postfix.
-	 * This is required to calculate the maximum available ringbuffer
-	 * space without overwriting the postfix.
-	 */
-	u32 postfix;
-
-	/** Position in the ringbuffer of the end of the whole request */
-	u32 tail;
-
-	/** Preallocate space in the ringbuffer for the emitting the request */
-	u32 reserved_space;
-
-	/**
-	 * Context and ring buffer related to this request
-	 * Contexts are refcounted, so when this request is associated with a
-	 * context, we must increment the context's refcount, to guarantee that
-	 * it persists while any request is linked to it. Requests themselves
-	 * are also refcounted, so the request will only be freed when the last
-	 * reference to it is dismissed, and the code in
-	 * i915_gem_request_free() will then decrement the refcount on the
-	 * context.
-	 */
-	struct i915_gem_context *ctx;
-	struct intel_ringbuffer *ringbuf;
-
-	/**
-	 * Context related to the previous request.
-	 * As the contexts are accessed by the hardware until the switch is
-	 * completed to a new context, the hardware may still be writing
-	 * to the context object after the breadcrumb is visible. We must
-	 * not unpin/unbind/prune that object whilst still active and so
-	 * we keep the previous context pinned until the following (this)
-	 * request is retired.
-	 */
-	struct i915_gem_context *previous_context;
-
-	/** Batch buffer related to this request if any (used for
-	    error state dump only) */
-	struct drm_i915_gem_object *batch_obj;
-
-	/** Time at which this request was emitted, in jiffies. */
-	unsigned long emitted_jiffies;
-
-	/** global list entry for this request */
-	struct list_head list;
-
-	struct drm_i915_file_private *file_priv;
-	/** file_priv list entry for this request */
-	struct list_head client_list;
-
-	/** process identifier submitting this request */
-	struct pid *pid;
-
-	/**
-	 * The ELSP only accepts two elements at a time, so we queue
-	 * context/tail pairs on a given queue (ring->execlist_queue) until the
-	 * hardware is available. The queue serves a double purpose: we also use
-	 * it to keep track of the up to 2 contexts currently in the hardware
-	 * (usually one in execution and the other queued up by the GPU): We
-	 * only remove elements from the head of the queue when the hardware
-	 * informs us that an element has been completed.
-	 *
-	 * All accesses to the queue are mediated by a spinlock
-	 * (ring->execlist_lock).
-	 */
-
-	/** Execlist link in the submission queue.*/
-	struct list_head execlist_link;
-
-	/** Execlists no. of times this request has been sent to the ELSP */
-	int elsp_submitted;
-
-	/** Execlists context hardware id. */
-	unsigned ctx_hw_id;
-};
-
-struct drm_i915_gem_request * __must_check
-i915_gem_request_alloc(struct intel_engine_cs *engine,
-		       struct i915_gem_context *ctx);
-void i915_gem_request_free(struct kref *req_ref);
-int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
-				   struct drm_file *file);
-
-static inline uint32_t
-i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
-{
-	return req ? req->seqno : 0;
-}
-
-static inline struct intel_engine_cs *
-i915_gem_request_get_engine(struct drm_i915_gem_request *req)
-{
-	return req ? req->engine : NULL;
-}
-
-static inline struct drm_i915_gem_request *
-i915_gem_request_reference(struct drm_i915_gem_request *req)
-{
-	if (req)
-		kref_get(&req->ref);
-	return req;
-}
-
-static inline void
-i915_gem_request_unreference(struct drm_i915_gem_request *req)
-{
-	kref_put(&req->ref, i915_gem_request_free);
-}
-
-static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
-					   struct drm_i915_gem_request *src)
-{
-	if (src)
-		i915_gem_request_reference(src);
-
-	if (*pdst)
-		i915_gem_request_unreference(*pdst);
-
-	*pdst = src;
-}
-
-/*
- * XXX: i915_gem_request_completed should be here but currently needs the
- * definition of i915_seqno_passed() which is below. It will be moved in
- * a later patch when the call to i915_seqno_passed() is obsoleted...
- */
-
 /*
  * A command that requires special handling by the command parser.
  */
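Everything removed here moves, essentially unchanged, into the new i915_gem_request.h included at the top of the patch. The linchpin among the relocated helpers is i915_seqno_passed(), which compares 32-bit seqnos via signed subtraction so the ordering stays correct across u32 wraparound; as removed above (types abbreviated to the kernel's u32/s32):

	/* Now lives in i915_gem_request.h: (s32)(a - b) >= 0 treats any
	 * difference below 2^31 as "a is later", so monotonically
	 * increasing seqnos survive overflow of the u32 counter.
	 */
	static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
	{
		return (s32)(seq1 - seq2) >= 0;
	}
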
@@ -2617,8 +2532,9 @@ struct drm_i915_cmd_descriptor {
 /*
  * A table of commands requiring special handling by the command parser.
  *
- * Each ring has an array of tables. Each table consists of an array of command
- * descriptors, which must be sorted with command opcodes in ascending order.
+ * Each engine has an array of tables. Each table consists of an array of
+ * command descriptors, which must be sorted with command opcodes in
+ * ascending order.
  */
 struct drm_i915_cmd_table {
 	const struct drm_i915_cmd_descriptor *table;
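The sorted-by-opcode requirement exists so a descriptor can be located by binary search rather than a linear scan of every table. A sketch of such a lookup over the structures declared here (illustrative only: the in-tree parser also uses per-descriptor masks and a hash pre-filter that this ignores):

	/* Binary search a sorted descriptor table for an exact opcode
	 * match; cmd.value holds the opcode bits the table is ordered by.
	 */
	static const struct drm_i915_cmd_descriptor *
	sketch_find_cmd(const struct drm_i915_cmd_table *t, u32 opcode)
	{
		int lo = 0, hi = t->count - 1;

		while (lo <= hi) {
			int mid = lo + (hi - lo) / 2;

			if (t->table[mid].cmd.value == opcode)
				return &t->table[mid];
			if (t->table[mid].cmd.value < opcode)
				lo = mid + 1;
			else
				hi = mid - 1;
		}
		return NULL;
	}
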
@@ -2932,6 +2848,8 @@ extern int i915_resume_switcheroo(struct drm_device *dev);
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
 				int enable_ppgtt);
 
+bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
+
 /* i915_drv.c */
 void __printf(3, 4)
 __i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -3107,11 +3025,6 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
-void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-					struct drm_i915_gem_request *req);
-int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
-				   struct drm_i915_gem_execbuffer2 *args,
-				   struct list_head *vmas);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -3150,40 +3063,24 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
 						  size_t size);
 struct drm_i915_gem_object *i915_gem_object_create_from_data(
 		struct drm_device *dev, const void *data, size_t size);
+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 void i915_gem_free_object(struct drm_gem_object *obj);
-void i915_gem_vma_destroy(struct i915_vma *vma);
-
-/* Flags used by pin/bind&friends. */
-#define PIN_MAPPABLE	(1<<0)
-#define PIN_NONBLOCK	(1<<1)
-#define PIN_GLOBAL	(1<<2)
-#define PIN_OFFSET_BIAS	(1<<3)
-#define PIN_USER	(1<<4)
-#define PIN_UPDATE	(1<<5)
-#define PIN_ZONE_4G	(1<<6)
-#define PIN_HIGH	(1<<7)
-#define PIN_OFFSET_FIXED	(1<<8)
-#define PIN_OFFSET_MASK (~4095)
-int __must_check
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm,
-		    uint32_t alignment,
-		    uint64_t flags);
+
 int __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
-			 uint32_t alignment,
-			 uint64_t flags);
+			 u64 size,
+			 u64 alignment,
+			 u64 flags);
 
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
-/*
- * BEWARE: Do not use the function below unless you can _absolutely_
- * _guarantee_ VMA in question is _not in use_ anywhere.
- */
-int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+void i915_vma_close(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
+
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
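With i915_gem_object_pin() gone, i915_gem_object_ggtt_pin() becomes the single pinning entry point; it gains an explicit minimum size alongside alignment, and all three quantities widen to u64. An illustrative call (values are ours; a NULL view selects the normal GGTT view, and flags of 0 lets the allocator pick the offset):

	/* Pin an object into the global GTT at page alignment, asking
	 * for at least one page of space.
	 */
	static int sketch_pin_into_ggtt(struct drm_i915_gem_object *obj)
	{
		return i915_gem_object_ggtt_pin(obj, NULL, 4096, 4096, 0);
	}
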
@@ -3285,10 +3182,10 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-			 struct intel_engine_cs *to,
-			 struct drm_i915_gem_request **to_req);
+			 struct drm_i915_gem_request *to);
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct drm_i915_gem_request *req);
+			     struct drm_i915_gem_request *req,
+			     unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
@@ -3299,44 +3196,12 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 		       struct drm_i915_gem_object *new,
 		       unsigned frontbuffer_bits);
 
-/**
- * Returns true if seq1 is later than seq2.
- */
-static inline bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-{
-	return (int32_t)(seq1 - seq2) >= 0;
-}
-
-static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req)
-{
-	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
-				 req->previous_seqno);
-}
-
-static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req)
-{
-	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
-				 req->seqno);
-}
-
-bool __i915_spin_request(const struct drm_i915_gem_request *request,
-			 int state, unsigned long timeout_us);
-static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
-				     int state, unsigned long timeout_us)
-{
-	return (i915_gem_request_started(request) &&
-		__i915_spin_request(request, state, timeout_us));
-}
-
-int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
 
 static inline u32 i915_reset_counter(struct i915_gpu_error *error)
 {
@@ -3381,24 +3246,13 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
 void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
-int i915_gem_init_engines(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
-int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
+int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
+					bool interruptible);
 int __must_check i915_gem_suspend(struct drm_device *dev);
-void __i915_add_request(struct drm_i915_gem_request *req,
-			struct drm_i915_gem_object *batch_obj,
-			bool flush_caches);
-#define i915_add_request(req) \
-	__i915_add_request(req, NULL, true)
-#define i915_add_request_no_flush(req) \
-	__i915_add_request(req, NULL, false)
-int __i915_wait_request(struct drm_i915_gem_request *req,
-			bool interruptible,
-			s64 *timeout,
-			struct intel_rps_client *rps);
-int __must_check i915_wait_request(struct drm_i915_gem_request *req);
+void i915_gem_resume(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
@@ -3419,11 +3273,10 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
-uint32_t
-i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
-uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
-			   int tiling_mode, bool fenced);
+u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
+			   int tiling_mode);
+u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
+				int tiling_mode, bool fenced);
 
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);
@@ -3444,7 +3297,6 @@ i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
 	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
 }
 
-bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 				  const struct i915_ggtt_view *view);
 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
@@ -3478,7 +3330,6 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
 	return container_of(vm, struct i915_hw_ppgtt, base);
 }
 
-
 static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 {
 	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
@@ -3487,18 +3338,6 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 unsigned long
 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
 
-static inline int __must_check
-i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
-		      uint32_t alignment,
-		      unsigned flags)
-{
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-	return i915_gem_object_pin(obj, &ggtt->base,
-				   alignment, flags | PIN_GLOBAL);
-}
-
 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
 				     const struct i915_ggtt_view *view);
 static inline void
@@ -3528,6 +3367,7 @@ void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct drm_i915_gem_request *req);
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
 void i915_gem_context_free(struct kref *ctx_ref);
 struct drm_i915_gem_object *
 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
@@ -3548,12 +3388,14 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
 	return ctx;
 }
 
-static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
+static inline struct i915_gem_context *
+i915_gem_context_get(struct i915_gem_context *ctx)
 {
 	kref_get(&ctx->ref);
+	return ctx;
 }
 
-static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
+static inline void i915_gem_context_put(struct i915_gem_context *ctx)
 {
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	kref_put(&ctx->ref, i915_gem_context_free);
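Besides following the kernel-wide get/put naming convention, having i915_gem_context_get() return its argument lets callers take the reference inline at the point of assignment. Typical usage (the surrounding struct and helpers are hypothetical):

	struct ctx_holder { struct i915_gem_context *ctx; };

	static void sketch_hold_ctx(struct ctx_holder *h,
				    struct i915_gem_context *ctx)
	{
		h->ctx = i915_gem_context_get(ctx);	/* ref taken inline */
	}

	/* caller must hold struct_mutex, as asserted by the put above */
	static void sketch_release_ctx(struct ctx_holder *h)
	{
		i915_gem_context_put(h->ctx);
		h->ctx = NULL;
	}
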
@@ -3576,13 +3418,10 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
 				       struct drm_file *file);
 
 /* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev,
-					  struct i915_address_space *vm,
-					  int min_size,
-					  unsigned alignment,
+int __must_check i915_gem_evict_something(struct i915_address_space *vm,
+					  u64 min_size, u64 alignment,
 					  unsigned cache_level,
-					  unsigned long start,
-					  unsigned long end,
+					  u64 start, u64 end,
 					  unsigned flags);
 int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
@@ -3634,16 +3473,9 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
-		obj->tiling_mode != I915_TILING_NONE;
+		i915_gem_object_is_tiled(obj);
 }
 
-/* i915_gem_debug.c */
-#if WATCH_LISTS
-int i915_verify_lists(struct drm_device *dev);
-#else
-#define i915_verify_lists(dev) 0
-#endif
-
 /* i915_debugfs.c */
 #ifdef CONFIG_DEBUG_FS
 int i915_debugfs_register(struct drm_i915_private *dev_priv);
@@ -3684,15 +3516,15 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
-int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
-bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
-int i915_parse_cmds(struct intel_engine_cs *engine,
-		    struct drm_i915_gem_object *batch_obj,
-		    struct drm_i915_gem_object *shadow_batch_obj,
-		    u32 batch_start_offset,
-		    u32 batch_len,
-		    bool is_master);
+int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
+void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
+bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
+int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+			    struct drm_i915_gem_object *batch_obj,
+			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u32 batch_start_offset,
+			    u32 batch_len,
+			    bool is_master);
 
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
@@ -3800,7 +3632,6 @@ extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
 
-extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
 