Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 -rw-r--r--   drivers/gpu/drm/i915/i915_gem.c   645
1 file changed, 336 insertions, 309 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 28f91df2604d..4a9faea626db 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
| @@ -160,33 +160,6 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) | |||
| 160 | } | 160 | } |
| 161 | 161 | ||
| 162 | int | 162 | int |
| 163 | i915_gem_init_ioctl(struct drm_device *dev, void *data, | ||
| 164 | struct drm_file *file) | ||
| 165 | { | ||
| 166 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 167 | struct drm_i915_gem_init *args = data; | ||
| 168 | |||
| 169 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
| 170 | return -ENODEV; | ||
| 171 | |||
| 172 | if (args->gtt_start >= args->gtt_end || | ||
| 173 | (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) | ||
| 174 | return -EINVAL; | ||
| 175 | |||
| 176 | /* GEM with user mode setting was never supported on ilk and later. */ | ||
| 177 | if (INTEL_INFO(dev)->gen >= 5) | ||
| 178 | return -ENODEV; | ||
| 179 | |||
| 180 | mutex_lock(&dev->struct_mutex); | ||
| 181 | i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end, | ||
| 182 | args->gtt_end); | ||
| 183 | dev_priv->gtt.mappable_end = args->gtt_end; | ||
| 184 | mutex_unlock(&dev->struct_mutex); | ||
| 185 | |||
| 186 | return 0; | ||
| 187 | } | ||
| 188 | |||
| 189 | int | ||
| 190 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | 163 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, |
| 191 | struct drm_file *file) | 164 | struct drm_file *file) |
| 192 | { | 165 | { |
| @@ -208,40 +181,137 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
| 208 | return 0; | 181 | return 0; |
| 209 | } | 182 | } |
| 210 | 183 | ||
| 211 | static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj) | 184 | static int |
| 185 | i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) | ||
| 212 | { | 186 | { |
| 213 | drm_dma_handle_t *phys = obj->phys_handle; | 187 | struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; |
| 188 | char *vaddr = obj->phys_handle->vaddr; | ||
| 189 | struct sg_table *st; | ||
| 190 | struct scatterlist *sg; | ||
| 191 | int i; | ||
| 214 | 192 | ||
| 215 | if (!phys) | 193 | if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) |
| 216 | return; | 194 | return -EINVAL; |
| 217 | 195 | ||
| 218 | if (obj->madv == I915_MADV_WILLNEED) { | 196 | for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { |
| 197 | struct page *page; | ||
| 198 | char *src; | ||
| 199 | |||
| 200 | page = shmem_read_mapping_page(mapping, i); | ||
| 201 | if (IS_ERR(page)) | ||
| 202 | return PTR_ERR(page); | ||
| 203 | |||
| 204 | src = kmap_atomic(page); | ||
| 205 | memcpy(vaddr, src, PAGE_SIZE); | ||
| 206 | drm_clflush_virt_range(vaddr, PAGE_SIZE); | ||
| 207 | kunmap_atomic(src); | ||
| 208 | |||
| 209 | page_cache_release(page); | ||
| 210 | vaddr += PAGE_SIZE; | ||
| 211 | } | ||
| 212 | |||
| 213 | i915_gem_chipset_flush(obj->base.dev); | ||
| 214 | |||
| 215 | st = kmalloc(sizeof(*st), GFP_KERNEL); | ||
| 216 | if (st == NULL) | ||
| 217 | return -ENOMEM; | ||
| 218 | |||
| 219 | if (sg_alloc_table(st, 1, GFP_KERNEL)) { | ||
| 220 | kfree(st); | ||
| 221 | return -ENOMEM; | ||
| 222 | } | ||
| 223 | |||
| 224 | sg = st->sgl; | ||
| 225 | sg->offset = 0; | ||
| 226 | sg->length = obj->base.size; | ||
| 227 | |||
| 228 | sg_dma_address(sg) = obj->phys_handle->busaddr; | ||
| 229 | sg_dma_len(sg) = obj->base.size; | ||
| 230 | |||
| 231 | obj->pages = st; | ||
| 232 | obj->has_dma_mapping = true; | ||
| 233 | return 0; | ||
| 234 | } | ||
| 235 | |||
| 236 | static void | ||
| 237 | i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) | ||
| 238 | { | ||
| 239 | int ret; | ||
| 240 | |||
| 241 | BUG_ON(obj->madv == __I915_MADV_PURGED); | ||
| 242 | |||
| 243 | ret = i915_gem_object_set_to_cpu_domain(obj, true); | ||
| 244 | if (ret) { | ||
| 245 | /* In the event of a disaster, abandon all caches and | ||
| 246 | * hope for the best. | ||
| 247 | */ | ||
| 248 | WARN_ON(ret != -EIO); | ||
| 249 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; | ||
| 250 | } | ||
| 251 | |||
| 252 | if (obj->madv == I915_MADV_DONTNEED) | ||
| 253 | obj->dirty = 0; | ||
| 254 | |||
| 255 | if (obj->dirty) { | ||
| 219 | struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; | 256 | struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; |
| 220 | char *vaddr = phys->vaddr; | 257 | char *vaddr = obj->phys_handle->vaddr; |
| 221 | int i; | 258 | int i; |
| 222 | 259 | ||
| 223 | for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { | 260 | for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { |
| 224 | struct page *page = shmem_read_mapping_page(mapping, i); | 261 | struct page *page; |
| 225 | if (!IS_ERR(page)) { | 262 | char *dst; |
| 226 | char *dst = kmap_atomic(page); | 263 | |
| 227 | memcpy(dst, vaddr, PAGE_SIZE); | 264 | page = shmem_read_mapping_page(mapping, i); |
| 228 | drm_clflush_virt_range(dst, PAGE_SIZE); | 265 | if (IS_ERR(page)) |
| 229 | kunmap_atomic(dst); | 266 | continue; |
| 230 | 267 | ||
| 231 | set_page_dirty(page); | 268 | dst = kmap_atomic(page); |
| 269 | drm_clflush_virt_range(vaddr, PAGE_SIZE); | ||
| 270 | memcpy(dst, vaddr, PAGE_SIZE); | ||
| 271 | kunmap_atomic(dst); | ||
| 272 | |||
| 273 | set_page_dirty(page); | ||
| 274 | if (obj->madv == I915_MADV_WILLNEED) | ||
| 232 | mark_page_accessed(page); | 275 | mark_page_accessed(page); |
| 233 | page_cache_release(page); | 276 | page_cache_release(page); |
| 234 | } | ||
| 235 | vaddr += PAGE_SIZE; | 277 | vaddr += PAGE_SIZE; |
| 236 | } | 278 | } |
| 237 | i915_gem_chipset_flush(obj->base.dev); | 279 | obj->dirty = 0; |
| 238 | } | 280 | } |
| 239 | 281 | ||
| 240 | #ifdef CONFIG_X86 | 282 | sg_free_table(obj->pages); |
| 241 | set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE); | 283 | kfree(obj->pages); |
| 242 | #endif | 284 | |
| 243 | drm_pci_free(obj->base.dev, phys); | 285 | obj->has_dma_mapping = false; |
| 244 | obj->phys_handle = NULL; | 286 | } |
| 287 | |||
| 288 | static void | ||
| 289 | i915_gem_object_release_phys(struct drm_i915_gem_object *obj) | ||
| 290 | { | ||
| 291 | drm_pci_free(obj->base.dev, obj->phys_handle); | ||
| 292 | } | ||
| 293 | |||
| 294 | static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { | ||
| 295 | .get_pages = i915_gem_object_get_pages_phys, | ||
| 296 | .put_pages = i915_gem_object_put_pages_phys, | ||
| 297 | .release = i915_gem_object_release_phys, | ||
| 298 | }; | ||
| 299 | |||
| 300 | static int | ||
| 301 | drop_pages(struct drm_i915_gem_object *obj) | ||
| 302 | { | ||
| 303 | struct i915_vma *vma, *next; | ||
| 304 | int ret; | ||
| 305 | |||
| 306 | drm_gem_object_reference(&obj->base); | ||
| 307 | list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) | ||
| 308 | if (i915_vma_unbind(vma)) | ||
| 309 | break; | ||
| 310 | |||
| 311 | ret = i915_gem_object_put_pages(obj); | ||
| 312 | drm_gem_object_unreference(&obj->base); | ||
| 313 | |||
| 314 | return ret; | ||
| 245 | } | 315 | } |
| 246 | 316 | ||
| 247 | int | 317 | int |
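For readers reconstructing the intent of the hunks above from the raw diff: the patch stops tearing down phys-backed objects by hand (the removed i915_gem_object_detach_phys) and instead gives them a regular drm_i915_gem_object_ops table (get_pages/put_pages/release), so attaching the phys backend becomes "drop the current pages, swap the ops table, re-acquire pages". The standalone C sketch below mirrors only that shape; every type and function name in it (toy_obj, shmem_ops, phys_ops, attach_phys) is invented for illustration and is not the driver's API.

        /*
         * Standalone sketch (not kernel code) of the ops-table backend swap.
         * All names are made up for illustration.
         */
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        struct toy_obj;

        struct toy_obj_ops {
                int  (*get_pages)(struct toy_obj *obj);
                void (*put_pages)(struct toy_obj *obj);
        };

        struct toy_obj {
                const struct toy_obj_ops *ops;
                void *pages;
                size_t size;
        };

        /* Default backend: a heap allocation standing in for shmem pages. */
        static int shmem_get_pages(struct toy_obj *obj)
        {
                obj->pages = calloc(1, obj->size);
                return obj->pages ? 0 : -1;
        }

        static void shmem_put_pages(struct toy_obj *obj)
        {
                free(obj->pages);
                obj->pages = NULL;
        }

        static const struct toy_obj_ops shmem_ops = {
                .get_pages = shmem_get_pages,
                .put_pages = shmem_put_pages,
        };

        /* "Phys" backend: one contiguous buffer, filled so we can tell it apart. */
        static int phys_get_pages(struct toy_obj *obj)
        {
                obj->pages = malloc(obj->size);
                if (!obj->pages)
                        return -1;
                memset(obj->pages, 0xaa, obj->size);
                return 0;
        }

        static const struct toy_obj_ops phys_ops = {
                .get_pages = phys_get_pages,
                .put_pages = shmem_put_pages,   /* same teardown works here */
        };

        /* Mirror of the attach flow: drop pages, swap ops, get pages again. */
        static int attach_phys(struct toy_obj *obj)
        {
                if (obj->pages)
                        obj->ops->put_pages(obj);
                obj->ops = &phys_ops;
                return obj->ops->get_pages(obj);
        }

        int main(void)
        {
                struct toy_obj obj = { .ops = &shmem_ops, .size = 4096 };

                if (obj.ops->get_pages(&obj) || attach_phys(&obj))
                        return 1;
                printf("first byte after backend swap: 0x%02x\n",
                       ((unsigned char *)obj.pages)[0]);
                obj.ops->put_pages(&obj);
                return 0;
        }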
| @@ -249,9 +319,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, | |||
| 249 | int align) | 319 | int align) |
| 250 | { | 320 | { |
| 251 | drm_dma_handle_t *phys; | 321 | drm_dma_handle_t *phys; |
| 252 | struct address_space *mapping; | 322 | int ret; |
| 253 | char *vaddr; | ||
| 254 | int i; | ||
| 255 | 323 | ||
| 256 | if (obj->phys_handle) { | 324 | if (obj->phys_handle) { |
| 257 | if ((unsigned long)obj->phys_handle->vaddr & (align -1)) | 325 | if ((unsigned long)obj->phys_handle->vaddr & (align -1)) |
| @@ -266,41 +334,19 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, | |||
| 266 | if (obj->base.filp == NULL) | 334 | if (obj->base.filp == NULL) |
| 267 | return -EINVAL; | 335 | return -EINVAL; |
| 268 | 336 | ||
| 337 | ret = drop_pages(obj); | ||
| 338 | if (ret) | ||
| 339 | return ret; | ||
| 340 | |||
| 269 | /* create a new object */ | 341 | /* create a new object */ |
| 270 | phys = drm_pci_alloc(obj->base.dev, obj->base.size, align); | 342 | phys = drm_pci_alloc(obj->base.dev, obj->base.size, align); |
| 271 | if (!phys) | 343 | if (!phys) |
| 272 | return -ENOMEM; | 344 | return -ENOMEM; |
| 273 | 345 | ||
| 274 | vaddr = phys->vaddr; | ||
| 275 | #ifdef CONFIG_X86 | ||
| 276 | set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE); | ||
| 277 | #endif | ||
| 278 | mapping = file_inode(obj->base.filp)->i_mapping; | ||
| 279 | for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { | ||
| 280 | struct page *page; | ||
| 281 | char *src; | ||
| 282 | |||
| 283 | page = shmem_read_mapping_page(mapping, i); | ||
| 284 | if (IS_ERR(page)) { | ||
| 285 | #ifdef CONFIG_X86 | ||
| 286 | set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE); | ||
| 287 | #endif | ||
| 288 | drm_pci_free(obj->base.dev, phys); | ||
| 289 | return PTR_ERR(page); | ||
| 290 | } | ||
| 291 | |||
| 292 | src = kmap_atomic(page); | ||
| 293 | memcpy(vaddr, src, PAGE_SIZE); | ||
| 294 | kunmap_atomic(src); | ||
| 295 | |||
| 296 | mark_page_accessed(page); | ||
| 297 | page_cache_release(page); | ||
| 298 | |||
| 299 | vaddr += PAGE_SIZE; | ||
| 300 | } | ||
| 301 | |||
| 302 | obj->phys_handle = phys; | 346 | obj->phys_handle = phys; |
| 303 | return 0; | 347 | obj->ops = &i915_gem_phys_ops; |
| 348 | |||
| 349 | return i915_gem_object_get_pages(obj); | ||
| 304 | } | 350 | } |
| 305 | 351 | ||
| 306 | static int | 352 | static int |
| @@ -311,6 +357,14 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, | |||
| 311 | struct drm_device *dev = obj->base.dev; | 357 | struct drm_device *dev = obj->base.dev; |
| 312 | void *vaddr = obj->phys_handle->vaddr + args->offset; | 358 | void *vaddr = obj->phys_handle->vaddr + args->offset; |
| 313 | char __user *user_data = to_user_ptr(args->data_ptr); | 359 | char __user *user_data = to_user_ptr(args->data_ptr); |
| 360 | int ret; | ||
| 361 | |||
| 362 | /* We manually control the domain here and pretend that it | ||
| 363 | * remains coherent i.e. in the GTT domain, like shmem_pwrite. | ||
| 364 | */ | ||
| 365 | ret = i915_gem_object_wait_rendering(obj, false); | ||
| 366 | if (ret) | ||
| 367 | return ret; | ||
| 314 | 368 | ||
| 315 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { | 369 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { |
| 316 | unsigned long unwritten; | 370 | unsigned long unwritten; |
| @@ -326,6 +380,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, | |||
| 326 | return -EFAULT; | 380 | return -EFAULT; |
| 327 | } | 381 | } |
| 328 | 382 | ||
| 383 | drm_clflush_virt_range(vaddr, args->size); | ||
| 329 | i915_gem_chipset_flush(dev); | 384 | i915_gem_chipset_flush(dev); |
| 330 | return 0; | 385 | return 0; |
| 331 | } | 386 | } |
| @@ -346,6 +401,7 @@ static int | |||
| 346 | i915_gem_create(struct drm_file *file, | 401 | i915_gem_create(struct drm_file *file, |
| 347 | struct drm_device *dev, | 402 | struct drm_device *dev, |
| 348 | uint64_t size, | 403 | uint64_t size, |
| 404 | bool dumb, | ||
| 349 | uint32_t *handle_p) | 405 | uint32_t *handle_p) |
| 350 | { | 406 | { |
| 351 | struct drm_i915_gem_object *obj; | 407 | struct drm_i915_gem_object *obj; |
| @@ -361,6 +417,7 @@ i915_gem_create(struct drm_file *file, | |||
| 361 | if (obj == NULL) | 417 | if (obj == NULL) |
| 362 | return -ENOMEM; | 418 | return -ENOMEM; |
| 363 | 419 | ||
| 420 | obj->base.dumb = dumb; | ||
| 364 | ret = drm_gem_handle_create(file, &obj->base, &handle); | 421 | ret = drm_gem_handle_create(file, &obj->base, &handle); |
| 365 | /* drop reference from allocate - handle holds it now */ | 422 | /* drop reference from allocate - handle holds it now */ |
| 366 | drm_gem_object_unreference_unlocked(&obj->base); | 423 | drm_gem_object_unreference_unlocked(&obj->base); |
| @@ -380,7 +437,7 @@ i915_gem_dumb_create(struct drm_file *file, | |||
| 380 | args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); | 437 | args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); |
| 381 | args->size = args->pitch * args->height; | 438 | args->size = args->pitch * args->height; |
| 382 | return i915_gem_create(file, dev, | 439 | return i915_gem_create(file, dev, |
| 383 | args->size, &args->handle); | 440 | args->size, true, &args->handle); |
| 384 | } | 441 | } |
| 385 | 442 | ||
| 386 | /** | 443 | /** |
| @@ -393,7 +450,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, | |||
| 393 | struct drm_i915_gem_create *args = data; | 450 | struct drm_i915_gem_create *args = data; |
| 394 | 451 | ||
| 395 | return i915_gem_create(file, dev, | 452 | return i915_gem_create(file, dev, |
| 396 | args->size, &args->handle); | 453 | args->size, false, &args->handle); |
| 397 | } | 454 | } |
| 398 | 455 | ||
| 399 | static inline int | 456 | static inline int |
| @@ -1046,11 +1103,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
| 1046 | * pread/pwrite currently are reading and writing from the CPU | 1103 | * pread/pwrite currently are reading and writing from the CPU |
| 1047 | * perspective, requiring manual detiling by the client. | 1104 | * perspective, requiring manual detiling by the client. |
| 1048 | */ | 1105 | */ |
| 1049 | if (obj->phys_handle) { | ||
| 1050 | ret = i915_gem_phys_pwrite(obj, args, file); | ||
| 1051 | goto out; | ||
| 1052 | } | ||
| 1053 | |||
| 1054 | if (obj->tiling_mode == I915_TILING_NONE && | 1106 | if (obj->tiling_mode == I915_TILING_NONE && |
| 1055 | obj->base.write_domain != I915_GEM_DOMAIN_CPU && | 1107 | obj->base.write_domain != I915_GEM_DOMAIN_CPU && |
| 1056 | cpu_write_needs_clflush(obj)) { | 1108 | cpu_write_needs_clflush(obj)) { |
| @@ -1060,8 +1112,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
| 1060 | * textures). Fallback to the shmem path in that case. */ | 1112 | * textures). Fallback to the shmem path in that case. */ |
| 1061 | } | 1113 | } |
| 1062 | 1114 | ||
| 1063 | if (ret == -EFAULT || ret == -ENOSPC) | 1115 | if (ret == -EFAULT || ret == -ENOSPC) { |
| 1064 | ret = i915_gem_shmem_pwrite(dev, obj, args, file); | 1116 | if (obj->phys_handle) |
| 1117 | ret = i915_gem_phys_pwrite(obj, args, file); | ||
| 1118 | else | ||
| 1119 | ret = i915_gem_shmem_pwrite(dev, obj, args, file); | ||
| 1120 | } | ||
| 1065 | 1121 | ||
| 1066 | out: | 1122 | out: |
| 1067 | drm_gem_object_unreference(&obj->base); | 1123 | drm_gem_object_unreference(&obj->base); |
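The hunk above demotes i915_gem_phys_pwrite from an unconditional early return to a fallback: the GTT fast path runs first, and only -EFAULT or -ENOSPC routes the write to the phys or shmem slow path. A minimal standalone sketch of that ordering follows; the names (do_pwrite, gtt_pwrite_fast, phys_pwrite, shmem_pwrite, toy_obj) are hypothetical stand-ins, not the driver's functions, and the condition inside gtt_pwrite_fast is contrived purely to exercise the fallback.

        /* Standalone sketch of the new pwrite fallback ordering. */
        #include <errno.h>
        #include <stdio.h>

        struct toy_obj {
                int has_phys_handle;
                int tiled;
        };

        /* Pretend the fast path cannot cope with tiled objects. */
        static int gtt_pwrite_fast(const struct toy_obj *obj)
        {
                return obj->tiled ? -EFAULT : 0;
        }

        static int phys_pwrite(const struct toy_obj *obj)  { (void)obj; return 0; }
        static int shmem_pwrite(const struct toy_obj *obj) { (void)obj; return 0; }

        static int do_pwrite(const struct toy_obj *obj)
        {
                int ret = gtt_pwrite_fast(obj);

                /* Only these errors demote the write to a slow path. */
                if (ret == -EFAULT || ret == -ENOSPC)
                        ret = obj->has_phys_handle ? phys_pwrite(obj)
                                                   : shmem_pwrite(obj);
                return ret;
        }

        int main(void)
        {
                struct toy_obj obj = { .has_phys_handle = 1, .tiled = 1 };

                printf("pwrite result: %d\n", do_pwrite(&obj));
                return 0;
        }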
| @@ -1134,7 +1190,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv) | |||
| 1134 | } | 1190 | } |
| 1135 | 1191 | ||
| 1136 | /** | 1192 | /** |
| 1137 | * __wait_seqno - wait until execution of seqno has finished | 1193 | * __i915_wait_seqno - wait until execution of seqno has finished |
| 1138 | * @ring: the ring expected to report seqno | 1194 | * @ring: the ring expected to report seqno |
| 1139 | * @seqno: duh! | 1195 | * @seqno: duh! |
| 1140 | * @reset_counter: reset sequence associated with the given seqno | 1196 | * @reset_counter: reset sequence associated with the given seqno |
| @@ -1151,7 +1207,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv) | |||
| 1151 | * Returns 0 if the seqno was found within the allotted time. Else returns the | 1207 |
| 1152 | * errno with remaining time filled in timeout argument. | 1208 | * errno with remaining time filled in timeout argument. |
| 1153 | */ | 1209 | */ |
| 1154 | static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno, | 1210 | int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno, |
| 1155 | unsigned reset_counter, | 1211 | unsigned reset_counter, |
| 1156 | bool interruptible, | 1212 | bool interruptible, |
| 1157 | s64 *timeout, | 1213 | s64 *timeout, |
| @@ -1171,7 +1227,8 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno, | |||
| 1171 | if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) | 1227 | if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) |
| 1172 | return 0; | 1228 | return 0; |
| 1173 | 1229 | ||
| 1174 | timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0; | 1230 | timeout_expire = timeout ? |
| 1231 | jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0; | ||
| 1175 | 1232 | ||
| 1176 | if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) { | 1233 | if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) { |
| 1177 | gen6_rps_boost(dev_priv); | 1234 | gen6_rps_boost(dev_priv); |
| @@ -1247,6 +1304,16 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno, | |||
| 1247 | s64 tres = *timeout - (now - before); | 1304 | s64 tres = *timeout - (now - before); |
| 1248 | 1305 | ||
| 1249 | *timeout = tres < 0 ? 0 : tres; | 1306 | *timeout = tres < 0 ? 0 : tres; |
| 1307 | |||
| 1308 | /* | ||
| 1309 | * Apparently ktime isn't accurate enough and occasionally has a | ||
| 1310 | * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch | ||
| 1311 | * things up to make the test happy. We allow up to 1 jiffy. | ||
| 1312 | * | ||
| 1313 | * This is a regression from the timespec->ktime conversion. | ||
| 1314 | */ | ||
| 1315 | if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000) | ||
| 1316 | *timeout = 0; | ||
| 1250 | } | 1317 | } |
| 1251 | 1318 | ||
| 1252 | return ret; | 1319 | return ret; |
| @@ -1262,6 +1329,7 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno) | |||
| 1262 | struct drm_device *dev = ring->dev; | 1329 | struct drm_device *dev = ring->dev; |
| 1263 | struct drm_i915_private *dev_priv = dev->dev_private; | 1330 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1264 | bool interruptible = dev_priv->mm.interruptible; | 1331 | bool interruptible = dev_priv->mm.interruptible; |
| 1332 | unsigned reset_counter; | ||
| 1265 | int ret; | 1333 | int ret; |
| 1266 | 1334 | ||
| 1267 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 1335 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
| @@ -1275,14 +1343,13 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno) | |||
| 1275 | if (ret) | 1343 | if (ret) |
| 1276 | return ret; | 1344 | return ret; |
| 1277 | 1345 | ||
| 1278 | return __wait_seqno(ring, seqno, | 1346 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
| 1279 | atomic_read(&dev_priv->gpu_error.reset_counter), | 1347 | return __i915_wait_seqno(ring, seqno, reset_counter, interruptible, |
| 1280 | interruptible, NULL, NULL); | 1348 | NULL, NULL); |
| 1281 | } | 1349 | } |
| 1282 | 1350 | ||
| 1283 | static int | 1351 | static int |
| 1284 | i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj, | 1352 | i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj) |
| 1285 | struct intel_engine_cs *ring) | ||
| 1286 | { | 1353 | { |
| 1287 | if (!obj->active) | 1354 | if (!obj->active) |
| 1288 | return 0; | 1355 | return 0; |
| @@ -1319,7 +1386,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, | |||
| 1319 | if (ret) | 1386 | if (ret) |
| 1320 | return ret; | 1387 | return ret; |
| 1321 | 1388 | ||
| 1322 | return i915_gem_object_wait_rendering__tail(obj, ring); | 1389 | return i915_gem_object_wait_rendering__tail(obj); |
| 1323 | } | 1390 | } |
| 1324 | 1391 | ||
| 1325 | /* A nonblocking variant of the above wait. This is a highly dangerous routine | 1392 | /* A nonblocking variant of the above wait. This is a highly dangerous routine |
| @@ -1354,12 +1421,13 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | |||
| 1354 | 1421 | ||
| 1355 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | 1422 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
| 1356 | mutex_unlock(&dev->struct_mutex); | 1423 | mutex_unlock(&dev->struct_mutex); |
| 1357 | ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv); | 1424 | ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, |
| 1425 | file_priv); | ||
| 1358 | mutex_lock(&dev->struct_mutex); | 1426 | mutex_lock(&dev->struct_mutex); |
| 1359 | if (ret) | 1427 | if (ret) |
| 1360 | return ret; | 1428 | return ret; |
| 1361 | 1429 | ||
| 1362 | return i915_gem_object_wait_rendering__tail(obj, ring); | 1430 | return i915_gem_object_wait_rendering__tail(obj); |
| 1363 | } | 1431 | } |
| 1364 | 1432 | ||
| 1365 | /** | 1433 | /** |
| @@ -1466,6 +1534,16 @@ unlock: | |||
| 1466 | * | 1534 | * |
| 1467 | * While the mapping holds a reference on the contents of the object, it doesn't | 1535 | * While the mapping holds a reference on the contents of the object, it doesn't |
| 1468 | * imply a ref on the object itself. | 1536 | * imply a ref on the object itself. |
| 1537 | * | ||
| 1538 | * IMPORTANT: | ||
| 1539 | * | ||
| 1540 | * DRM driver writers who look at this function as an example for how to do GEM | ||
| 1541 | * mmap support, please don't implement mmap support like here. The modern way | ||
| 1542 | * to implement DRM mmap support is with an mmap offset ioctl (like | ||
| 1543 | * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly. | ||
| 1544 | * That way debug tooling like valgrind will understand what's going on, hiding | ||
| 1545 | * the mmap call in a driver private ioctl will break that. The i915 driver only | ||
| 1546 | * does cpu mmaps this way because we didn't know better. | ||
| 1469 | */ | 1547 | */ |
| 1470 | int | 1548 | int |
| 1471 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | 1549 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
| @@ -1762,10 +1840,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) | |||
| 1762 | drm_gem_free_mmap_offset(&obj->base); | 1840 | drm_gem_free_mmap_offset(&obj->base); |
| 1763 | } | 1841 | } |
| 1764 | 1842 | ||
| 1765 | int | 1843 | static int |
| 1766 | i915_gem_mmap_gtt(struct drm_file *file, | 1844 | i915_gem_mmap_gtt(struct drm_file *file, |
| 1767 | struct drm_device *dev, | 1845 | struct drm_device *dev, |
| 1768 | uint32_t handle, | 1846 | uint32_t handle, bool dumb, |
| 1769 | uint64_t *offset) | 1847 | uint64_t *offset) |
| 1770 | { | 1848 | { |
| 1771 | struct drm_i915_private *dev_priv = dev->dev_private; | 1849 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -1782,6 +1860,13 @@ i915_gem_mmap_gtt(struct drm_file *file, | |||
| 1782 | goto unlock; | 1860 | goto unlock; |
| 1783 | } | 1861 | } |
| 1784 | 1862 | ||
| 1863 | /* | ||
| 1864 | * We don't allow dumb mmaps on objects created using another | ||
| 1865 | * interface. | ||
| 1866 | */ | ||
| 1867 | WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach), | ||
| 1868 | "Illegal dumb map of accelerated buffer.\n"); | ||
| 1869 | |||
| 1785 | if (obj->base.size > dev_priv->gtt.mappable_end) { | 1870 | if (obj->base.size > dev_priv->gtt.mappable_end) { |
| 1786 | ret = -E2BIG; | 1871 | ret = -E2BIG; |
| 1787 | goto out; | 1872 | goto out; |
| @@ -1806,6 +1891,15 @@ unlock: | |||
| 1806 | return ret; | 1891 | return ret; |
| 1807 | } | 1892 | } |
| 1808 | 1893 | ||
| 1894 | int | ||
| 1895 | i915_gem_dumb_map_offset(struct drm_file *file, | ||
| 1896 | struct drm_device *dev, | ||
| 1897 | uint32_t handle, | ||
| 1898 | uint64_t *offset) | ||
| 1899 | { | ||
| 1900 | return i915_gem_mmap_gtt(file, dev, handle, true, offset); | ||
| 1901 | } | ||
| 1902 | |||
| 1809 | /** | 1903 | /** |
| 1810 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing | 1904 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing |
| 1811 | * @dev: DRM device | 1905 | * @dev: DRM device |
| @@ -1827,7 +1921,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
| 1827 | { | 1921 | { |
| 1828 | struct drm_i915_gem_mmap_gtt *args = data; | 1922 | struct drm_i915_gem_mmap_gtt *args = data; |
| 1829 | 1923 | ||
| 1830 | return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); | 1924 | return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset); |
| 1831 | } | 1925 | } |
| 1832 | 1926 | ||
| 1833 | static inline int | 1927 | static inline int |
| @@ -1945,7 +2039,14 @@ unsigned long | |||
| 1945 | i915_gem_shrink(struct drm_i915_private *dev_priv, | 2039 | i915_gem_shrink(struct drm_i915_private *dev_priv, |
| 1946 | long target, unsigned flags) | 2040 | long target, unsigned flags) |
| 1947 | { | 2041 | { |
| 1948 | const bool purgeable_only = flags & I915_SHRINK_PURGEABLE; | 2042 | const struct { |
| 2043 | struct list_head *list; | ||
| 2044 | unsigned int bit; | ||
| 2045 | } phases[] = { | ||
| 2046 | { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND }, | ||
| 2047 | { &dev_priv->mm.bound_list, I915_SHRINK_BOUND }, | ||
| 2048 | { NULL, 0 }, | ||
| 2049 | }, *phase; | ||
| 1949 | unsigned long count = 0; | 2050 | unsigned long count = 0; |
| 1950 | 2051 | ||
| 1951 | /* | 2052 | /* |
| @@ -1967,48 +2068,30 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, | |||
| 1967 | * dev->struct_mutex and so we won't ever be able to observe an | 2068 | * dev->struct_mutex and so we won't ever be able to observe an |
| 1968 | * object on the bound_list with a reference count equals 0. | 2069 | * object on the bound_list with a reference count equals 0. |
| 1969 | */ | 2070 | */ |
| 1970 | if (flags & I915_SHRINK_UNBOUND) { | 2071 | for (phase = phases; phase->list; phase++) { |
| 1971 | struct list_head still_in_list; | 2072 | struct list_head still_in_list; |
| 1972 | 2073 | ||
| 1973 | INIT_LIST_HEAD(&still_in_list); | 2074 | if ((flags & phase->bit) == 0) |
| 1974 | while (count < target && !list_empty(&dev_priv->mm.unbound_list)) { | 2075 | continue; |
| 1975 | struct drm_i915_gem_object *obj; | ||
| 1976 | |||
| 1977 | obj = list_first_entry(&dev_priv->mm.unbound_list, | ||
| 1978 | typeof(*obj), global_list); | ||
| 1979 | list_move_tail(&obj->global_list, &still_in_list); | ||
| 1980 | |||
| 1981 | if (!i915_gem_object_is_purgeable(obj) && purgeable_only) | ||
| 1982 | continue; | ||
| 1983 | |||
| 1984 | drm_gem_object_reference(&obj->base); | ||
| 1985 | |||
| 1986 | if (i915_gem_object_put_pages(obj) == 0) | ||
| 1987 | count += obj->base.size >> PAGE_SHIFT; | ||
| 1988 | |||
| 1989 | drm_gem_object_unreference(&obj->base); | ||
| 1990 | } | ||
| 1991 | list_splice(&still_in_list, &dev_priv->mm.unbound_list); | ||
| 1992 | } | ||
| 1993 | |||
| 1994 | if (flags & I915_SHRINK_BOUND) { | ||
| 1995 | struct list_head still_in_list; | ||
| 1996 | 2076 | ||
| 1997 | INIT_LIST_HEAD(&still_in_list); | 2077 | INIT_LIST_HEAD(&still_in_list); |
| 1998 | while (count < target && !list_empty(&dev_priv->mm.bound_list)) { | 2078 | while (count < target && !list_empty(phase->list)) { |
| 1999 | struct drm_i915_gem_object *obj; | 2079 | struct drm_i915_gem_object *obj; |
| 2000 | struct i915_vma *vma, *v; | 2080 | struct i915_vma *vma, *v; |
| 2001 | 2081 | ||
| 2002 | obj = list_first_entry(&dev_priv->mm.bound_list, | 2082 | obj = list_first_entry(phase->list, |
| 2003 | typeof(*obj), global_list); | 2083 | typeof(*obj), global_list); |
| 2004 | list_move_tail(&obj->global_list, &still_in_list); | 2084 | list_move_tail(&obj->global_list, &still_in_list); |
| 2005 | 2085 | ||
| 2006 | if (!i915_gem_object_is_purgeable(obj) && purgeable_only) | 2086 | if (flags & I915_SHRINK_PURGEABLE && |
| 2087 | !i915_gem_object_is_purgeable(obj)) | ||
| 2007 | continue; | 2088 | continue; |
| 2008 | 2089 | ||
| 2009 | drm_gem_object_reference(&obj->base); | 2090 | drm_gem_object_reference(&obj->base); |
| 2010 | 2091 | ||
| 2011 | list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) | 2092 | /* For the unbound phase, this should be a no-op! */ |
| 2093 | list_for_each_entry_safe(vma, v, | ||
| 2094 | &obj->vma_list, vma_link) | ||
| 2012 | if (i915_vma_unbind(vma)) | 2095 | if (i915_vma_unbind(vma)) |
| 2013 | break; | 2096 | break; |
| 2014 | 2097 | ||
| @@ -2017,7 +2100,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, | |||
| 2017 | 2100 | ||
| 2018 | drm_gem_object_unreference(&obj->base); | 2101 | drm_gem_object_unreference(&obj->base); |
| 2019 | } | 2102 | } |
| 2020 | list_splice(&still_in_list, &dev_priv->mm.bound_list); | 2103 | list_splice(&still_in_list, phase->list); |
| 2021 | } | 2104 | } |
| 2022 | 2105 | ||
| 2023 | return count; | 2106 | return count; |
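The shrinker hunk above folds the two nearly identical unbound/bound loops into a single loop driven by a table of { list, flag } phases. The self-contained sketch below shows only that table-driven pattern under invented names (shrink, SHRINK_UNBOUND, SHRINK_BOUND, struct phase); the real code walks list_heads of GEM objects and frees their page arrays instead of printing a string.

        /* Standalone sketch of the phase-table loop; names are illustrative. */
        #include <stdio.h>

        #define SHRINK_UNBOUND  (1u << 0)
        #define SHRINK_BOUND    (1u << 1)

        struct phase {
                const char *list_name;  /* stand-in for the real list_head */
                unsigned int bit;
        };

        static unsigned long shrink(unsigned int flags)
        {
                static const struct phase phases[] = {
                        { "unbound_list", SHRINK_UNBOUND },
                        { "bound_list",   SHRINK_BOUND },
                        { NULL, 0 },
                };
                const struct phase *phase;
                unsigned long count = 0;

                /* One loop body, selected per phase by the caller's flags. */
                for (phase = phases; phase->list_name; phase++) {
                        if ((flags & phase->bit) == 0)
                                continue;
                        printf("shrinking %s\n", phase->list_name);
                        count++;
                }
                return count;
        }

        int main(void)
        {
                return shrink(SHRINK_UNBOUND | SHRINK_BOUND) == 2 ? 0 : 1;
        }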
| @@ -2122,6 +2205,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) | |||
| 2122 | if (i915_gem_object_needs_bit17_swizzle(obj)) | 2205 | if (i915_gem_object_needs_bit17_swizzle(obj)) |
| 2123 | i915_gem_object_do_bit_17_swizzle(obj); | 2206 | i915_gem_object_do_bit_17_swizzle(obj); |
| 2124 | 2207 | ||
| 2208 | if (obj->tiling_mode != I915_TILING_NONE && | ||
| 2209 | dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) | ||
| 2210 | i915_gem_object_pin_pages(obj); | ||
| 2211 | |||
| 2125 | return 0; | 2212 | return 0; |
| 2126 | 2213 | ||
| 2127 | err_pages: | 2214 | err_pages: |
| @@ -2420,15 +2507,13 @@ int __i915_add_request(struct intel_engine_cs *ring, | |||
| 2420 | ring->outstanding_lazy_seqno = 0; | 2507 | ring->outstanding_lazy_seqno = 0; |
| 2421 | ring->preallocated_lazy_request = NULL; | 2508 | ring->preallocated_lazy_request = NULL; |
| 2422 | 2509 | ||
| 2423 | if (!dev_priv->ums.mm_suspended) { | 2510 | i915_queue_hangcheck(ring->dev); |
| 2424 | i915_queue_hangcheck(ring->dev); | ||
| 2425 | 2511 | ||
| 2426 | cancel_delayed_work_sync(&dev_priv->mm.idle_work); | 2512 | cancel_delayed_work_sync(&dev_priv->mm.idle_work); |
| 2427 | queue_delayed_work(dev_priv->wq, | 2513 | queue_delayed_work(dev_priv->wq, |
| 2428 | &dev_priv->mm.retire_work, | 2514 | &dev_priv->mm.retire_work, |
| 2429 | round_jiffies_up_relative(HZ)); | 2515 | round_jiffies_up_relative(HZ)); |
| 2430 | intel_mark_busy(dev_priv->dev); | 2516 | intel_mark_busy(dev_priv->dev); |
| 2431 | } | ||
| 2432 | 2517 | ||
| 2433 | if (out_seqno) | 2518 | if (out_seqno) |
| 2434 | *out_seqno = request->seqno; | 2519 | *out_seqno = request->seqno; |
| @@ -2495,12 +2580,20 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv, | |||
| 2495 | 2580 | ||
| 2496 | static void i915_gem_free_request(struct drm_i915_gem_request *request) | 2581 | static void i915_gem_free_request(struct drm_i915_gem_request *request) |
| 2497 | { | 2582 | { |
| 2583 | struct intel_context *ctx = request->ctx; | ||
| 2584 | |||
| 2498 | list_del(&request->list); | 2585 | list_del(&request->list); |
| 2499 | i915_gem_request_remove_from_client(request); | 2586 | i915_gem_request_remove_from_client(request); |
| 2500 | 2587 | ||
| 2501 | if (request->ctx) | 2588 | if (ctx) { |
| 2502 | i915_gem_context_unreference(request->ctx); | 2589 | if (i915.enable_execlists) { |
| 2590 | struct intel_engine_cs *ring = request->ring; | ||
| 2503 | 2591 | ||
| 2592 | if (ctx != ring->default_context) | ||
| 2593 | intel_lr_context_unpin(ring, ctx); | ||
| 2594 | } | ||
| 2595 | i915_gem_context_unreference(ctx); | ||
| 2596 | } | ||
| 2504 | kfree(request); | 2597 | kfree(request); |
| 2505 | } | 2598 | } |
| 2506 | 2599 | ||
| @@ -2555,6 +2648,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, | |||
| 2555 | } | 2648 | } |
| 2556 | 2649 | ||
| 2557 | /* | 2650 | /* |
| 2651 | * Clear the execlists queue up before freeing the requests, as those | ||
| 2652 | * are the ones that keep the context and ringbuffer backing objects | ||
| 2653 | * pinned in place. | ||
| 2654 | */ | ||
| 2655 | while (!list_empty(&ring->execlist_queue)) { | ||
| 2656 | struct intel_ctx_submit_request *submit_req; | ||
| 2657 | |||
| 2658 | submit_req = list_first_entry(&ring->execlist_queue, | ||
| 2659 | struct intel_ctx_submit_request, | ||
| 2660 | execlist_link); | ||
| 2661 | list_del(&submit_req->execlist_link); | ||
| 2662 | intel_runtime_pm_put(dev_priv); | ||
| 2663 | i915_gem_context_unreference(submit_req->ctx); | ||
| 2664 | kfree(submit_req); | ||
| 2665 | } | ||
| 2666 | |||
| 2667 | /* | ||
| 2558 | * We must free the requests after all the corresponding objects have | 2668 | * We must free the requests after all the corresponding objects have |
| 2559 | * been moved off active lists. Which is the same order as the normal | 2669 | * been moved off active lists. Which is the same order as the normal |
| 2560 | * retire_requests function does. This is important if object hold | 2670 | * retire_requests function does. This is important if object hold |
| @@ -2571,18 +2681,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, | |||
| 2571 | i915_gem_free_request(request); | 2681 | i915_gem_free_request(request); |
| 2572 | } | 2682 | } |
| 2573 | 2683 | ||
| 2574 | while (!list_empty(&ring->execlist_queue)) { | ||
| 2575 | struct intel_ctx_submit_request *submit_req; | ||
| 2576 | |||
| 2577 | submit_req = list_first_entry(&ring->execlist_queue, | ||
| 2578 | struct intel_ctx_submit_request, | ||
| 2579 | execlist_link); | ||
| 2580 | list_del(&submit_req->execlist_link); | ||
| 2581 | intel_runtime_pm_put(dev_priv); | ||
| 2582 | i915_gem_context_unreference(submit_req->ctx); | ||
| 2583 | kfree(submit_req); | ||
| 2584 | } | ||
| 2585 | |||
| 2586 | /* These may not have been flushed before the reset, do so now */ | 2684 |
| 2587 | kfree(ring->preallocated_lazy_request); | 2685 | kfree(ring->preallocated_lazy_request); |
| 2588 | ring->preallocated_lazy_request = NULL; | 2686 | ring->preallocated_lazy_request = NULL; |
| @@ -2719,6 +2817,15 @@ i915_gem_retire_requests(struct drm_device *dev) | |||
| 2719 | for_each_ring(ring, dev_priv, i) { | 2817 | for_each_ring(ring, dev_priv, i) { |
| 2720 | i915_gem_retire_requests_ring(ring); | 2818 | i915_gem_retire_requests_ring(ring); |
| 2721 | idle &= list_empty(&ring->request_list); | 2819 | idle &= list_empty(&ring->request_list); |
| 2820 | if (i915.enable_execlists) { | ||
| 2821 | unsigned long flags; | ||
| 2822 | |||
| 2823 | spin_lock_irqsave(&ring->execlist_lock, flags); | ||
| 2824 | idle &= list_empty(&ring->execlist_queue); | ||
| 2825 | spin_unlock_irqrestore(&ring->execlist_lock, flags); | ||
| 2826 | |||
| 2827 | intel_execlists_retire_requests(ring); | ||
| 2828 | } | ||
| 2722 | } | 2829 | } |
| 2723 | 2830 | ||
| 2724 | if (idle) | 2831 | if (idle) |
| @@ -2811,6 +2918,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
| 2811 | u32 seqno = 0; | 2918 | u32 seqno = 0; |
| 2812 | int ret = 0; | 2919 | int ret = 0; |
| 2813 | 2920 | ||
| 2921 | if (args->flags != 0) | ||
| 2922 | return -EINVAL; | ||
| 2923 | |||
| 2814 | ret = i915_mutex_lock_interruptible(dev); | 2924 | ret = i915_mutex_lock_interruptible(dev); |
| 2815 | if (ret) | 2925 | if (ret) |
| 2816 | return ret; | 2926 | return ret; |
| @@ -2846,8 +2956,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
| 2846 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | 2956 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
| 2847 | mutex_unlock(&dev->struct_mutex); | 2957 | mutex_unlock(&dev->struct_mutex); |
| 2848 | 2958 | ||
| 2849 | return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns, | 2959 | return __i915_wait_seqno(ring, seqno, reset_counter, true, |
| 2850 | file->driver_priv); | 2960 | &args->timeout_ns, file->driver_priv); |
| 2851 | 2961 | ||
| 2852 | out: | 2962 | out: |
| 2853 | drm_gem_object_unreference(&obj->base); | 2963 | drm_gem_object_unreference(&obj->base); |
| @@ -3166,6 +3276,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg, | |||
| 3166 | obj->stride, obj->tiling_mode); | 3276 | obj->stride, obj->tiling_mode); |
| 3167 | 3277 | ||
| 3168 | switch (INTEL_INFO(dev)->gen) { | 3278 | switch (INTEL_INFO(dev)->gen) { |
| 3279 | case 9: | ||
| 3169 | case 8: | 3280 | case 8: |
| 3170 | case 7: | 3281 | case 7: |
| 3171 | case 6: | 3282 | case 6: |
| @@ -3384,46 +3495,6 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma, | |||
| 3384 | return true; | 3495 | return true; |
| 3385 | } | 3496 | } |
| 3386 | 3497 | ||
| 3387 | static void i915_gem_verify_gtt(struct drm_device *dev) | ||
| 3388 | { | ||
| 3389 | #if WATCH_GTT | ||
| 3390 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 3391 | struct drm_i915_gem_object *obj; | ||
| 3392 | int err = 0; | ||
| 3393 | |||
| 3394 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) { | ||
| 3395 | if (obj->gtt_space == NULL) { | ||
| 3396 | printk(KERN_ERR "object found on GTT list with no space reserved\n"); | ||
| 3397 | err++; | ||
| 3398 | continue; | ||
| 3399 | } | ||
| 3400 | |||
| 3401 | if (obj->cache_level != obj->gtt_space->color) { | ||
| 3402 | printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", | ||
| 3403 | i915_gem_obj_ggtt_offset(obj), | ||
| 3404 | i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj), | ||
| 3405 | obj->cache_level, | ||
| 3406 | obj->gtt_space->color); | ||
| 3407 | err++; | ||
| 3408 | continue; | ||
| 3409 | } | ||
| 3410 | |||
| 3411 | if (!i915_gem_valid_gtt_space(dev, | ||
| 3412 | obj->gtt_space, | ||
| 3413 | obj->cache_level)) { | ||
| 3414 | printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", | ||
| 3415 | i915_gem_obj_ggtt_offset(obj), | ||
| 3416 | i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj), | ||
| 3417 | obj->cache_level); | ||
| 3418 | err++; | ||
| 3419 | continue; | ||
| 3420 | } | ||
| 3421 | } | ||
| 3422 | |||
| 3423 | WARN_ON(err); | ||
| 3424 | #endif | ||
| 3425 | } | ||
| 3426 | |||
| 3427 | /** | 3498 | /** |
| 3428 | * Finds free space in the GTT aperture and binds the object there. | 3499 | * Finds free space in the GTT aperture and binds the object there. |
| 3429 | */ | 3500 | */ |
| @@ -3514,25 +3585,10 @@ search_free: | |||
| 3514 | list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); | 3585 | list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); |
| 3515 | list_add_tail(&vma->mm_list, &vm->inactive_list); | 3586 | list_add_tail(&vma->mm_list, &vm->inactive_list); |
| 3516 | 3587 | ||
| 3517 | if (i915_is_ggtt(vm)) { | ||
| 3518 | bool mappable, fenceable; | ||
| 3519 | |||
| 3520 | fenceable = (vma->node.size == fence_size && | ||
| 3521 | (vma->node.start & (fence_alignment - 1)) == 0); | ||
| 3522 | |||
| 3523 | mappable = (vma->node.start + obj->base.size <= | ||
| 3524 | dev_priv->gtt.mappable_end); | ||
| 3525 | |||
| 3526 | obj->map_and_fenceable = mappable && fenceable; | ||
| 3527 | } | ||
| 3528 | |||
| 3529 | WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); | ||
| 3530 | |||
| 3531 | trace_i915_vma_bind(vma, flags); | 3588 | trace_i915_vma_bind(vma, flags); |
| 3532 | vma->bind_vma(vma, obj->cache_level, | 3589 | vma->bind_vma(vma, obj->cache_level, |
| 3533 | flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0); | 3590 | flags & PIN_GLOBAL ? GLOBAL_BIND : 0); |
| 3534 | 3591 | ||
| 3535 | i915_gem_verify_gtt(dev); | ||
| 3536 | return vma; | 3592 | return vma; |
| 3537 | 3593 | ||
| 3538 | err_remove_node: | 3594 | err_remove_node: |
| @@ -3560,7 +3616,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj, | |||
| 3560 | * Stolen memory is always coherent with the GPU as it is explicitly | 3616 | * Stolen memory is always coherent with the GPU as it is explicitly |
| 3561 | * marked as wc by the system, or the system is cache-coherent. | 3617 | * marked as wc by the system, or the system is cache-coherent. |
| 3562 | */ | 3618 | */ |
| 3563 | if (obj->stolen) | 3619 | if (obj->stolen || obj->phys_handle) |
| 3564 | return false; | 3620 | return false; |
| 3565 | 3621 | ||
| 3566 | /* If the GPU is snooping the contents of the CPU cache, | 3622 | /* If the GPU is snooping the contents of the CPU cache, |
| @@ -3739,7 +3795,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | |||
| 3739 | list_for_each_entry(vma, &obj->vma_list, vma_link) | 3795 | list_for_each_entry(vma, &obj->vma_list, vma_link) |
| 3740 | if (drm_mm_node_allocated(&vma->node)) | 3796 | if (drm_mm_node_allocated(&vma->node)) |
| 3741 | vma->bind_vma(vma, cache_level, | 3797 | vma->bind_vma(vma, cache_level, |
| 3742 | obj->has_global_gtt_mapping ? GLOBAL_BIND : 0); | 3798 | vma->bound & GLOBAL_BIND); |
| 3743 | } | 3799 | } |
| 3744 | 3800 | ||
| 3745 | list_for_each_entry(vma, &obj->vma_list, vma_link) | 3801 | list_for_each_entry(vma, &obj->vma_list, vma_link) |
| @@ -3769,7 +3825,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | |||
| 3769 | old_write_domain); | 3825 | old_write_domain); |
| 3770 | } | 3826 | } |
| 3771 | 3827 | ||
| 3772 | i915_gem_verify_gtt(dev); | ||
| 3773 | return 0; | 3828 | return 0; |
| 3774 | } | 3829 | } |
| 3775 | 3830 | ||
| @@ -4067,7 +4122,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
| 4067 | if (seqno == 0) | 4122 | if (seqno == 0) |
| 4068 | return 0; | 4123 | return 0; |
| 4069 | 4124 | ||
| 4070 | ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL); | 4125 | ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL); |
| 4071 | if (ret == 0) | 4126 | if (ret == 0) |
| 4072 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); | 4127 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); |
| 4073 | 4128 | ||
| @@ -4101,6 +4156,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, | |||
| 4101 | { | 4156 | { |
| 4102 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 4157 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
| 4103 | struct i915_vma *vma; | 4158 | struct i915_vma *vma; |
| 4159 | unsigned bound; | ||
| 4104 | int ret; | 4160 | int ret; |
| 4105 | 4161 | ||
| 4106 | if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base)) | 4162 | if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base)) |
| @@ -4109,6 +4165,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, | |||
| 4109 | if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm))) | 4165 | if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm))) |
| 4110 | return -EINVAL; | 4166 | return -EINVAL; |
| 4111 | 4167 | ||
| 4168 | if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE)) | ||
| 4169 | return -EINVAL; | ||
| 4170 | |||
| 4112 | vma = i915_gem_obj_to_vma(obj, vm); | 4171 | vma = i915_gem_obj_to_vma(obj, vm); |
| 4113 | if (vma) { | 4172 | if (vma) { |
| 4114 | if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) | 4173 | if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) |
| @@ -4130,15 +4189,39 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, | |||
| 4130 | } | 4189 | } |
| 4131 | } | 4190 | } |
| 4132 | 4191 | ||
| 4192 | bound = vma ? vma->bound : 0; | ||
| 4133 | if (vma == NULL || !drm_mm_node_allocated(&vma->node)) { | 4193 | if (vma == NULL || !drm_mm_node_allocated(&vma->node)) { |
| 4134 | vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags); | 4194 | vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags); |
| 4135 | if (IS_ERR(vma)) | 4195 | if (IS_ERR(vma)) |
| 4136 | return PTR_ERR(vma); | 4196 | return PTR_ERR(vma); |
| 4137 | } | 4197 | } |
| 4138 | 4198 | ||
| 4139 | if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping) | 4199 | if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) |
| 4140 | vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND); | 4200 | vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND); |
| 4141 | 4201 | ||
| 4202 | if ((bound ^ vma->bound) & GLOBAL_BIND) { | ||
| 4203 | bool mappable, fenceable; | ||
| 4204 | u32 fence_size, fence_alignment; | ||
| 4205 | |||
| 4206 | fence_size = i915_gem_get_gtt_size(obj->base.dev, | ||
| 4207 | obj->base.size, | ||
| 4208 | obj->tiling_mode); | ||
| 4209 | fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, | ||
| 4210 | obj->base.size, | ||
| 4211 | obj->tiling_mode, | ||
| 4212 | true); | ||
| 4213 | |||
| 4214 | fenceable = (vma->node.size == fence_size && | ||
| 4215 | (vma->node.start & (fence_alignment - 1)) == 0); | ||
| 4216 | |||
| 4217 | mappable = (vma->node.start + obj->base.size <= | ||
| 4218 | dev_priv->gtt.mappable_end); | ||
| 4219 | |||
| 4220 | obj->map_and_fenceable = mappable && fenceable; | ||
| 4221 | } | ||
| 4222 | |||
| 4223 | WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); | ||
| 4224 | |||
| 4142 | vma->pin_count++; | 4225 | vma->pin_count++; |
| 4143 | if (flags & PIN_MAPPABLE) | 4226 | if (flags & PIN_MAPPABLE) |
| 4144 | obj->pin_mappable |= true; | 4227 | obj->pin_mappable |= true; |
| @@ -4193,7 +4276,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
| 4193 | struct drm_i915_gem_object *obj; | 4276 | struct drm_i915_gem_object *obj; |
| 4194 | int ret; | 4277 | int ret; |
| 4195 | 4278 | ||
| 4196 | if (INTEL_INFO(dev)->gen >= 6) | 4279 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
| 4197 | return -ENODEV; | 4280 | return -ENODEV; |
| 4198 | 4281 | ||
| 4199 | ret = i915_mutex_lock_interruptible(dev); | 4282 | ret = i915_mutex_lock_interruptible(dev); |
| @@ -4249,6 +4332,9 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | |||
| 4249 | struct drm_i915_gem_object *obj; | 4332 | struct drm_i915_gem_object *obj; |
| 4250 | int ret; | 4333 | int ret; |
| 4251 | 4334 | ||
| 4335 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
| 4336 | return -ENODEV; | ||
| 4337 | |||
| 4252 | ret = i915_mutex_lock_interruptible(dev); | 4338 | ret = i915_mutex_lock_interruptible(dev); |
| 4253 | if (ret) | 4339 | if (ret) |
| 4254 | return ret; | 4340 | return ret; |
| @@ -4326,6 +4412,7 @@ int | |||
| 4326 | i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | 4412 | i915_gem_madvise_ioctl(struct drm_device *dev, void *data, |
| 4327 | struct drm_file *file_priv) | 4413 | struct drm_file *file_priv) |
| 4328 | { | 4414 | { |
| 4415 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 4329 | struct drm_i915_gem_madvise *args = data; | 4416 | struct drm_i915_gem_madvise *args = data; |
| 4330 | struct drm_i915_gem_object *obj; | 4417 | struct drm_i915_gem_object *obj; |
| 4331 | int ret; | 4418 | int ret; |
| @@ -4353,6 +4440,15 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
| 4353 | goto out; | 4440 | goto out; |
| 4354 | } | 4441 | } |
| 4355 | 4442 | ||
| 4443 | if (obj->pages && | ||
| 4444 | obj->tiling_mode != I915_TILING_NONE && | ||
| 4445 | dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { | ||
| 4446 | if (obj->madv == I915_MADV_WILLNEED) | ||
| 4447 | i915_gem_object_unpin_pages(obj); | ||
| 4448 | if (args->madv == I915_MADV_WILLNEED) | ||
| 4449 | i915_gem_object_pin_pages(obj); | ||
| 4450 | } | ||
| 4451 | |||
| 4356 | if (obj->madv != __I915_MADV_PURGED) | 4452 | if (obj->madv != __I915_MADV_PURGED) |
| 4357 | obj->madv = args->madv; | 4453 | obj->madv = args->madv; |
| 4358 | 4454 | ||
| @@ -4495,8 +4591,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) | |||
| 4495 | } | 4591 | } |
| 4496 | } | 4592 | } |
| 4497 | 4593 | ||
| 4498 | i915_gem_object_detach_phys(obj); | ||
| 4499 | |||
| 4500 | /* Stolen objects don't hold a ref, but do hold pin count. Fix that up | 4594 | /* Stolen objects don't hold a ref, but do hold pin count. Fix that up |
| 4501 | * before progressing. */ | 4595 | * before progressing. */ |
| 4502 | if (obj->stolen) | 4596 | if (obj->stolen) |
| @@ -4504,6 +4598,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) | |||
| 4504 | 4598 | ||
| 4505 | WARN_ON(obj->frontbuffer_bits); | 4599 | WARN_ON(obj->frontbuffer_bits); |
| 4506 | 4600 | ||
| 4601 | if (obj->pages && obj->madv == I915_MADV_WILLNEED && | ||
| 4602 | dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES && | ||
| 4603 | obj->tiling_mode != I915_TILING_NONE) | ||
| 4604 | i915_gem_object_unpin_pages(obj); | ||
| 4605 | |||
| 4507 | if (WARN_ON(obj->pages_pin_count)) | 4606 | if (WARN_ON(obj->pages_pin_count)) |
| 4508 | obj->pages_pin_count = 0; | 4607 | obj->pages_pin_count = 0; |
| 4509 | if (discard_backing_storage(obj)) | 4608 | if (discard_backing_storage(obj)) |
| @@ -4576,9 +4675,6 @@ i915_gem_suspend(struct drm_device *dev) | |||
| 4576 | int ret = 0; | 4675 | int ret = 0; |
| 4577 | 4676 | ||
| 4578 | mutex_lock(&dev->struct_mutex); | 4677 | mutex_lock(&dev->struct_mutex); |
| 4579 | if (dev_priv->ums.mm_suspended) | ||
| 4580 | goto err; | ||
| 4581 | |||
| 4582 | ret = i915_gpu_idle(dev); | 4678 | ret = i915_gpu_idle(dev); |
| 4583 | if (ret) | 4679 | if (ret) |
| 4584 | goto err; | 4680 | goto err; |
| @@ -4589,15 +4685,7 @@ i915_gem_suspend(struct drm_device *dev) | |||
| 4589 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 4685 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
| 4590 | i915_gem_evict_everything(dev); | 4686 | i915_gem_evict_everything(dev); |
| 4591 | 4687 | ||
| 4592 | i915_kernel_lost_context(dev); | ||
| 4593 | i915_gem_stop_ringbuffers(dev); | 4688 | i915_gem_stop_ringbuffers(dev); |
| 4594 | |||
| 4595 | /* Hack! Don't let anybody do execbuf while we don't control the chip. | ||
| 4596 | * We need to replace this with a semaphore, or something. | ||
| 4597 | * And not confound ums.mm_suspended! | ||
| 4598 | */ | ||
| 4599 | dev_priv->ums.mm_suspended = !drm_core_check_feature(dev, | ||
| 4600 | DRIVER_MODESET); | ||
| 4601 | mutex_unlock(&dev->struct_mutex); | 4689 | mutex_unlock(&dev->struct_mutex); |
| 4602 | 4690 | ||
| 4603 | del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); | 4691 | del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); |
| @@ -4888,9 +4976,6 @@ int i915_gem_init(struct drm_device *dev) | |||
| 4888 | } | 4976 | } |
| 4889 | mutex_unlock(&dev->struct_mutex); | 4977 | mutex_unlock(&dev->struct_mutex); |
| 4890 | 4978 | ||
| 4891 | /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */ | ||
| 4892 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
| 4893 | dev_priv->dri1.allow_batchbuffer = 1; | ||
| 4894 | return ret; | 4979 | return ret; |
| 4895 | } | 4980 | } |
| 4896 | 4981 | ||
| @@ -4905,74 +4990,6 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) | |||
| 4905 | dev_priv->gt.cleanup_ring(ring); | 4990 | dev_priv->gt.cleanup_ring(ring); |
| 4906 | } | 4991 | } |
| 4907 | 4992 | ||
| 4908 | int | ||
| 4909 | i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | ||
| 4910 | struct drm_file *file_priv) | ||
| 4911 | { | ||
| 4912 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 4913 | int ret; | ||
| 4914 | |||
| 4915 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
| 4916 | return 0; | ||
| 4917 | |||
| 4918 | if (i915_reset_in_progress(&dev_priv->gpu_error)) { | ||
| 4919 | DRM_ERROR("Reenabling wedged hardware, good luck\n"); | ||
| 4920 | atomic_set(&dev_priv->gpu_error.reset_counter, 0); | ||
| 4921 | } | ||
| 4922 | |||
| 4923 | mutex_lock(&dev->struct_mutex); | ||
| 4924 | dev_priv->ums.mm_suspended = 0; | ||
| 4925 | |||
| 4926 | ret = i915_gem_init_hw(dev); | ||
| 4927 | if (ret != 0) { | ||
| 4928 | mutex_unlock(&dev->struct_mutex); | ||
| 4929 | return ret; | ||
| 4930 | } | ||
| 4931 | |||
| 4932 | BUG_ON(!list_empty(&dev_priv->gtt.base.active_list)); | ||
| 4933 | |||
| 4934 | ret = drm_irq_install(dev, dev->pdev->irq); | ||
| 4935 | if (ret) | ||
| 4936 | goto cleanup_ringbuffer; | ||
| 4937 | mutex_unlock(&dev->struct_mutex); | ||
| 4938 | |||
| 4939 | return 0; | ||
| 4940 | |||
| 4941 | cleanup_ringbuffer: | ||
| 4942 | i915_gem_cleanup_ringbuffer(dev); | ||
| 4943 | dev_priv->ums.mm_suspended = 1; | ||
| 4944 | mutex_unlock(&dev->struct_mutex); | ||
| 4945 | |||
| 4946 | return ret; | ||
| 4947 | } | ||
| 4948 | |||
| 4949 | int | ||
| 4950 | i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, | ||
| 4951 | struct drm_file *file_priv) | ||
| 4952 | { | ||
| 4953 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
| 4954 | return 0; | ||
| 4955 | |||
| 4956 | mutex_lock(&dev->struct_mutex); | ||
| 4957 | drm_irq_uninstall(dev); | ||
| 4958 | mutex_unlock(&dev->struct_mutex); | ||
| 4959 | |||
| 4960 | return i915_gem_suspend(dev); | ||
| 4961 | } | ||
| 4962 | |||
| 4963 | void | ||
| 4964 | i915_gem_lastclose(struct drm_device *dev) | ||
| 4965 | { | ||
| 4966 | int ret; | ||
| 4967 | |||
| 4968 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
| 4969 | return; | ||
| 4970 | |||
| 4971 | ret = i915_gem_suspend(dev); | ||
| 4972 | if (ret) | ||
| 4973 | DRM_ERROR("failed to idle hardware: %d\n", ret); | ||
| 4974 | } | ||
| 4975 | |||
| 4976 | static void | 4993 | static void |
| 4977 | init_ring_lists(struct intel_engine_cs *ring) | 4994 | init_ring_lists(struct intel_engine_cs *ring) |
| 4978 | { | 4995 | { |
| @@ -5119,6 +5136,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file) | |||
| 5119 | return ret; | 5136 | return ret; |
| 5120 | } | 5137 | } |
| 5121 | 5138 | ||
| 5139 | /** | ||
| 5140 | * i915_gem_track_fb - update frontbuffer tracking | ||
| 5141 | * old: current GEM buffer for the frontbuffer slots | ||
| 5142 | * new: new GEM buffer for the frontbuffer slots | ||
| 5143 | * frontbuffer_bits: bitmask of frontbuffer slots | ||
| 5144 | * | ||
| 5145 | * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them | ||
| 5146 | * from @old and setting them in @new. Both @old and @new can be NULL. | ||
| 5147 | */ | ||
| 5122 | void i915_gem_track_fb(struct drm_i915_gem_object *old, | 5148 | void i915_gem_track_fb(struct drm_i915_gem_object *old, |
| 5123 | struct drm_i915_gem_object *new, | 5149 | struct drm_i915_gem_object *new, |
| 5124 | unsigned frontbuffer_bits) | 5150 | unsigned frontbuffer_bits) |
| @@ -5302,7 +5328,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) | |||
| 5302 | struct drm_device *dev = dev_priv->dev; | 5328 | struct drm_device *dev = dev_priv->dev; |
| 5303 | struct drm_i915_gem_object *obj; | 5329 | struct drm_i915_gem_object *obj; |
| 5304 | unsigned long timeout = msecs_to_jiffies(5000) + 1; | 5330 | unsigned long timeout = msecs_to_jiffies(5000) + 1; |
| 5305 | unsigned long pinned, bound, unbound, freed; | 5331 | unsigned long pinned, bound, unbound, freed_pages; |
| 5306 | bool was_interruptible; | 5332 | bool was_interruptible; |
| 5307 | bool unlock; | 5333 | bool unlock; |
| 5308 | 5334 | ||
| @@ -5319,7 +5345,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) | |||
| 5319 | was_interruptible = dev_priv->mm.interruptible; | 5345 | was_interruptible = dev_priv->mm.interruptible; |
| 5320 | dev_priv->mm.interruptible = false; | 5346 | dev_priv->mm.interruptible = false; |
| 5321 | 5347 | ||
| 5322 | freed = i915_gem_shrink_all(dev_priv); | 5348 | freed_pages = i915_gem_shrink_all(dev_priv); |
| 5323 | 5349 | ||
| 5324 | dev_priv->mm.interruptible = was_interruptible; | 5350 | dev_priv->mm.interruptible = was_interruptible; |
| 5325 | 5351 | ||
| @@ -5350,14 +5376,15 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) | |||
| 5350 | if (unlock) | 5376 | if (unlock) |
| 5351 | mutex_unlock(&dev->struct_mutex); | 5377 | mutex_unlock(&dev->struct_mutex); |
| 5352 | 5378 | ||
| 5353 | pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n", | 5379 | if (freed_pages || unbound || bound) |
| 5354 | freed, pinned); | 5380 | pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n", |
| 5381 | freed_pages << PAGE_SHIFT, pinned); | ||
| 5355 | if (unbound || bound) | 5382 | if (unbound || bound) |
| 5356 | pr_err("%lu and %lu bytes still available in the " | 5383 | pr_err("%lu and %lu bytes still available in the " |
| 5357 | "bound and unbound GPU page lists.\n", | 5384 | "bound and unbound GPU page lists.\n", |
| 5358 | bound, unbound); | 5385 | bound, unbound); |
| 5359 | 5386 | ||
| 5360 | *(unsigned long *)ptr += freed; | 5387 | *(unsigned long *)ptr += freed_pages; |
| 5361 | return NOTIFY_DONE; | 5388 | return NOTIFY_DONE; |
| 5362 | } | 5389 | } |
| 5363 | 5390 | ||
