 drivers/gpu/drm/drm_agpsupport.c |  5
 drivers/gpu/drm/i915/i915_drv.h  |  3
 drivers/gpu/drm/i915/i915_gem.c  | 28
 include/drm/drmP.h               |  3
 4 files changed, 23 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 2639be2db9e5..3d33b8252b58 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -464,7 +464,8 @@ DRM_AGP_MEM *
 drm_agp_bind_pages(struct drm_device *dev,
 		   struct page **pages,
 		   unsigned long num_pages,
-		   uint32_t gtt_offset)
+		   uint32_t gtt_offset,
+		   u32 type)
 {
 	DRM_AGP_MEM *mem;
 	int ret, i;
@@ -472,7 +473,7 @@ drm_agp_bind_pages(struct drm_device *dev,
 	DRM_DEBUG("\n");
 
 	mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
-				      AGP_USER_MEMORY);
+				      type);
 	if (mem == NULL) {
 		DRM_ERROR("Failed to allocate memory for %ld pages\n",
 			  num_pages);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index da7c0072ad4c..eae4ed3956e0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -373,6 +373,9 @@ struct drm_i915_gem_object {
 	/** Current tiling mode for the object. */
 	uint32_t tiling_mode;
 
+	/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY) */
+	uint32_t agp_type;
+
 	/**
 	 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
 	 * GEM_DOMAIN_CPU is not in the object's read domain.
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 94b9bdce0c75..99a86249bb1b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1159,7 +1159,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	obj_priv->agp_mem = drm_agp_bind_pages(dev,
 					       obj_priv->page_list,
 					       page_count,
-					       obj_priv->gtt_offset);
+					       obj_priv->gtt_offset,
+					       obj_priv->agp_type);
 	if (obj_priv->agp_mem == NULL) {
 		i915_gem_object_free_page_list(obj);
 		drm_mm_put_block(obj_priv->gtt_space);
@@ -2142,6 +2143,8 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	obj->write_domain = I915_GEM_DOMAIN_CPU;
 	obj->read_domains = I915_GEM_DOMAIN_CPU;
 
+	obj_priv->agp_type = AGP_USER_MEMORY;
+
 	obj->driver_private = obj_priv;
 	obj_priv->obj = obj;
 	INIT_LIST_HEAD(&obj_priv->list);
@@ -2311,6 +2314,7 @@ i915_gem_init_hws(struct drm_device *dev)
 		return -ENOMEM;
 	}
 	obj_priv = obj->driver_private;
+	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
 
 	ret = i915_gem_object_pin(obj, 4096);
 	if (ret != 0) {
@@ -2319,25 +2323,18 @@ i915_gem_init_hws(struct drm_device *dev)
 	}
 
 	dev_priv->status_gfx_addr = obj_priv->gtt_offset;
-	dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
-	dev_priv->hws_map.size = 4096;
-	dev_priv->hws_map.type = 0;
-	dev_priv->hws_map.flags = 0;
-	dev_priv->hws_map.mtrr = 0;
 
-	/* Ioremapping here is the wrong thing to do. We want cached access.
-	 */
-	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
-	if (dev_priv->hws_map.handle == NULL) {
+	dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
+	if (dev_priv->hw_status_page == NULL) {
 		DRM_ERROR("Failed to map status page.\n");
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 		drm_gem_object_unreference(obj);
 		return -EINVAL;
 	}
 	dev_priv->hws_obj = obj;
-	dev_priv->hw_status_page = dev_priv->hws_map.handle;
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+	I915_READ(HWS_PGA); /* posting read */
 	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
 
 	return 0;
@@ -2456,10 +2453,15 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
 
 	if (dev_priv->hws_obj != NULL) {
-		i915_gem_object_unpin(dev_priv->hws_obj);
-		drm_gem_object_unreference(dev_priv->hws_obj);
+		struct drm_gem_object *obj = dev_priv->hws_obj;
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+		kunmap(obj_priv->page_list[0]);
+		i915_gem_object_unpin(obj);
+		drm_gem_object_unreference(obj);
 		dev_priv->hws_obj = NULL;
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+		dev_priv->hw_status_page = NULL;
 
 		/* Write high address into HWS_PGA when disabling. */
 		I915_WRITE(HWS_PGA, 0x1ffff000);
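The i915_gem.c hunks above replace the write-combined ioremap of the hardware status page with a cached kmap() of the object's first backing page, balanced by kunmap() at ring teardown. A minimal sketch of that pairing, assuming the usual kernel headers; the helper names and the bare struct page ** parameter are hypothetical stand-ins for obj_priv->page_list and are not part of the patch.

#include <linux/highmem.h>	/* kmap(), kunmap() */
#include <linux/mm.h>		/* struct page, PAGE_SIZE */
#include <linux/string.h>	/* memset() */

/* Hypothetical helpers mirroring i915_gem_init_hws()/cleanup_ringbuffer(). */
static void *example_map_status_page(struct page **pages)
{
	/* Cached CPU mapping of the first backing page; no ioremap needed. */
	void *hw_status_page = kmap(pages[0]);

	if (hw_status_page != NULL)
		memset(hw_status_page, 0, PAGE_SIZE);
	return hw_status_page;
}

static void example_unmap_status_page(struct page **pages)
{
	/* Each kmap() of the page must be balanced by a kunmap() on teardown. */
	kunmap(pages[0]);
}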
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 90a9e0247d58..59c796b46ee7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1016,7 +1016,8 @@ extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
 extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
 				       struct page **pages,
 				       unsigned long num_pages,
-				       uint32_t gtt_offset);
+				       uint32_t gtt_offset,
+				       uint32_t type);
 extern int drm_unbind_agp(DRM_AGP_MEM * handle);
 
 /* Misc. IOCTL support (drm_ioctl.h) */
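For reference, a minimal sketch (not part of the patch) of a caller using the widened drm_agp_bind_pages() prototype, choosing between the two AGP user-memory types the patch relies on. The function example_bind(), its parameters, and the assumption that the type constants come from <linux/agp_backend.h> are all hypothetical illustration.

#include <linux/agp_backend.h>	/* AGP_USER_MEMORY, AGP_USER_CACHED_MEMORY */
#include "drmP.h"

/* Hypothetical caller: bind pages cached (e.g. a status page) or uncached. */
static int example_bind(struct drm_device *dev, struct page **pages,
			unsigned long num_pages, uint32_t gtt_offset,
			int want_cached)
{
	uint32_t type = want_cached ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
	DRM_AGP_MEM *mem;

	mem = drm_agp_bind_pages(dev, pages, num_pages, gtt_offset, type);
	if (mem == NULL)
		return -ENOMEM;

	/* The returned DRM_AGP_MEM would be kept for a later drm_unbind_agp(). */
	return 0;
}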