author		Ben Widawsky <benjamin.widawsky@intel.com>	2013-11-03 00:07:11 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-11-08 12:09:41 -0500
commit		3c94ceeee27b77fa0fe59844ec6c11e4db189d00
tree		b558dc7f66fd79982933cc451917accaa871d944
parent		a123f157a3c0d1ea0d02af0689b4a389d3f0c992
drm/i915/bdw: Support 64b relocations
We don't actually return any to userspace yet, however we can pretend
like we do now so userspace will support it when it happens. This is
just to please Chris as the code itself isn't ready for > 64b
relocations.

v2: Rebase on top of the refactored relocate_entry_gtt|cpu functions.

v3: Squash in fixup from Rafael Barbalho for 64 byte relocs using cpu
relocs and those crossing a page boundary.

v4: Squash in a fixup for the fixup from Rafael.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net> (v1)
Signed-off-by: Barbalho, Rafael <rafael.barbalho@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
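The interesting part is the page-boundary handling in the CPU path: on
gen8+ a relocation is 64 bits wide, so after the low dword is written,
the upper dword (which this patch always writes as 0) can land on the
following page, which then has to be mapped separately. Below is a
minimal userspace sketch of that split-write logic, assuming 4 KiB
pages and dword-aligned offsets; write_reloc64(), map_page() and
unmap_page() are made-up stand-ins for the kmap_atomic()/
kunmap_atomic() calls in relocate_entry_cpu(), not part of the patch.

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE 4096u
	#define PAGE_MASK (PAGE_SIZE - 1)

	/* Two fake backing pages standing in for a GEM object's pages. */
	static uint8_t pages[2][PAGE_SIZE];

	/* Stand-ins for kmap_atomic()/kunmap_atomic(). */
	static uint8_t *map_page(uint64_t pfn) { return pages[pfn]; }
	static void unmap_page(uint8_t *vaddr) { (void)vaddr; }

	/*
	 * Write one 64-bit relocation as two 32-bit stores, remapping when
	 * the upper dword spills onto the next page -- the same shape as
	 * the gen8 branch added to relocate_entry_cpu().
	 */
	static void write_reloc64(uint64_t offset, uint64_t value)
	{
		uint32_t page_offset = offset & PAGE_MASK;
		uint8_t *vaddr = map_page(offset / PAGE_SIZE);

		*(uint32_t *)(vaddr + page_offset) = (uint32_t)value;

		/* Step one dword; wrapping to 0 means we crossed a page. */
		page_offset = (page_offset + sizeof(uint32_t)) & PAGE_MASK;
		if (page_offset == 0) {
			unmap_page(vaddr);
			vaddr = map_page((offset + sizeof(uint32_t)) / PAGE_SIZE);
		}
		*(uint32_t *)(vaddr + page_offset) = (uint32_t)(value >> 32);

		unmap_page(vaddr);
	}

	int main(void)
	{
		uint32_t lo, hi;

		/* Last dword of page 0: the upper half lands on page 1. */
		write_reloc64(PAGE_SIZE - 4, 0x1122334455667788ull);

		memcpy(&lo, &pages[0][PAGE_SIZE - 4], sizeof(lo));
		memcpy(&hi, &pages[1][0], sizeof(hi));
		/* On little-endian: lo=55667788 hi=11223344 */
		printf("lo=%08" PRIx32 " hi=%08" PRIx32 "\n", lo, hi);
		return 0;
	}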
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c | 32 +++++++++++++++++++++++++++++++-
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 0ce0d47e4b0f..78786c44fe52 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -212,6 +212,7 @@ static int
 relocate_entry_cpu(struct drm_i915_gem_object *obj,
 		   struct drm_i915_gem_relocation_entry *reloc)
 {
+	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
 	char *vaddr;
 	int ret = -EINVAL;
@@ -223,6 +224,19 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
 	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 				reloc->offset >> PAGE_SHIFT));
 	*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+		if (page_offset == 0) {
+			kunmap_atomic(vaddr);
+			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+		}
+
+		*(uint32_t *)(vaddr + page_offset) = 0;
+	}
+
 	kunmap_atomic(vaddr);
 
 	return 0;
@@ -253,6 +267,21 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 	reloc_entry = (uint32_t __iomem *)
 		(reloc_page + offset_in_page(reloc->offset));
 	iowrite32(reloc->delta, reloc_entry);
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		reloc_entry += 1;
+
+		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
+			io_mapping_unmap_atomic(reloc_page);
+			reloc_page = io_mapping_map_atomic_wc(
+					dev_priv->gtt.mappable,
+					reloc->offset + sizeof(uint32_t));
+			reloc_entry = reloc_page;
+		}
+
+		iowrite32(0, reloc_entry);
+	}
+
 	io_mapping_unmap_atomic(reloc_page);
 
 	return 0;
@@ -323,7 +352,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		return 0;
 
 	/* Check that the relocation address is valid... */
-	if (unlikely(reloc->offset > obj->base.size - 4)) {
+	if (unlikely(reloc->offset >
+		     obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
 		DRM_DEBUG("Relocation beyond object bounds: "
 			  "obj %p target %d offset %d size %d.\n",
 			  obj, reloc->target_handle,
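The last hunk tightens the userspace-facing sanity check to match: on
gen8+ a relocation consumes 8 bytes instead of 4, so the largest valid
reloc->offset moves in by one dword. A standalone restatement of that
check, assuming obj_size is at least 8 bytes; reloc_in_bounds() is an
illustrative helper, not a function in the driver.

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Reject relocations whose full width would run past the end of
	 * the object. Assumes obj_size >= 8 (GEM objects are at least a
	 * page), so the subtraction cannot underflow.
	 */
	static bool reloc_in_bounds(uint64_t offset, uint64_t obj_size, int gen)
	{
		uint32_t reloc_size = gen >= 8 ? 8 : 4;	/* 64-bit vs 32-bit reloc */

		return offset <= obj_size - reloc_size;
	}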