author	Dave Airlie <airlied@redhat.com>	2013-11-10 03:35:33 -0500
committer	Dave Airlie <airlied@redhat.com>	2013-11-10 03:35:33 -0500
commit	ab0169bb5cc4a5c86756dde662087f9d12302eb0 (patch)
tree	495e668337410f6763480ea1f010213f6399e38c /drivers/gpu/drm/i915/i915_gem_execbuffer.c
parent	8d0a2215931f1ffd77aef65cae2c0becc3f5d560 (diff)
parent	13b3a0a77625c09c84825ef6ba81d957ec207841 (diff)
Merge tag 'bdw-stage1-2013-11-08-v2' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
So here's the Broadwell pull request. From a kernel driver pov there are two areas with big changes in Broadwell:

- Completely new enumerated interrupt bits. On the plus side it now looks fairly uniform and sane.
- Completely new pagetable layout.

To ensure minimal impact on existing platforms we've refactored both the irq and low-level gtt handling code a lot in anticipation of the bdw push. So now bdw enabling in these areas just plugs in a bunch of vfuncs. Otherwise it's all fairly harmless adjusting of switch cases and if-ladders to shovel bdw into the right blocks, so the impact on existing platforms is minimized. I've also merged the bdw-stage1 branch into our -nightly integration branch for the past week to make sure we don't break anything.

Note that there's still quite a flurry of patches floating around, but I figured I'd push this out. I plan to keep the bdw fixes separate from my usual -fixes stream so that you can reject them easily in case it still looks like too much churn. Also, bdw is for now hidden behind the preliminary hw enabling module option, so there's no real pressure to get follow-up patches all into 3.13.

* tag 'bdw-stage1-2013-11-08-v2' of git://people.freedesktop.org/~danvet/drm-intel: (75 commits)
  drm/i915: Mask the vblank interrupt on bdw by default
  drm/i915: Wire up cpu fifo underrun reporting support for bdw
  drm/i915: Optimize gen8_enable|disable_vblank functions
  drm/i915: Wire up pipe CRC support for bdw
  drm/i915: Wire up PCH interrupts for bdw
  drm/i915: Wire up port A aux channel
  drm/i915: Fix up the bdw pipe interrupt enable lists
  drm/i915: Optimize pipe irq handling on bdw
  drm/i915/bdw: Take render error interrupt out of the mask
  drm/i915/bdw: Add BDW PCH check first
  drm/i915: Use hsw_crt_get_config on BDW
  drm/i915/bdw: Change dp aux timeout to 600us on DDIA
  drm/i915/bdw: Enable trickle feed on Broadwell
  drm/i915/bdw: WaSingleSubspanDispatchOnAALinesAndPoints
  drm/i915/bdw: conservative SBE VUE cache mode
  drm/i915/bdw: Limit SDE poly depth FIFO to 2
  drm/i915/bdw: Sampler power bypass disable
  drm/i915/bdw: Disable centroid pixel perf optimization
  drm/i915/bdw: BWGTLB clock gate disable
  drm/i915/bdw: Implement edp PSR workarounds
  ...
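A minimal sketch, using hypothetical names rather than the driver's real structs, of the vfunc approach the message describes: shared code only ever calls through per-platform function pointers, so BDW support is added by filling in a new set of pointers at init time instead of touching the existing code paths.

#include <stdio.h>

struct example_irq_funcs {
	void (*irq_preinstall)(void *dev_priv);
	void (*irq_uninstall)(void *dev_priv);
};

/* BDW-only hooks; the real ones would program the new interrupt registers. */
static void gen8_irq_preinstall(void *dev_priv) { (void)dev_priv; puts("gen8 preinstall"); }
static void gen8_irq_uninstall(void *dev_priv)  { (void)dev_priv; puts("gen8 uninstall"); }

/* Generic setup: pick the per-platform pointers once, at init time. */
static void example_setup_irq_funcs(struct example_irq_funcs *funcs, int gen)
{
	if (gen >= 8) {
		funcs->irq_preinstall = gen8_irq_preinstall;
		funcs->irq_uninstall = gen8_irq_uninstall;
	}
	/* older platforms would fill in their existing hooks here */
}

int main(void)
{
	struct example_irq_funcs funcs = { 0 };

	example_setup_irq_funcs(&funcs, 8);
	funcs.irq_preinstall(NULL);	/* shared code never checks the gen again */
	funcs.irq_uninstall(NULL);
	return 0;
}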
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	35
1 file changed, 32 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 0ce0d47e4b0f..885d595e0e02 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -212,6 +212,7 @@ static int
 relocate_entry_cpu(struct drm_i915_gem_object *obj,
 		   struct drm_i915_gem_relocation_entry *reloc)
 {
+	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
 	char *vaddr;
 	int ret = -EINVAL;
@@ -223,6 +224,19 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
 	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 				reloc->offset >> PAGE_SHIFT));
 	*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+		if (page_offset == 0) {
+			kunmap_atomic(vaddr);
+			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+		}
+
+		*(uint32_t *)(vaddr + page_offset) = 0;
+	}
+
 	kunmap_atomic(vaddr);
 
 	return 0;
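The gen >= 8 branch above reflects Broadwell's new pagetable layout, which allows GPU addresses wider than 32 bits: a relocation entry is now written as two dwords, with the upper dword simply set to 0, and that second dword can land on the page after the one holding the lower dword. A standalone sketch of the page-crossing arithmetic, assuming a 4096-byte page and using hypothetical names (SKETCH_PAGE_SIZE, sketch_offset_in_page) invented for the example:

#include <assert.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u

/* Offset within a page, analogous to the kernel's offset_in_page(). */
static uint32_t sketch_offset_in_page(uint64_t off)
{
	return (uint32_t)(off & (SKETCH_PAGE_SIZE - 1));
}

int main(void)
{
	/* Lower dword sits in the last 4 bytes of a page... */
	uint64_t reloc_offset = SKETCH_PAGE_SIZE - sizeof(uint32_t);
	uint32_t page_offset = sketch_offset_in_page(reloc_offset);

	/* ...so the upper dword wraps to offset 0 of the next page, which is
	 * exactly the "page_offset == 0" case that the code above remaps for. */
	page_offset = sketch_offset_in_page(page_offset + sizeof(uint32_t));
	assert(page_offset == 0);
	return 0;
}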
@@ -253,6 +267,21 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 	reloc_entry = (uint32_t __iomem *)
 		(reloc_page + offset_in_page(reloc->offset));
 	iowrite32(reloc->delta, reloc_entry);
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		reloc_entry += 1;
+
+		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
+			io_mapping_unmap_atomic(reloc_page);
+			reloc_page = io_mapping_map_atomic_wc(
+					dev_priv->gtt.mappable,
+					reloc->offset + sizeof(uint32_t));
+			reloc_entry = reloc_page;
+		}
+
+		iowrite32(0, reloc_entry);
+	}
+
 	io_mapping_unmap_atomic(reloc_page);
 
 	return 0;
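The GTT path applies the same idea with one extra wrinkle: io_mapping_map_atomic_wc() maps a single page of the mappable aperture, so when the upper dword falls on the following page the old atomic mapping has to be dropped and the next page mapped before writing. A tiny standalone sketch of the pointer arithmetic involved (names invented for the example):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t page[2] = { 0, 0 };
	uint32_t *reloc_entry = &page[0];

	/* "reloc_entry += 1" advances by one uint32_t element, i.e. 4 bytes,
	 * landing on the slot where the upper dword of the relocation lives. */
	reloc_entry += 1;
	assert((char *)reloc_entry - (char *)&page[0] == sizeof(uint32_t));
	return 0;
}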
@@ -323,7 +352,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		return 0;
 
 	/* Check that the relocation address is valid... */
-	if (unlikely(reloc->offset > obj->base.size - 4)) {
+	if (unlikely(reloc->offset >
+		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
 		DRM_DEBUG("Relocation beyond object bounds: "
 			  "obj %p target %d offset %d size %d.\n",
 			  obj, reloc->target_handle,
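The widened bounds check follows from the 8-byte relocation write on gen >= 8: the last offset that still leaves room for the whole write moves from size - 4 to size - 8. A quick standalone check of that arithmetic, using a hypothetical 4096-byte object and a helper name (reloc_offset_ok) invented for the example:

#include <assert.h>
#include <stdint.h>

/* Mirrors the adjusted check above: the relocation must fit inside the object. */
static int reloc_offset_ok(uint64_t offset, uint64_t obj_size, int gen)
{
	return offset <= obj_size - (gen >= 8 ? 8 : 4);
}

int main(void)
{
	assert(reloc_offset_ok(4092, 4096, 7));	 /* 4-byte write still fits */
	assert(!reloc_offset_ok(4092, 4096, 8)); /* 8-byte write would overflow */
	assert(reloc_offset_ok(4088, 4096, 8));	 /* last valid gen8 offset */
	return 0;
}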
@@ -1116,8 +1146,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
-	 * hsw should have this fixed, but let's be paranoid and do it
-	 * unconditionally for now. */
+	 * hsw should have this fixed, but bdw mucks it up again. */
 	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
 		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
 