author    Daniel Vetter <daniel.vetter@ffwll.ch>    2013-07-25 03:41:59 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>    2013-07-25 09:18:41 -0400
commit    cb54b53adae70701bdd77d848cea4b9b39b61cf9
tree      b9da2ccaf8b2207fd4e9f7ca1905a4500e011731 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent    d861e3387650296f1fca2a4dd0dcd380c8fdddad
parent    549f3a1218ba18fcde11ef0e22b07e6365645788
Merge commit 'Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux'
This backmerges Linus' merge commit of the latest drm-fixes pull:
commit 549f3a1218ba18fcde11ef0e22b07e6365645788
Merge: 42577ca 058ca4a
Author: Linus Torvalds <torvalds@linux-foundation.org>
Date: Tue Jul 23 15:47:08 2013 -0700
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
We've accrued a few too many conflicts, but the real reason is that I
want to merge the 100% solution for Haswell concurrent register
writes into drm-intel-next. But that depends upon the 90% bandaid
merged into -fixes:
commit a7cd1b8fea2f341b626b255d9898a5ca5fabbf0a
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri Jul 19 20:36:51 2013 +0100
drm/i915: Serialize almost all register access
Also, this lets us roll up the accrued conflicts.
Usually I'd backmerge a tagged -rc, but I want to get this done before
heading off on vacation next week ;-)
Conflicts:
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_gem.c
v2: For added hilarity we have an init sequence conflict around the
gt_lock, so we need to move that one, too. Spotted by Jani Nikula.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
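
For context, the idea behind the "90% bandaid" referenced above (commit
a7cd1b8fea2f, "drm/i915: Serialize almost all register access") is to funnel
MMIO register reads and writes through a single spinlock so that accesses
from different CPUs can never overlap. A minimal sketch of that pattern,
using hypothetical names rather than the actual i915 accessor macros:

#include <linux/spinlock.h>
#include <linux/io.h>

/* Hypothetical illustration only: every access to the register window
 * takes the same lock, which serializes concurrent readers and writers. */
static DEFINE_SPINLOCK(mmio_lock);

static u32 guarded_read32(void __iomem *regs, u32 offset)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mmio_lock, flags);
	val = readl(regs + offset);
	spin_unlock_irqrestore(&mmio_lock, flags);

	return val;
}

static void guarded_write32(void __iomem *regs, u32 offset, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&mmio_lock, flags);
	writel(val, regs + offset);
	spin_unlock_irqrestore(&mmio_lock, flags);
}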
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
 drivers/gpu/drm/i915/intel_ringbuffer.c | 38 +++++++++++++++++++++++---------------
 1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 815e30332247..8527ea05124b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -379,6 +379,17 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 	return I915_READ(acthd_reg);
 }
 
+static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 addr;
+
+	addr = dev_priv->status_page_dmah->busaddr;
+	if (INTEL_INFO(ring->dev)->gen >= 4)
+		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+	I915_WRITE(HWS_PGA, addr);
+}
+
 static int init_ring_common(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -390,6 +401,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	if (HAS_FORCE_WAKE(dev))
 		gen6_gt_force_wake_get(dev_priv);
 
+	if (I915_NEED_GFX_HWS(dev))
+		intel_ring_setup_status_page(ring);
+	else
+		ring_setup_phys_status_page(ring);
+
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
@@ -518,9 +534,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
 	struct pipe_control *pc = ring->private;
 	struct drm_i915_gem_object *obj;
 
-	if (!ring->private)
-		return;
-
 	obj = pc->obj;
 
 	kunmap(sg_page(obj->pages->sgl));
@@ -528,7 +541,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
 	drm_gem_object_unreference(&obj->base);
 
 	kfree(pc);
-	ring->private = NULL;
 }
 
 static int init_render_ring(struct intel_ring_buffer *ring)
@@ -601,7 +613,10 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 	if (HAS_BROKEN_CS_TLB(dev))
 		drm_gem_object_unreference(to_gem_object(ring->private));
 
-	cleanup_pipe_control(ring);
+	if (INTEL_INFO(dev)->gen >= 5)
+		cleanup_pipe_control(ring);
+
+	ring->private = NULL;
 }
 
 static void
@@ -1223,7 +1238,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	intel_ring_setup_status_page(ring);
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
 			ring->name, ring->status_page.gfx_addr);
 
@@ -1237,10 +1251,9 @@ err:
 	return ret;
 }
 
-static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+static int init_phys_status_page(struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	u32 addr;
 
 	if (!dev_priv->status_page_dmah) {
 		dev_priv->status_page_dmah =
@@ -1249,11 +1262,6 @@ static int init_phys_hws_pga(struct intel_ring_buffer *ring)
 			return -ENOMEM;
 	}
 
-	addr = dev_priv->status_page_dmah->busaddr;
-	if (INTEL_INFO(ring->dev)->gen >= 4)
-		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
-	I915_WRITE(HWS_PGA, addr);
-
 	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
@@ -1281,7 +1289,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		return ret;
 	} else {
 		BUG_ON(ring->id != RCS);
-		ret = init_phys_hws_pga(ring);
+		ret = init_phys_status_page(ring);
 		if (ret)
 			return ret;
 	}
@@ -1893,7 +1901,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	}
 
 	if (!I915_NEED_GFX_HWS(dev)) {
-		ret = init_phys_hws_pga(ring);
+		ret = init_phys_status_page(ring);
 		if (ret)
 			return ret;
 	}
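
The net effect of the hunks above: hardware status page setup moves into
init_ring_common(), so the status page is reprogrammed on every ring
init/reset — via intel_ring_setup_status_page() when a GTT-backed (GFX)
status page is in use, and via the new ring_setup_phys_status_page() helper
otherwise — instead of only once when the page is allocated. On the cleanup
side, cleanup_pipe_control() no longer checks or clears ring->private;
render_ring_cleanup() now clears it and only calls cleanup_pipe_control()
on gen5+. For readability, here is the helper added by the first hunk,
reproduced without diff markers; the comment is editorial and only describes
what the code does, it is not part of the commit:

/* Point HWS_PGA at the physically addressed (DMA) hardware status page.
 * On gen4+ the helper also folds bits 35:32 of the bus address into
 * bits 7:4 of the register value. */
static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(ring->dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}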