aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2014-09-06 05:28:27 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2014-11-19 15:17:11 -0500
commit5c6c600354adac5f95fd41b178b084ac0182e14c (patch)
tree7c21cb65eea1843a73577964b72f2064e70fc46e /drivers/gpu/drm/i915/intel_ringbuffer.c
parent7ba717cf365d79f2b284e508205ec3d4a05fc41b (diff)
drm/i915: Remove DRI1 ring accessors and API
With the deprecation of UMS, and by association DRI1, we have a tough choice when updating the ring access routines. We either rewrite the DRI1 routines blindly without testing (so likely to be broken) or take the liberty of declaring them no longer supported and remove them entirely. This takes the latter approach. v2: Also remove the DRI1 sarea updates Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> [danvet: Fix rebase conflicts.] Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c104
1 file changed, 4 insertions(+), 100 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0a4f35e735c3..1d01b51ff058 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -589,14 +589,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
589 goto out; 589 goto out;
590 } 590 }
591 591
592 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) 592 ringbuf->head = I915_READ_HEAD(ring);
593 i915_kernel_lost_context(ring->dev); 593 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
594 else { 594 ringbuf->space = intel_ring_space(ringbuf);
595 ringbuf->head = I915_READ_HEAD(ring); 595 ringbuf->last_retired_head = -1;
596 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
597 ringbuf->space = intel_ring_space(ringbuf);
598 ringbuf->last_retired_head = -1;
599 }
600 596
601 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); 597 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
602 598
@@ -1958,13 +1954,6 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
1958 break; 1954 break;
1959 } 1955 }
1960 1956
1961 if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
1962 dev->primary->master) {
1963 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1964 if (master_priv->sarea_priv)
1965 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1966 }
1967
1968 msleep(1); 1957 msleep(1);
1969 1958
1970 if (dev_priv->mm.interruptible && signal_pending(current)) { 1959 if (dev_priv->mm.interruptible && signal_pending(current)) {
@@ -2455,91 +2444,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2455 return intel_init_ring_buffer(dev, ring); 2444 return intel_init_ring_buffer(dev, ring);
2456} 2445}
2457 2446
2458int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
2459{
2460 struct drm_i915_private *dev_priv = dev->dev_private;
2461 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
2462 struct intel_ringbuffer *ringbuf = ring->buffer;
2463 int ret;
2464
2465 if (ringbuf == NULL) {
2466 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
2467 if (!ringbuf)
2468 return -ENOMEM;
2469 ring->buffer = ringbuf;
2470 }
2471
2472 ring->name = "render ring";
2473 ring->id = RCS;
2474 ring->mmio_base = RENDER_RING_BASE;
2475
2476 if (INTEL_INFO(dev)->gen >= 6) {
2477 /* non-kms not supported on gen6+ */
2478 ret = -ENODEV;
2479 goto err_ringbuf;
2480 }
2481
2482 /* Note: gem is not supported on gen5/ilk without kms (the corresponding
2483 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
2484 * the special gen5 functions. */
2485 ring->add_request = i9xx_add_request;
2486 if (INTEL_INFO(dev)->gen < 4)
2487 ring->flush = gen2_render_ring_flush;
2488 else
2489 ring->flush = gen4_render_ring_flush;
2490 ring->get_seqno = ring_get_seqno;
2491 ring->set_seqno = ring_set_seqno;
2492 if (IS_GEN2(dev)) {
2493 ring->irq_get = i8xx_ring_get_irq;
2494 ring->irq_put = i8xx_ring_put_irq;
2495 } else {
2496 ring->irq_get = i9xx_ring_get_irq;
2497 ring->irq_put = i9xx_ring_put_irq;
2498 }
2499 ring->irq_enable_mask = I915_USER_INTERRUPT;
2500 ring->write_tail = ring_write_tail;
2501 if (INTEL_INFO(dev)->gen >= 4)
2502 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2503 else if (IS_I830(dev) || IS_845G(dev))
2504 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
2505 else
2506 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
2507 ring->init = init_render_ring;
2508 ring->cleanup = render_ring_cleanup;
2509
2510 ring->dev = dev;
2511 INIT_LIST_HEAD(&ring->active_list);
2512 INIT_LIST_HEAD(&ring->request_list);
2513
2514 ringbuf->size = size;
2515 ringbuf->effective_size = ringbuf->size;
2516 if (IS_I830(ring->dev) || IS_845G(ring->dev))
2517 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
2518
2519 ringbuf->virtual_start = ioremap_wc(start, size);
2520 if (ringbuf->virtual_start == NULL) {
2521 DRM_ERROR("can not ioremap virtual address for"
2522 " ring buffer\n");
2523 ret = -ENOMEM;
2524 goto err_ringbuf;
2525 }
2526
2527 if (!I915_NEED_GFX_HWS(dev)) {
2528 ret = init_phys_status_page(ring);
2529 if (ret)
2530 goto err_vstart;
2531 }
2532
2533 return 0;
2534
2535err_vstart:
2536 iounmap(ringbuf->virtual_start);
2537err_ringbuf:
2538 kfree(ringbuf);
2539 ring->buffer = NULL;
2540 return ret;
2541}
2542
2543int intel_init_bsd_ring_buffer(struct drm_device *dev) 2447int intel_init_bsd_ring_buffer(struct drm_device *dev)
2544{ 2448{
2545 struct drm_i915_private *dev_priv = dev->dev_private; 2449 struct drm_i915_private *dev_priv = dev->dev_private;