author    Eric Anholt <eric@anholt.net>    2010-05-21 16:26:39 -0400
committer Eric Anholt <eric@anholt.net>    2010-05-26 15:36:00 -0400
commit    62fdfeaf8b1f487060b6e160e7b5cd90287607c9 (patch)
tree      df21e22cc6cc0a3409d35f8e4edd46595a129739 /drivers/gpu/drm
parent    79a78dd6266a4f3e31c800e941ec62e250770a7d (diff)
drm/i915: Move ringbuffer-related code to intel_ringbuffer.c.
This is preparation for supporting multiple ringbuffers on Ironlake.
The non-copy-and-paste changes are:
- de-staticing functions
- I915_GEM_GPU_DOMAINS moving to i915_drv.h to be used by both files.
- i915_gem_add_request had only half its implementation copy-and-pasted
  out of the middle of it.
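
To make the last point concrete, here is a rough sketch of the split (a
simplified illustration only, with the ring emission and request-list
bookkeeping elided; the verbatim code is in the diff below). The
seqno-allocating half of i915_add_request() now lives behind the new
i915_ring_add_request() in intel_ringbuffer.c, while the GEM request
bookkeeping stays in i915_gem.c:

    /* intel_ringbuffer.c -- hardware-facing half: pick the next seqno
     * (skipping the reserved value 0) and emit it to the ring. */
    uint32_t i915_ring_add_request(struct drm_device *dev)
    {
            drm_i915_private_t *dev_priv = dev->dev_private;
            uint32_t seqno = dev_priv->mm.next_gem_seqno;

            if (++dev_priv->mm.next_gem_seqno == 0)
                    dev_priv->mm.next_gem_seqno++;
            /* ... BEGIN_LP_RING()/OUT_RING()/ADVANCE_LP_RING() as in the diff ... */
            return seqno;
    }

    /* i915_gem.c -- GEM-facing half: allocate and queue the
     * drm_i915_gem_request, asking the ring code for the seqno. */
    uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
                              uint32_t flush_domains)
    {
            uint32_t seqno = i915_ring_add_request(dev);
            /* ... queue the request and schedule retirement ... */
            return seqno;
    }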
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/i915/Makefile            |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c          |  77
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          |  18
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          | 427
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          |  35
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  | 568
6 files changed, 592 insertions(+), 534 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 95639017bdb..da78f2c0d90 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
22 intel_fb.o \ 22 intel_fb.o \
23 intel_tv.o \ 23 intel_tv.o \
24 intel_dvo.o \ 24 intel_dvo.o \
25 intel_ringbuffer.o \
25 intel_overlay.o \ 26 intel_overlay.o \
26 dvo_ch7xxx.o \ 27 dvo_ch7xxx.o \
27 dvo_ch7017.o \ 28 dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 22d7497e6fa..a657e331595 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,83 +40,6 @@
40#include <linux/vga_switcheroo.h> 40#include <linux/vga_switcheroo.h>
41#include <linux/slab.h> 41#include <linux/slab.h>
42 42
43/* Really want an OS-independent resettable timer. Would like to have
44 * this loop run for (eg) 3 sec, but have the timer reset every time
45 * the head pointer changes, so that EBUSY only happens if the ring
46 * actually stalls for (eg) 3 seconds.
47 */
48int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
49{
50 drm_i915_private_t *dev_priv = dev->dev_private;
51 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
52 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
53 u32 last_acthd = I915_READ(acthd_reg);
54 u32 acthd;
55 u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
56 int i;
57
58 trace_i915_ring_wait_begin (dev);
59
60 for (i = 0; i < 100000; i++) {
61 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
62 acthd = I915_READ(acthd_reg);
63 ring->space = ring->head - (ring->tail + 8);
64 if (ring->space < 0)
65 ring->space += ring->Size;
66 if (ring->space >= n) {
67 trace_i915_ring_wait_end (dev);
68 return 0;
69 }
70
71 if (dev->primary->master) {
72 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
73 if (master_priv->sarea_priv)
74 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
75 }
76
77
78 if (ring->head != last_head)
79 i = 0;
80 if (acthd != last_acthd)
81 i = 0;
82
83 last_head = ring->head;
84 last_acthd = acthd;
85 msleep_interruptible(10);
86
87 }
88
89 trace_i915_ring_wait_end (dev);
90 return -EBUSY;
91}
92
93/* As a ringbuffer is only allowed to wrap between instructions, fill
94 * the tail with NOOPs.
95 */
96int i915_wrap_ring(struct drm_device *dev)
97{
98 drm_i915_private_t *dev_priv = dev->dev_private;
99 volatile unsigned int *virt;
100 int rem;
101
102 rem = dev_priv->ring.Size - dev_priv->ring.tail;
103 if (dev_priv->ring.space < rem) {
104 int ret = i915_wait_ring(dev, rem, __func__);
105 if (ret)
106 return ret;
107 }
108 dev_priv->ring.space -= rem;
109
110 virt = (unsigned int *)
111 (dev_priv->ring.virtual_start + dev_priv->ring.tail);
112 rem /= 4;
113 while (rem--)
114 *virt++ = MI_NOOP;
115
116 dev_priv->ring.tail = 0;
117
118 return 0;
119}
120 43
121/** 44/**
122 * Sets up the hardware status page for devices that need a physical address 45 * Sets up the hardware status page for devices that need a physical address
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7f797ef1ab3..114653aa9ae 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -31,6 +31,7 @@
31#define _I915_DRV_H_ 31#define _I915_DRV_H_
32 32
33#include "i915_reg.h" 33#include "i915_reg.h"
34#include "i915_drm.h"
34#include "intel_bios.h" 35#include "intel_bios.h"
35#include <linux/io-mapping.h> 36#include <linux/io-mapping.h>
36 37
@@ -55,6 +56,8 @@ enum plane {
55 56
56#define I915_NUM_PIPE 2 57#define I915_NUM_PIPE 2
57 58
59#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
60
58/* Interface history: 61/* Interface history:
59 * 62 *
60 * 1.1: Original. 63 * 1.1: Original.
@@ -849,6 +852,9 @@ extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
849extern int i915_vblank_swap(struct drm_device *dev, void *data, 852extern int i915_vblank_swap(struct drm_device *dev, void *data,
850 struct drm_file *file_priv); 853 struct drm_file *file_priv);
851extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); 854extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
855extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
856void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask);
857void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask);
852 858
853void 859void
854i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 860i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -956,6 +962,8 @@ void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
956 962
957void i915_gem_shrinker_init(void); 963void i915_gem_shrinker_init(void);
958void i915_gem_shrinker_exit(void); 964void i915_gem_shrinker_exit(void);
965int i915_gem_init_pipe_control(struct drm_device *dev);
966void i915_gem_cleanup_pipe_control(struct drm_device *dev);
959 967
960/* i915_gem_tiling.c */ 968/* i915_gem_tiling.c */
961void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 969void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1006,6 +1014,16 @@ static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return;
1006static inline void opregion_enable_asle(struct drm_device *dev) { return; } 1014static inline void opregion_enable_asle(struct drm_device *dev) { return; }
1007#endif 1015#endif
1008 1016
1017/* intel_ringbuffer.c */
1018extern void i915_gem_flush(struct drm_device *dev,
1019 uint32_t invalidate_domains,
1020 uint32_t flush_domains);
1021extern int i915_dispatch_gem_execbuffer(struct drm_device *dev,
1022 struct drm_i915_gem_execbuffer2 *exec,
1023 struct drm_clip_rect *cliprects,
1024 uint64_t exec_offset);
1025extern uint32_t i915_ring_add_request(struct drm_device *dev);
1026
1009/* modesetting */ 1027/* modesetting */
1010extern void intel_modeset_init(struct drm_device *dev); 1028extern void intel_modeset_init(struct drm_device *dev);
1011extern void intel_modeset_cleanup(struct drm_device *dev); 1029extern void intel_modeset_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 112699f71fa..4f2f5f8cdca 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,8 +35,6 @@
35#include <linux/swap.h> 35#include <linux/swap.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37 37
38#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
39
40static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 38static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
41static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); 39static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
42static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); 40static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -1592,22 +1590,6 @@ i915_gem_process_flushing_list(struct drm_device *dev,
1592 } 1590 }
1593 } 1591 }
1594} 1592}
1595
1596#define PIPE_CONTROL_FLUSH(addr) \
1597 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
1598 PIPE_CONTROL_DEPTH_STALL); \
1599 OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
1600 OUT_RING(0); \
1601 OUT_RING(0); \
1602
1603/**
1604 * Creates a new sequence number, emitting a write of it to the status page
1605 * plus an interrupt, which will trigger i915_user_interrupt_handler.
1606 *
1607 * Must be called with struct_lock held.
1608 *
1609 * Returned sequence numbers are nonzero on success.
1610 */
1611uint32_t 1593uint32_t
1612i915_add_request(struct drm_device *dev, struct drm_file *file_priv, 1594i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1613 uint32_t flush_domains) 1595 uint32_t flush_domains)
@@ -1617,7 +1599,6 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1617 struct drm_i915_gem_request *request; 1599 struct drm_i915_gem_request *request;
1618 uint32_t seqno; 1600 uint32_t seqno;
1619 int was_empty; 1601 int was_empty;
1620 RING_LOCALS;
1621 1602
1622 if (file_priv != NULL) 1603 if (file_priv != NULL)
1623 i915_file_priv = file_priv->driver_priv; 1604 i915_file_priv = file_priv->driver_priv;
@@ -1626,55 +1607,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1626 if (request == NULL) 1607 if (request == NULL)
1627 return 0; 1608 return 0;
1628 1609
1629 /* Grab the seqno we're going to make this request be, and bump the 1610 seqno = i915_ring_add_request(dev);
1630 * next (skipping 0 so it can be the reserved no-seqno value).
1631 */
1632 seqno = dev_priv->mm.next_gem_seqno;
1633 dev_priv->mm.next_gem_seqno++;
1634 if (dev_priv->mm.next_gem_seqno == 0)
1635 dev_priv->mm.next_gem_seqno++;
1636
1637 if (HAS_PIPE_CONTROL(dev)) {
1638 u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
1639
1640 /*
1641 * Workaround qword write incoherence by flushing the
1642 * PIPE_NOTIFY buffers out to memory before requesting
1643 * an interrupt.
1644 */
1645 BEGIN_LP_RING(32);
1646 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
1647 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
1648 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
1649 OUT_RING(seqno);
1650 OUT_RING(0);
1651 PIPE_CONTROL_FLUSH(scratch_addr);
1652 scratch_addr += 128; /* write to separate cachelines */
1653 PIPE_CONTROL_FLUSH(scratch_addr);
1654 scratch_addr += 128;
1655 PIPE_CONTROL_FLUSH(scratch_addr);
1656 scratch_addr += 128;
1657 PIPE_CONTROL_FLUSH(scratch_addr);
1658 scratch_addr += 128;
1659 PIPE_CONTROL_FLUSH(scratch_addr);
1660 scratch_addr += 128;
1661 PIPE_CONTROL_FLUSH(scratch_addr);
1662 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
1663 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
1664 PIPE_CONTROL_NOTIFY);
1665 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
1666 OUT_RING(seqno);
1667 OUT_RING(0);
1668 ADVANCE_LP_RING();
1669 } else {
1670 BEGIN_LP_RING(4);
1671 OUT_RING(MI_STORE_DWORD_INDEX);
1672 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1673 OUT_RING(seqno);
1674
1675 OUT_RING(MI_USER_INTERRUPT);
1676 ADVANCE_LP_RING();
1677 }
1678 1611
1679 DRM_DEBUG_DRIVER("%d\n", seqno); 1612 DRM_DEBUG_DRIVER("%d\n", seqno);
1680 1613
@@ -1933,78 +1866,6 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1933 return i915_do_wait_request(dev, seqno, 1); 1866 return i915_do_wait_request(dev, seqno, 1);
1934} 1867}
1935 1868
1936static void
1937i915_gem_flush(struct drm_device *dev,
1938 uint32_t invalidate_domains,
1939 uint32_t flush_domains)
1940{
1941 drm_i915_private_t *dev_priv = dev->dev_private;
1942 uint32_t cmd;
1943 RING_LOCALS;
1944
1945#if WATCH_EXEC
1946 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1947 invalidate_domains, flush_domains);
1948#endif
1949 trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
1950 invalidate_domains, flush_domains);
1951
1952 if (flush_domains & I915_GEM_DOMAIN_CPU)
1953 drm_agp_chipset_flush(dev);
1954
1955 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
1956 /*
1957 * read/write caches:
1958 *
1959 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1960 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
1961 * also flushed at 2d versus 3d pipeline switches.
1962 *
1963 * read-only caches:
1964 *
1965 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1966 * MI_READ_FLUSH is set, and is always flushed on 965.
1967 *
1968 * I915_GEM_DOMAIN_COMMAND may not exist?
1969 *
1970 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1971 * invalidated when MI_EXE_FLUSH is set.
1972 *
1973 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1974 * invalidated with every MI_FLUSH.
1975 *
1976 * TLBs:
1977 *
1978 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1979 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
1980 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1981 * are flushed at any MI_FLUSH.
1982 */
1983
1984 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1985 if ((invalidate_domains|flush_domains) &
1986 I915_GEM_DOMAIN_RENDER)
1987 cmd &= ~MI_NO_WRITE_FLUSH;
1988 if (!IS_I965G(dev)) {
1989 /*
1990 * On the 965, the sampler cache always gets flushed
1991 * and this bit is reserved.
1992 */
1993 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1994 cmd |= MI_READ_FLUSH;
1995 }
1996 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1997 cmd |= MI_EXE_FLUSH;
1998
1999#if WATCH_EXEC
2000 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
2001#endif
2002 BEGIN_LP_RING(2);
2003 OUT_RING(cmd);
2004 OUT_RING(MI_NOOP);
2005 ADVANCE_LP_RING();
2006 }
2007}
2008 1869
2009/** 1870/**
2010 * Ensures that all rendering to the object has completed and the object is 1871 * Ensures that all rendering to the object has completed and the object is
@@ -3545,62 +3406,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3545 return 0; 3406 return 0;
3546} 3407}
3547 3408
3548/** Dispatch a batchbuffer to the ring
3549 */
3550static int
3551i915_dispatch_gem_execbuffer(struct drm_device *dev,
3552 struct drm_i915_gem_execbuffer2 *exec,
3553 struct drm_clip_rect *cliprects,
3554 uint64_t exec_offset)
3555{
3556 drm_i915_private_t *dev_priv = dev->dev_private;
3557 int nbox = exec->num_cliprects;
3558 int i = 0, count;
3559 uint32_t exec_start, exec_len;
3560 RING_LOCALS;
3561
3562 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3563 exec_len = (uint32_t) exec->batch_len;
3564
3565 trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
3566
3567 count = nbox ? nbox : 1;
3568
3569 for (i = 0; i < count; i++) {
3570 if (i < nbox) {
3571 int ret = i915_emit_box(dev, cliprects, i,
3572 exec->DR1, exec->DR4);
3573 if (ret)
3574 return ret;
3575 }
3576
3577 if (IS_I830(dev) || IS_845G(dev)) {
3578 BEGIN_LP_RING(4);
3579 OUT_RING(MI_BATCH_BUFFER);
3580 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3581 OUT_RING(exec_start + exec_len - 4);
3582 OUT_RING(0);
3583 ADVANCE_LP_RING();
3584 } else {
3585 BEGIN_LP_RING(2);
3586 if (IS_I965G(dev)) {
3587 OUT_RING(MI_BATCH_BUFFER_START |
3588 (2 << 6) |
3589 MI_BATCH_NON_SECURE_I965);
3590 OUT_RING(exec_start);
3591 } else {
3592 OUT_RING(MI_BATCH_BUFFER_START |
3593 (2 << 6));
3594 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3595 }
3596 ADVANCE_LP_RING();
3597 }
3598 }
3599
3600 /* XXX breadcrumb */
3601 return 0;
3602}
3603
3604/* Throttle our rendering by waiting until the ring has completed our requests 3409/* Throttle our rendering by waiting until the ring has completed our requests
3605 * emitted over 20 msec ago. 3410 * emitted over 20 msec ago.
3606 * 3411 *
@@ -4615,7 +4420,7 @@ i915_gem_idle(struct drm_device *dev)
4615 * 965+ support PIPE_CONTROL commands, which provide finer grained control 4420 * 965+ support PIPE_CONTROL commands, which provide finer grained control
4616 * over cache flushing. 4421 * over cache flushing.
4617 */ 4422 */
4618static int 4423int
4619i915_gem_init_pipe_control(struct drm_device *dev) 4424i915_gem_init_pipe_control(struct drm_device *dev)
4620{ 4425{
4621 drm_i915_private_t *dev_priv = dev->dev_private; 4426 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4654,73 +4459,7 @@ err:
4654 return ret; 4459 return ret;
4655} 4460}
4656 4461
4657static int 4462void
4658i915_gem_init_hws(struct drm_device *dev)
4659{
4660 drm_i915_private_t *dev_priv = dev->dev_private;
4661 struct drm_gem_object *obj;
4662 struct drm_i915_gem_object *obj_priv;
4663 int ret;
4664
4665 /* If we need a physical address for the status page, it's already
4666 * initialized at driver load time.
4667 */
4668 if (!I915_NEED_GFX_HWS(dev))
4669 return 0;
4670
4671 obj = i915_gem_alloc_object(dev, 4096);
4672 if (obj == NULL) {
4673 DRM_ERROR("Failed to allocate status page\n");
4674 ret = -ENOMEM;
4675 goto err;
4676 }
4677 obj_priv = to_intel_bo(obj);
4678 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4679
4680 ret = i915_gem_object_pin(obj, 4096);
4681 if (ret != 0) {
4682 drm_gem_object_unreference(obj);
4683 goto err_unref;
4684 }
4685
4686 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
4687
4688 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
4689 if (dev_priv->hw_status_page == NULL) {
4690 DRM_ERROR("Failed to map status page.\n");
4691 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4692 ret = -EINVAL;
4693 goto err_unpin;
4694 }
4695
4696 if (HAS_PIPE_CONTROL(dev)) {
4697 ret = i915_gem_init_pipe_control(dev);
4698 if (ret)
4699 goto err_unpin;
4700 }
4701
4702 dev_priv->hws_obj = obj;
4703 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4704 if (IS_GEN6(dev)) {
4705 I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
4706 I915_READ(HWS_PGA_GEN6); /* posting read */
4707 } else {
4708 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4709 I915_READ(HWS_PGA); /* posting read */
4710 }
4711 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4712
4713 return 0;
4714
4715err_unpin:
4716 i915_gem_object_unpin(obj);
4717err_unref:
4718 drm_gem_object_unreference(obj);
4719err:
4720 return 0;
4721}
4722
4723static void
4724i915_gem_cleanup_pipe_control(struct drm_device *dev) 4463i915_gem_cleanup_pipe_control(struct drm_device *dev)
4725{ 4464{
4726 drm_i915_private_t *dev_priv = dev->dev_private; 4465 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4737,166 +4476,6 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev)
4737 dev_priv->seqno_page = NULL; 4476 dev_priv->seqno_page = NULL;
4738} 4477}
4739 4478
4740static void
4741i915_gem_cleanup_hws(struct drm_device *dev)
4742{
4743 drm_i915_private_t *dev_priv = dev->dev_private;
4744 struct drm_gem_object *obj;
4745 struct drm_i915_gem_object *obj_priv;
4746
4747 if (dev_priv->hws_obj == NULL)
4748 return;
4749
4750 obj = dev_priv->hws_obj;
4751 obj_priv = to_intel_bo(obj);
4752
4753 kunmap(obj_priv->pages[0]);
4754 i915_gem_object_unpin(obj);
4755 drm_gem_object_unreference(obj);
4756 dev_priv->hws_obj = NULL;
4757
4758 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4759 dev_priv->hw_status_page = NULL;
4760
4761 if (HAS_PIPE_CONTROL(dev))
4762 i915_gem_cleanup_pipe_control(dev);
4763
4764 /* Write high address into HWS_PGA when disabling. */
4765 I915_WRITE(HWS_PGA, 0x1ffff000);
4766}
4767
4768int
4769i915_gem_init_ringbuffer(struct drm_device *dev)
4770{
4771 drm_i915_private_t *dev_priv = dev->dev_private;
4772 struct drm_gem_object *obj;
4773 struct drm_i915_gem_object *obj_priv;
4774 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
4775 int ret;
4776 u32 head;
4777
4778 ret = i915_gem_init_hws(dev);
4779 if (ret != 0)
4780 return ret;
4781
4782 obj = i915_gem_alloc_object(dev, 128 * 1024);
4783 if (obj == NULL) {
4784 DRM_ERROR("Failed to allocate ringbuffer\n");
4785 i915_gem_cleanup_hws(dev);
4786 return -ENOMEM;
4787 }
4788 obj_priv = to_intel_bo(obj);
4789
4790 ret = i915_gem_object_pin(obj, 4096);
4791 if (ret != 0) {
4792 drm_gem_object_unreference(obj);
4793 i915_gem_cleanup_hws(dev);
4794 return ret;
4795 }
4796
4797 /* Set up the kernel mapping for the ring. */
4798 ring->Size = obj->size;
4799
4800 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
4801 ring->map.size = obj->size;
4802 ring->map.type = 0;
4803 ring->map.flags = 0;
4804 ring->map.mtrr = 0;
4805
4806 drm_core_ioremap_wc(&ring->map, dev);
4807 if (ring->map.handle == NULL) {
4808 DRM_ERROR("Failed to map ringbuffer.\n");
4809 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4810 i915_gem_object_unpin(obj);
4811 drm_gem_object_unreference(obj);
4812 i915_gem_cleanup_hws(dev);
4813 return -EINVAL;
4814 }
4815 ring->ring_obj = obj;
4816 ring->virtual_start = ring->map.handle;
4817
4818 /* Stop the ring if it's running. */
4819 I915_WRITE(PRB0_CTL, 0);
4820 I915_WRITE(PRB0_TAIL, 0);
4821 I915_WRITE(PRB0_HEAD, 0);
4822
4823 /* Initialize the ring. */
4824 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
4825 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4826
4827 /* G45 ring initialization fails to reset head to zero */
4828 if (head != 0) {
4829 DRM_ERROR("Ring head not reset to zero "
4830 "ctl %08x head %08x tail %08x start %08x\n",
4831 I915_READ(PRB0_CTL),
4832 I915_READ(PRB0_HEAD),
4833 I915_READ(PRB0_TAIL),
4834 I915_READ(PRB0_START));
4835 I915_WRITE(PRB0_HEAD, 0);
4836
4837 DRM_ERROR("Ring head forced to zero "
4838 "ctl %08x head %08x tail %08x start %08x\n",
4839 I915_READ(PRB0_CTL),
4840 I915_READ(PRB0_HEAD),
4841 I915_READ(PRB0_TAIL),
4842 I915_READ(PRB0_START));
4843 }
4844
4845 I915_WRITE(PRB0_CTL,
4846 ((obj->size - 4096) & RING_NR_PAGES) |
4847 RING_NO_REPORT |
4848 RING_VALID);
4849
4850 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4851
4852 /* If the head is still not zero, the ring is dead */
4853 if (head != 0) {
4854 DRM_ERROR("Ring initialization failed "
4855 "ctl %08x head %08x tail %08x start %08x\n",
4856 I915_READ(PRB0_CTL),
4857 I915_READ(PRB0_HEAD),
4858 I915_READ(PRB0_TAIL),
4859 I915_READ(PRB0_START));
4860 return -EIO;
4861 }
4862
4863 /* Update our cache of the ring state */
4864 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4865 i915_kernel_lost_context(dev);
4866 else {
4867 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4868 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
4869 ring->space = ring->head - (ring->tail + 8);
4870 if (ring->space < 0)
4871 ring->space += ring->Size;
4872 }
4873
4874 if (IS_I9XX(dev) && !IS_GEN3(dev)) {
4875 I915_WRITE(MI_MODE,
4876 (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
4877 }
4878
4879 return 0;
4880}
4881
4882void
4883i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4884{
4885 drm_i915_private_t *dev_priv = dev->dev_private;
4886
4887 if (dev_priv->ring.ring_obj == NULL)
4888 return;
4889
4890 drm_core_ioremapfree(&dev_priv->ring.map, dev);
4891
4892 i915_gem_object_unpin(dev_priv->ring.ring_obj);
4893 drm_gem_object_unreference(dev_priv->ring.ring_obj);
4894 dev_priv->ring.ring_obj = NULL;
4895 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4896
4897 i915_gem_cleanup_hws(dev);
4898}
4899
4900int 4479int
4901i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 4480i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4902 struct drm_file *file_priv) 4481 struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8c3f0802686..896184bfeb1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -74,7 +74,7 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
74 } 74 }
75} 75}
76 76
77static inline void 77void
78ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) 78ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
79{ 79{
80 if ((dev_priv->gt_irq_mask_reg & mask) != mask) { 80 if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
@@ -115,7 +115,7 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
115 } 115 }
116} 116}
117 117
118static inline void 118void
119i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) 119i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
120{ 120{
121 if ((dev_priv->irq_mask_reg & mask) != mask) { 121 if ((dev_priv->irq_mask_reg & mask) != mask) {
@@ -1006,37 +1006,6 @@ static int i915_emit_irq(struct drm_device * dev)
1006 return dev_priv->counter; 1006 return dev_priv->counter;
1007} 1007}
1008 1008
1009void i915_user_irq_get(struct drm_device *dev)
1010{
1011 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1012 unsigned long irqflags;
1013
1014 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1015 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
1016 if (HAS_PCH_SPLIT(dev))
1017 ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
1018 else
1019 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
1020 }
1021 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
1022}
1023
1024void i915_user_irq_put(struct drm_device *dev)
1025{
1026 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1027 unsigned long irqflags;
1028
1029 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1030 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
1031 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
1032 if (HAS_PCH_SPLIT(dev))
1033 ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
1034 else
1035 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
1036 }
1037 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
1038}
1039
1040void i915_trace_irq_get(struct drm_device *dev, u32 seqno) 1009void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
1041{ 1010{
1042 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1011 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
new file mode 100644
index 00000000000..13a796fafae
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,568 @@
1/*
2 * Copyright © 2008-2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
26 * Xiang Hai hao<haihao.xiang@intel.com>
27 *
28 */
29
30#include "drmP.h"
31#include "drm.h"
32#include "i915_drm.h"
33#include "i915_drv.h"
34#include "i915_trace.h"
35#include "intel_drv.h"
36
37void
38i915_gem_flush(struct drm_device *dev,
39 uint32_t invalidate_domains,
40 uint32_t flush_domains)
41{
42 drm_i915_private_t *dev_priv = dev->dev_private;
43 uint32_t cmd;
44 RING_LOCALS;
45
46#if WATCH_EXEC
47 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
48 invalidate_domains, flush_domains);
49#endif
50 trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
51 invalidate_domains, flush_domains);
52
53 if (flush_domains & I915_GEM_DOMAIN_CPU)
54 drm_agp_chipset_flush(dev);
55
56 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
57 /*
58 * read/write caches:
59 *
60 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
61 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
62 * also flushed at 2d versus 3d pipeline switches.
63 *
64 * read-only caches:
65 *
66 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
67 * MI_READ_FLUSH is set, and is always flushed on 965.
68 *
69 * I915_GEM_DOMAIN_COMMAND may not exist?
70 *
71 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
72 * invalidated when MI_EXE_FLUSH is set.
73 *
74 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
75 * invalidated with every MI_FLUSH.
76 *
77 * TLBs:
78 *
79 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
80 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
81 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
82 * are flushed at any MI_FLUSH.
83 */
84
85 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
86 if ((invalidate_domains|flush_domains) &
87 I915_GEM_DOMAIN_RENDER)
88 cmd &= ~MI_NO_WRITE_FLUSH;
89 if (!IS_I965G(dev)) {
90 /*
91 * On the 965, the sampler cache always gets flushed
92 * and this bit is reserved.
93 */
94 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
95 cmd |= MI_READ_FLUSH;
96 }
97 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
98 cmd |= MI_EXE_FLUSH;
99
100#if WATCH_EXEC
101 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
102#endif
103 BEGIN_LP_RING(2);
104 OUT_RING(cmd);
105 OUT_RING(MI_NOOP);
106 ADVANCE_LP_RING();
107 }
108
109}
110#define PIPE_CONTROL_FLUSH(addr) \
111 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
112 PIPE_CONTROL_DEPTH_STALL); \
113 OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
114 OUT_RING(0); \
115 OUT_RING(0); \
116
117/**
118 * Creates a new sequence number, emitting a write of it to the status page
119 * plus an interrupt, which will trigger i915_user_interrupt_handler.
120 *
121 * Must be called with struct_lock held.
122 *
123 * Returned sequence numbers are nonzero on success.
124 */
125uint32_t
126i915_ring_add_request(struct drm_device *dev)
127{
128 drm_i915_private_t *dev_priv = dev->dev_private;
129 uint32_t seqno;
130 RING_LOCALS;
131
132 /* Grab the seqno we're going to make this request be, and bump the
133 * next (skipping 0 so it can be the reserved no-seqno value).
134 */
135 seqno = dev_priv->mm.next_gem_seqno;
136 dev_priv->mm.next_gem_seqno++;
137 if (dev_priv->mm.next_gem_seqno == 0)
138 dev_priv->mm.next_gem_seqno++;
139
140 if (HAS_PIPE_CONTROL(dev)) {
141 u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
142
143 /*
144 * Workaround qword write incoherence by flushing the
145 * PIPE_NOTIFY buffers out to memory before requesting
146 * an interrupt.
147 */
148 BEGIN_LP_RING(32);
149 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
150 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
151 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
152 OUT_RING(seqno);
153 OUT_RING(0);
154 PIPE_CONTROL_FLUSH(scratch_addr);
155 scratch_addr += 128; /* write to separate cachelines */
156 PIPE_CONTROL_FLUSH(scratch_addr);
157 scratch_addr += 128;
158 PIPE_CONTROL_FLUSH(scratch_addr);
159 scratch_addr += 128;
160 PIPE_CONTROL_FLUSH(scratch_addr);
161 scratch_addr += 128;
162 PIPE_CONTROL_FLUSH(scratch_addr);
163 scratch_addr += 128;
164 PIPE_CONTROL_FLUSH(scratch_addr);
165 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
166 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
167 PIPE_CONTROL_NOTIFY);
168 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
169 OUT_RING(seqno);
170 OUT_RING(0);
171 ADVANCE_LP_RING();
172 } else {
173 BEGIN_LP_RING(4);
174 OUT_RING(MI_STORE_DWORD_INDEX);
175 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
176 OUT_RING(seqno);
177
178 OUT_RING(MI_USER_INTERRUPT);
179 ADVANCE_LP_RING();
180 }
181 return seqno;
182}
183
184void i915_user_irq_get(struct drm_device *dev)
185{
186 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
187 unsigned long irqflags;
188
189 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
190 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
191 if (HAS_PCH_SPLIT(dev))
192 ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
193 else
194 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
195 }
196 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
197}
198
199void i915_user_irq_put(struct drm_device *dev)
200{
201 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
202 unsigned long irqflags;
203
204 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
205 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
206 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
207 if (HAS_PCH_SPLIT(dev))
208 ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
209 else
210 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
211 }
212 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
213}
214
215/** Dispatch a batchbuffer to the ring
216 */
217int
218i915_dispatch_gem_execbuffer(struct drm_device *dev,
219 struct drm_i915_gem_execbuffer2 *exec,
220 struct drm_clip_rect *cliprects,
221 uint64_t exec_offset)
222{
223 drm_i915_private_t *dev_priv = dev->dev_private;
224 int nbox = exec->num_cliprects;
225 int i = 0, count;
226 uint32_t exec_start, exec_len;
227 RING_LOCALS;
228
229 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
230 exec_len = (uint32_t) exec->batch_len;
231
232 trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
233
234 count = nbox ? nbox : 1;
235
236 for (i = 0; i < count; i++) {
237 if (i < nbox) {
238 int ret = i915_emit_box(dev, cliprects, i,
239 exec->DR1, exec->DR4);
240 if (ret)
241 return ret;
242 }
243
244 if (IS_I830(dev) || IS_845G(dev)) {
245 BEGIN_LP_RING(4);
246 OUT_RING(MI_BATCH_BUFFER);
247 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
248 OUT_RING(exec_start + exec_len - 4);
249 OUT_RING(0);
250 ADVANCE_LP_RING();
251 } else {
252 BEGIN_LP_RING(2);
253 if (IS_I965G(dev)) {
254 OUT_RING(MI_BATCH_BUFFER_START |
255 (2 << 6) |
256 MI_BATCH_NON_SECURE_I965);
257 OUT_RING(exec_start);
258 } else {
259 OUT_RING(MI_BATCH_BUFFER_START |
260 (2 << 6));
261 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
262 }
263 ADVANCE_LP_RING();
264 }
265 }
266
267 /* XXX breadcrumb */
268 return 0;
269}
270
271static void
272i915_gem_cleanup_hws(struct drm_device *dev)
273{
274 drm_i915_private_t *dev_priv = dev->dev_private;
275 struct drm_gem_object *obj;
276 struct drm_i915_gem_object *obj_priv;
277
278 if (dev_priv->hws_obj == NULL)
279 return;
280
281 obj = dev_priv->hws_obj;
282 obj_priv = to_intel_bo(obj);
283
284 kunmap(obj_priv->pages[0]);
285 i915_gem_object_unpin(obj);
286 drm_gem_object_unreference(obj);
287 dev_priv->hws_obj = NULL;
288
289 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
290 dev_priv->hw_status_page = NULL;
291
292 if (HAS_PIPE_CONTROL(dev))
293 i915_gem_cleanup_pipe_control(dev);
294
295 /* Write high address into HWS_PGA when disabling. */
296 I915_WRITE(HWS_PGA, 0x1ffff000);
297}
298
299static int
300i915_gem_init_hws(struct drm_device *dev)
301{
302 drm_i915_private_t *dev_priv = dev->dev_private;
303 struct drm_gem_object *obj;
304 struct drm_i915_gem_object *obj_priv;
305 int ret;
306
307 /* If we need a physical address for the status page, it's already
308 * initialized at driver load time.
309 */
310 if (!I915_NEED_GFX_HWS(dev))
311 return 0;
312
313 obj = i915_gem_alloc_object(dev, 4096);
314 if (obj == NULL) {
315 DRM_ERROR("Failed to allocate status page\n");
316 ret = -ENOMEM;
317 goto err;
318 }
319 obj_priv = to_intel_bo(obj);
320 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
321
322 ret = i915_gem_object_pin(obj, 4096);
323 if (ret != 0) {
324 drm_gem_object_unreference(obj);
325 goto err_unref;
326 }
327
328 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
329
330 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
331 if (dev_priv->hw_status_page == NULL) {
332 DRM_ERROR("Failed to map status page.\n");
333 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
334 ret = -EINVAL;
335 goto err_unpin;
336 }
337
338 if (HAS_PIPE_CONTROL(dev)) {
339 ret = i915_gem_init_pipe_control(dev);
340 if (ret)
341 goto err_unpin;
342 }
343
344 dev_priv->hws_obj = obj;
345 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
346 if (IS_GEN6(dev)) {
347 I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
348 I915_READ(HWS_PGA_GEN6); /* posting read */
349 } else {
350 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
351 I915_READ(HWS_PGA); /* posting read */
352 }
353 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
354
355 return 0;
356
357err_unpin:
358 i915_gem_object_unpin(obj);
359err_unref:
360 drm_gem_object_unreference(obj);
361err:
362 return 0;
363}
364
365int
366i915_gem_init_ringbuffer(struct drm_device *dev)
367{
368 drm_i915_private_t *dev_priv = dev->dev_private;
369 struct drm_gem_object *obj;
370 struct drm_i915_gem_object *obj_priv;
371 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
372 int ret;
373 u32 head;
374
375 ret = i915_gem_init_hws(dev);
376 if (ret != 0)
377 return ret;
378
379 obj = i915_gem_alloc_object(dev, 128 * 1024);
380 if (obj == NULL) {
381 DRM_ERROR("Failed to allocate ringbuffer\n");
382 i915_gem_cleanup_hws(dev);
383 return -ENOMEM;
384 }
385 obj_priv = to_intel_bo(obj);
386
387 ret = i915_gem_object_pin(obj, 4096);
388 if (ret != 0) {
389 drm_gem_object_unreference(obj);
390 i915_gem_cleanup_hws(dev);
391 return ret;
392 }
393
394 /* Set up the kernel mapping for the ring. */
395 ring->Size = obj->size;
396
397 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
398 ring->map.size = obj->size;
399 ring->map.type = 0;
400 ring->map.flags = 0;
401 ring->map.mtrr = 0;
402
403 drm_core_ioremap_wc(&ring->map, dev);
404 if (ring->map.handle == NULL) {
405 DRM_ERROR("Failed to map ringbuffer.\n");
406 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
407 i915_gem_object_unpin(obj);
408 drm_gem_object_unreference(obj);
409 i915_gem_cleanup_hws(dev);
410 return -EINVAL;
411 }
412 ring->ring_obj = obj;
413 ring->virtual_start = ring->map.handle;
414
415 /* Stop the ring if it's running. */
416 I915_WRITE(PRB0_CTL, 0);
417 I915_WRITE(PRB0_TAIL, 0);
418 I915_WRITE(PRB0_HEAD, 0);
419
420 /* Initialize the ring. */
421 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
422 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
423
424 /* G45 ring initialization fails to reset head to zero */
425 if (head != 0) {
426 DRM_ERROR("Ring head not reset to zero "
427 "ctl %08x head %08x tail %08x start %08x\n",
428 I915_READ(PRB0_CTL),
429 I915_READ(PRB0_HEAD),
430 I915_READ(PRB0_TAIL),
431 I915_READ(PRB0_START));
432 I915_WRITE(PRB0_HEAD, 0);
433
434 DRM_ERROR("Ring head forced to zero "
435 "ctl %08x head %08x tail %08x start %08x\n",
436 I915_READ(PRB0_CTL),
437 I915_READ(PRB0_HEAD),
438 I915_READ(PRB0_TAIL),
439 I915_READ(PRB0_START));
440 }
441
442 I915_WRITE(PRB0_CTL,
443 ((obj->size - 4096) & RING_NR_PAGES) |
444 RING_NO_REPORT |
445 RING_VALID);
446
447 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
448
449 /* If the head is still not zero, the ring is dead */
450 if (head != 0) {
451 DRM_ERROR("Ring initialization failed "
452 "ctl %08x head %08x tail %08x start %08x\n",
453 I915_READ(PRB0_CTL),
454 I915_READ(PRB0_HEAD),
455 I915_READ(PRB0_TAIL),
456 I915_READ(PRB0_START));
457 return -EIO;
458 }
459
460 /* Update our cache of the ring state */
461 if (!drm_core_check_feature(dev, DRIVER_MODESET))
462 i915_kernel_lost_context(dev);
463 else {
464 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
465 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
466 ring->space = ring->head - (ring->tail + 8);
467 if (ring->space < 0)
468 ring->space += ring->Size;
469 }
470
471 if (IS_I9XX(dev) && !IS_GEN3(dev)) {
472 I915_WRITE(MI_MODE,
473 (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
474 }
475
476 return 0;
477}
478
479void
480i915_gem_cleanup_ringbuffer(struct drm_device *dev)
481{
482 drm_i915_private_t *dev_priv = dev->dev_private;
483
484 if (dev_priv->ring.ring_obj == NULL)
485 return;
486
487 drm_core_ioremapfree(&dev_priv->ring.map, dev);
488
489 i915_gem_object_unpin(dev_priv->ring.ring_obj);
490 drm_gem_object_unreference(dev_priv->ring.ring_obj);
491 dev_priv->ring.ring_obj = NULL;
492 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
493
494 i915_gem_cleanup_hws(dev);
495}
496
497/* As a ringbuffer is only allowed to wrap between instructions, fill
498 * the tail with NOOPs.
499 */
500int i915_wrap_ring(struct drm_device *dev)
501{
502 drm_i915_private_t *dev_priv = dev->dev_private;
503 volatile unsigned int *virt;
504 int rem;
505
506 rem = dev_priv->ring.Size - dev_priv->ring.tail;
507 if (dev_priv->ring.space < rem) {
508 int ret = i915_wait_ring(dev, rem, __func__);
509 if (ret)
510 return ret;
511 }
512 dev_priv->ring.space -= rem;
513
514 virt = (unsigned int *)
515 (dev_priv->ring.virtual_start + dev_priv->ring.tail);
516 rem /= 4;
517 while (rem--)
518 *virt++ = MI_NOOP;
519
520 dev_priv->ring.tail = 0;
521
522 return 0;
523}
524
525int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
526{
527 drm_i915_private_t *dev_priv = dev->dev_private;
528 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
529 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
530 u32 last_acthd = I915_READ(acthd_reg);
531 u32 acthd;
532 u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
533 int i;
534
535 trace_i915_ring_wait_begin (dev);
536
537 for (i = 0; i < 100000; i++) {
538 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
539 acthd = I915_READ(acthd_reg);
540 ring->space = ring->head - (ring->tail + 8);
541 if (ring->space < 0)
542 ring->space += ring->Size;
543 if (ring->space >= n) {
544 trace_i915_ring_wait_end (dev);
545 return 0;
546 }
547
548 if (dev->primary->master) {
549 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
550 if (master_priv->sarea_priv)
551 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
552 }
553
554
555 if (ring->head != last_head)
556 i = 0;
557 if (acthd != last_acthd)
558 i = 0;
559
560 last_head = ring->head;
561 last_acthd = acthd;
562 msleep_interruptible(10);
563
564 }
565
566 trace_i915_ring_wait_end (dev);
567 return -EBUSY;
568}