diff options
Diffstat (limited to 'drivers/gpu/drm/i915/i915_dma.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_dma.c | 734 |
1 files changed, 599 insertions, 135 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2a6b5de5ae5..59a2bf8592e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -40,84 +40,6 @@ | |||
40 | #include <linux/vga_switcheroo.h> | 40 | #include <linux/vga_switcheroo.h> |
41 | #include <linux/slab.h> | 41 | #include <linux/slab.h> |
42 | 42 | ||
43 | /* Really want an OS-independent resettable timer. Would like to have | ||
44 | * this loop run for (eg) 3 sec, but have the timer reset every time | ||
45 | * the head pointer changes, so that EBUSY only happens if the ring | ||
46 | * actually stalls for (eg) 3 seconds. | ||
47 | */ | ||
48 | int i915_wait_ring(struct drm_device * dev, int n, const char *caller) | ||
49 | { | ||
50 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
51 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); | ||
52 | u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; | ||
53 | u32 last_acthd = I915_READ(acthd_reg); | ||
54 | u32 acthd; | ||
55 | u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | ||
56 | int i; | ||
57 | |||
58 | trace_i915_ring_wait_begin (dev); | ||
59 | |||
60 | for (i = 0; i < 100000; i++) { | ||
61 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | ||
62 | acthd = I915_READ(acthd_reg); | ||
63 | ring->space = ring->head - (ring->tail + 8); | ||
64 | if (ring->space < 0) | ||
65 | ring->space += ring->Size; | ||
66 | if (ring->space >= n) { | ||
67 | trace_i915_ring_wait_end (dev); | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | if (dev->primary->master) { | ||
72 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
73 | if (master_priv->sarea_priv) | ||
74 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | ||
75 | } | ||
76 | |||
77 | |||
78 | if (ring->head != last_head) | ||
79 | i = 0; | ||
80 | if (acthd != last_acthd) | ||
81 | i = 0; | ||
82 | |||
83 | last_head = ring->head; | ||
84 | last_acthd = acthd; | ||
85 | msleep_interruptible(10); | ||
86 | |||
87 | } | ||
88 | |||
89 | trace_i915_ring_wait_end (dev); | ||
90 | return -EBUSY; | ||
91 | } | ||
92 | |||
93 | /* As a ringbuffer is only allowed to wrap between instructions, fill | ||
94 | * the tail with NOOPs. | ||
95 | */ | ||
96 | int i915_wrap_ring(struct drm_device *dev) | ||
97 | { | ||
98 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
99 | volatile unsigned int *virt; | ||
100 | int rem; | ||
101 | |||
102 | rem = dev_priv->ring.Size - dev_priv->ring.tail; | ||
103 | if (dev_priv->ring.space < rem) { | ||
104 | int ret = i915_wait_ring(dev, rem, __func__); | ||
105 | if (ret) | ||
106 | return ret; | ||
107 | } | ||
108 | dev_priv->ring.space -= rem; | ||
109 | |||
110 | virt = (unsigned int *) | ||
111 | (dev_priv->ring.virtual_start + dev_priv->ring.tail); | ||
112 | rem /= 4; | ||
113 | while (rem--) | ||
114 | *virt++ = MI_NOOP; | ||
115 | |||
116 | dev_priv->ring.tail = 0; | ||
117 | |||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | /** | 43 | /** |
122 | * Sets up the hardware status page for devices that need a physical address | 44 | * Sets up the hardware status page for devices that need a physical address |
123 | * in the register. | 45 | * in the register. |
@@ -133,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev) | |||
133 | DRM_ERROR("Can not allocate hardware status page\n"); | 55 | DRM_ERROR("Can not allocate hardware status page\n"); |
134 | return -ENOMEM; | 56 | return -ENOMEM; |
135 | } | 57 | } |
136 | dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; | 58 | dev_priv->render_ring.status_page.page_addr |
59 | = dev_priv->status_page_dmah->vaddr; | ||
137 | dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; | 60 | dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; |
138 | 61 | ||
139 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | 62 | memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); |
140 | 63 | ||
141 | if (IS_I965G(dev)) | 64 | if (IS_I965G(dev)) |
142 | dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & | 65 | dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & |
@@ -159,8 +82,8 @@ static void i915_free_hws(struct drm_device *dev) | |||
159 | dev_priv->status_page_dmah = NULL; | 82 | dev_priv->status_page_dmah = NULL; |
160 | } | 83 | } |
161 | 84 | ||
162 | if (dev_priv->status_gfx_addr) { | 85 | if (dev_priv->render_ring.status_page.gfx_addr) { |
163 | dev_priv->status_gfx_addr = 0; | 86 | dev_priv->render_ring.status_page.gfx_addr = 0; |
164 | drm_core_ioremapfree(&dev_priv->hws_map, dev); | 87 | drm_core_ioremapfree(&dev_priv->hws_map, dev); |
165 | } | 88 | } |
166 | 89 | ||
@@ -172,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev) | |||
172 | { | 95 | { |
173 | drm_i915_private_t *dev_priv = dev->dev_private; | 96 | drm_i915_private_t *dev_priv = dev->dev_private; |
174 | struct drm_i915_master_private *master_priv; | 97 | struct drm_i915_master_private *master_priv; |
175 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); | 98 | struct intel_ring_buffer *ring = &dev_priv->render_ring; |
176 | 99 | ||
177 | /* | 100 | /* |
178 | * We should never lose context on the ring with modesetting | 101 | * We should never lose context on the ring with modesetting |
@@ -185,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev) | |||
185 | ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; | 108 | ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; |
186 | ring->space = ring->head - (ring->tail + 8); | 109 | ring->space = ring->head - (ring->tail + 8); |
187 | if (ring->space < 0) | 110 | if (ring->space < 0) |
188 | ring->space += ring->Size; | 111 | ring->space += ring->size; |
189 | 112 | ||
190 | if (!dev->primary->master) | 113 | if (!dev->primary->master) |
191 | return; | 114 | return; |
@@ -205,12 +128,9 @@ static int i915_dma_cleanup(struct drm_device * dev) | |||
205 | if (dev->irq_enabled) | 128 | if (dev->irq_enabled) |
206 | drm_irq_uninstall(dev); | 129 | drm_irq_uninstall(dev); |
207 | 130 | ||
208 | if (dev_priv->ring.virtual_start) { | 131 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); |
209 | drm_core_ioremapfree(&dev_priv->ring.map, dev); | 132 | if (HAS_BSD(dev)) |
210 | dev_priv->ring.virtual_start = NULL; | 133 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); |
211 | dev_priv->ring.map.handle = NULL; | ||
212 | dev_priv->ring.map.size = 0; | ||
213 | } | ||
214 | 134 | ||
215 | /* Clear the HWS virtual address at teardown */ | 135 | /* Clear the HWS virtual address at teardown */ |
216 | if (I915_NEED_GFX_HWS(dev)) | 136 | if (I915_NEED_GFX_HWS(dev)) |
@@ -233,24 +153,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
233 | } | 153 | } |
234 | 154 | ||
235 | if (init->ring_size != 0) { | 155 | if (init->ring_size != 0) { |
236 | if (dev_priv->ring.ring_obj != NULL) { | 156 | if (dev_priv->render_ring.gem_object != NULL) { |
237 | i915_dma_cleanup(dev); | 157 | i915_dma_cleanup(dev); |
238 | DRM_ERROR("Client tried to initialize ringbuffer in " | 158 | DRM_ERROR("Client tried to initialize ringbuffer in " |
239 | "GEM mode\n"); | 159 | "GEM mode\n"); |
240 | return -EINVAL; | 160 | return -EINVAL; |
241 | } | 161 | } |
242 | 162 | ||
243 | dev_priv->ring.Size = init->ring_size; | 163 | dev_priv->render_ring.size = init->ring_size; |
244 | 164 | ||
245 | dev_priv->ring.map.offset = init->ring_start; | 165 | dev_priv->render_ring.map.offset = init->ring_start; |
246 | dev_priv->ring.map.size = init->ring_size; | 166 | dev_priv->render_ring.map.size = init->ring_size; |
247 | dev_priv->ring.map.type = 0; | 167 | dev_priv->render_ring.map.type = 0; |
248 | dev_priv->ring.map.flags = 0; | 168 | dev_priv->render_ring.map.flags = 0; |
249 | dev_priv->ring.map.mtrr = 0; | 169 | dev_priv->render_ring.map.mtrr = 0; |
250 | 170 | ||
251 | drm_core_ioremap_wc(&dev_priv->ring.map, dev); | 171 | drm_core_ioremap_wc(&dev_priv->render_ring.map, dev); |
252 | 172 | ||
253 | if (dev_priv->ring.map.handle == NULL) { | 173 | if (dev_priv->render_ring.map.handle == NULL) { |
254 | i915_dma_cleanup(dev); | 174 | i915_dma_cleanup(dev); |
255 | DRM_ERROR("can not ioremap virtual address for" | 175 | DRM_ERROR("can not ioremap virtual address for" |
256 | " ring buffer\n"); | 176 | " ring buffer\n"); |
@@ -258,7 +178,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
258 | } | 178 | } |
259 | } | 179 | } |
260 | 180 | ||
261 | dev_priv->ring.virtual_start = dev_priv->ring.map.handle; | 181 | dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle; |
262 | 182 | ||
263 | dev_priv->cpp = init->cpp; | 183 | dev_priv->cpp = init->cpp; |
264 | dev_priv->back_offset = init->back_offset; | 184 | dev_priv->back_offset = init->back_offset; |
@@ -278,26 +198,29 @@ static int i915_dma_resume(struct drm_device * dev) | |||
278 | { | 198 | { |
279 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 199 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
280 | 200 | ||
201 | struct intel_ring_buffer *ring; | ||
281 | DRM_DEBUG_DRIVER("%s\n", __func__); | 202 | DRM_DEBUG_DRIVER("%s\n", __func__); |
282 | 203 | ||
283 | if (dev_priv->ring.map.handle == NULL) { | 204 | ring = &dev_priv->render_ring; |
205 | |||
206 | if (ring->map.handle == NULL) { | ||
284 | DRM_ERROR("can not ioremap virtual address for" | 207 | DRM_ERROR("can not ioremap virtual address for" |
285 | " ring buffer\n"); | 208 | " ring buffer\n"); |
286 | return -ENOMEM; | 209 | return -ENOMEM; |
287 | } | 210 | } |
288 | 211 | ||
289 | /* Program Hardware Status Page */ | 212 | /* Program Hardware Status Page */ |
290 | if (!dev_priv->hw_status_page) { | 213 | if (!ring->status_page.page_addr) { |
291 | DRM_ERROR("Can not find hardware status page\n"); | 214 | DRM_ERROR("Can not find hardware status page\n"); |
292 | return -EINVAL; | 215 | return -EINVAL; |
293 | } | 216 | } |
294 | DRM_DEBUG_DRIVER("hw status page @ %p\n", | 217 | DRM_DEBUG_DRIVER("hw status page @ %p\n", |
295 | dev_priv->hw_status_page); | 218 | ring->status_page.page_addr); |
296 | 219 | if (ring->status_page.gfx_addr != 0) | |
297 | if (dev_priv->status_gfx_addr != 0) | 220 | ring->setup_status_page(dev, ring); |
298 | I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); | ||
299 | else | 221 | else |
300 | I915_WRITE(HWS_PGA, dev_priv->dma_status_page); | 222 | I915_WRITE(HWS_PGA, dev_priv->dma_status_page); |
223 | |||
301 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); | 224 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); |
302 | 225 | ||
303 | return 0; | 226 | return 0; |
@@ -407,9 +330,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) | |||
407 | { | 330 | { |
408 | drm_i915_private_t *dev_priv = dev->dev_private; | 331 | drm_i915_private_t *dev_priv = dev->dev_private; |
409 | int i; | 332 | int i; |
410 | RING_LOCALS; | ||
411 | 333 | ||
412 | if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) | 334 | if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) |
413 | return -EINVAL; | 335 | return -EINVAL; |
414 | 336 | ||
415 | BEGIN_LP_RING((dwords+1)&~1); | 337 | BEGIN_LP_RING((dwords+1)&~1); |
@@ -442,9 +364,7 @@ i915_emit_box(struct drm_device *dev, | |||
442 | struct drm_clip_rect *boxes, | 364 | struct drm_clip_rect *boxes, |
443 | int i, int DR1, int DR4) | 365 | int i, int DR1, int DR4) |
444 | { | 366 | { |
445 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
446 | struct drm_clip_rect box = boxes[i]; | 367 | struct drm_clip_rect box = boxes[i]; |
447 | RING_LOCALS; | ||
448 | 368 | ||
449 | if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { | 369 | if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { |
450 | DRM_ERROR("Bad box %d,%d..%d,%d\n", | 370 | DRM_ERROR("Bad box %d,%d..%d,%d\n", |
@@ -481,7 +401,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev) | |||
481 | { | 401 | { |
482 | drm_i915_private_t *dev_priv = dev->dev_private; | 402 | drm_i915_private_t *dev_priv = dev->dev_private; |
483 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 403 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
484 | RING_LOCALS; | ||
485 | 404 | ||
486 | dev_priv->counter++; | 405 | dev_priv->counter++; |
487 | if (dev_priv->counter > 0x7FFFFFFFUL) | 406 | if (dev_priv->counter > 0x7FFFFFFFUL) |
@@ -535,10 +454,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, | |||
535 | drm_i915_batchbuffer_t * batch, | 454 | drm_i915_batchbuffer_t * batch, |
536 | struct drm_clip_rect *cliprects) | 455 | struct drm_clip_rect *cliprects) |
537 | { | 456 | { |
538 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
539 | int nbox = batch->num_cliprects; | 457 | int nbox = batch->num_cliprects; |
540 | int i = 0, count; | 458 | int i = 0, count; |
541 | RING_LOCALS; | ||
542 | 459 | ||
543 | if ((batch->start | batch->used) & 0x7) { | 460 | if ((batch->start | batch->used) & 0x7) { |
544 | DRM_ERROR("alignment"); | 461 | DRM_ERROR("alignment"); |
@@ -587,7 +504,6 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
587 | drm_i915_private_t *dev_priv = dev->dev_private; | 504 | drm_i915_private_t *dev_priv = dev->dev_private; |
588 | struct drm_i915_master_private *master_priv = | 505 | struct drm_i915_master_private *master_priv = |
589 | dev->primary->master->driver_priv; | 506 | dev->primary->master->driver_priv; |
590 | RING_LOCALS; | ||
591 | 507 | ||
592 | if (!master_priv->sarea_priv) | 508 | if (!master_priv->sarea_priv) |
593 | return -EINVAL; | 509 | return -EINVAL; |
@@ -640,7 +556,8 @@ static int i915_quiescent(struct drm_device * dev) | |||
640 | drm_i915_private_t *dev_priv = dev->dev_private; | 556 | drm_i915_private_t *dev_priv = dev->dev_private; |
641 | 557 | ||
642 | i915_kernel_lost_context(dev); | 558 | i915_kernel_lost_context(dev); |
643 | return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__); | 559 | return intel_wait_ring_buffer(dev, &dev_priv->render_ring, |
560 | dev_priv->render_ring.size - 8); | ||
644 | } | 561 | } |
645 | 562 | ||
646 | static int i915_flush_ioctl(struct drm_device *dev, void *data, | 563 | static int i915_flush_ioctl(struct drm_device *dev, void *data, |
@@ -827,6 +744,9 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
827 | /* depends on GEM */ | 744 | /* depends on GEM */ |
828 | value = dev_priv->has_gem; | 745 | value = dev_priv->has_gem; |
829 | break; | 746 | break; |
747 | case I915_PARAM_HAS_BSD: | ||
748 | value = HAS_BSD(dev); | ||
749 | break; | ||
830 | default: | 750 | default: |
831 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", | 751 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
832 | param->param); | 752 | param->param); |
@@ -882,6 +802,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | |||
882 | { | 802 | { |
883 | drm_i915_private_t *dev_priv = dev->dev_private; | 803 | drm_i915_private_t *dev_priv = dev->dev_private; |
884 | drm_i915_hws_addr_t *hws = data; | 804 | drm_i915_hws_addr_t *hws = data; |
805 | struct intel_ring_buffer *ring = &dev_priv->render_ring; | ||
885 | 806 | ||
886 | if (!I915_NEED_GFX_HWS(dev)) | 807 | if (!I915_NEED_GFX_HWS(dev)) |
887 | return -EINVAL; | 808 | return -EINVAL; |
@@ -898,7 +819,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | |||
898 | 819 | ||
899 | DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); | 820 | DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); |
900 | 821 | ||
901 | dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); | 822 | ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); |
902 | 823 | ||
903 | dev_priv->hws_map.offset = dev->agp->base + hws->addr; | 824 | dev_priv->hws_map.offset = dev->agp->base + hws->addr; |
904 | dev_priv->hws_map.size = 4*1024; | 825 | dev_priv->hws_map.size = 4*1024; |
@@ -909,19 +830,19 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | |||
909 | drm_core_ioremap_wc(&dev_priv->hws_map, dev); | 830 | drm_core_ioremap_wc(&dev_priv->hws_map, dev); |
910 | if (dev_priv->hws_map.handle == NULL) { | 831 | if (dev_priv->hws_map.handle == NULL) { |
911 | i915_dma_cleanup(dev); | 832 | i915_dma_cleanup(dev); |
912 | dev_priv->status_gfx_addr = 0; | 833 | ring->status_page.gfx_addr = 0; |
913 | DRM_ERROR("can not ioremap virtual address for" | 834 | DRM_ERROR("can not ioremap virtual address for" |
914 | " G33 hw status page\n"); | 835 | " G33 hw status page\n"); |
915 | return -ENOMEM; | 836 | return -ENOMEM; |
916 | } | 837 | } |
917 | dev_priv->hw_status_page = dev_priv->hws_map.handle; | 838 | ring->status_page.page_addr = dev_priv->hws_map.handle; |
839 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | ||
840 | I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); | ||
918 | 841 | ||
919 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | ||
920 | I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); | ||
921 | DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", | 842 | DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", |
922 | dev_priv->status_gfx_addr); | 843 | ring->status_page.gfx_addr); |
923 | DRM_DEBUG_DRIVER("load hws at %p\n", | 844 | DRM_DEBUG_DRIVER("load hws at %p\n", |
924 | dev_priv->hw_status_page); | 845 | ring->status_page.page_addr); |
925 | return 0; | 846 | return 0; |
926 | } | 847 | } |
927 | 848 | ||
@@ -1399,12 +1320,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ | |||
1399 | struct drm_device *dev = pci_get_drvdata(pdev); | 1320 | struct drm_device *dev = pci_get_drvdata(pdev); |
1400 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | 1321 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; |
1401 | if (state == VGA_SWITCHEROO_ON) { | 1322 | if (state == VGA_SWITCHEROO_ON) { |
1402 | printk(KERN_INFO "i915: switched off\n"); | 1323 | printk(KERN_INFO "i915: switched on\n"); |
1403 | /* i915 resume handler doesn't set to D0 */ | 1324 | /* i915 resume handler doesn't set to D0 */ |
1404 | pci_set_power_state(dev->pdev, PCI_D0); | 1325 | pci_set_power_state(dev->pdev, PCI_D0); |
1405 | i915_resume(dev); | 1326 | i915_resume(dev); |
1327 | drm_kms_helper_poll_enable(dev); | ||
1406 | } else { | 1328 | } else { |
1407 | printk(KERN_ERR "i915: switched off\n"); | 1329 | printk(KERN_ERR "i915: switched off\n"); |
1330 | drm_kms_helper_poll_disable(dev); | ||
1408 | i915_suspend(dev, pmm); | 1331 | i915_suspend(dev, pmm); |
1409 | } | 1332 | } |
1410 | } | 1333 | } |
@@ -1479,19 +1402,19 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
1479 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ | 1402 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ |
1480 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); | 1403 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); |
1481 | if (ret) | 1404 | if (ret) |
1482 | goto destroy_ringbuffer; | 1405 | goto cleanup_ringbuffer; |
1483 | 1406 | ||
1484 | ret = vga_switcheroo_register_client(dev->pdev, | 1407 | ret = vga_switcheroo_register_client(dev->pdev, |
1485 | i915_switcheroo_set_state, | 1408 | i915_switcheroo_set_state, |
1486 | i915_switcheroo_can_switch); | 1409 | i915_switcheroo_can_switch); |
1487 | if (ret) | 1410 | if (ret) |
1488 | goto destroy_ringbuffer; | 1411 | goto cleanup_vga_client; |
1489 | 1412 | ||
1490 | intel_modeset_init(dev); | 1413 | intel_modeset_init(dev); |
1491 | 1414 | ||
1492 | ret = drm_irq_install(dev); | 1415 | ret = drm_irq_install(dev); |
1493 | if (ret) | 1416 | if (ret) |
1494 | goto destroy_ringbuffer; | 1417 | goto cleanup_vga_switcheroo; |
1495 | 1418 | ||
1496 | /* Always safe in the mode setting case. */ | 1419 | /* Always safe in the mode setting case. */ |
1497 | /* FIXME: do pre/post-mode set stuff in core KMS code */ | 1420 | /* FIXME: do pre/post-mode set stuff in core KMS code */ |
@@ -1503,11 +1426,20 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
1503 | 1426 | ||
1504 | I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); | 1427 | I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); |
1505 | 1428 | ||
1506 | intel_fbdev_init(dev); | 1429 | ret = intel_fbdev_init(dev); |
1430 | if (ret) | ||
1431 | goto cleanup_irq; | ||
1432 | |||
1507 | drm_kms_helper_poll_init(dev); | 1433 | drm_kms_helper_poll_init(dev); |
1508 | return 0; | 1434 | return 0; |
1509 | 1435 | ||
1510 | destroy_ringbuffer: | 1436 | cleanup_irq: |
1437 | drm_irq_uninstall(dev); | ||
1438 | cleanup_vga_switcheroo: | ||
1439 | vga_switcheroo_unregister_client(dev->pdev); | ||
1440 | cleanup_vga_client: | ||
1441 | vga_client_register(dev->pdev, NULL, NULL, NULL); | ||
1442 | cleanup_ringbuffer: | ||
1511 | mutex_lock(&dev->struct_mutex); | 1443 | mutex_lock(&dev->struct_mutex); |
1512 | i915_gem_cleanup_ringbuffer(dev); | 1444 | i915_gem_cleanup_ringbuffer(dev); |
1513 | mutex_unlock(&dev->struct_mutex); | 1445 | mutex_unlock(&dev->struct_mutex); |
@@ -1539,14 +1471,11 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master) | |||
1539 | master->driver_priv = NULL; | 1471 | master->driver_priv = NULL; |
1540 | } | 1472 | } |
1541 | 1473 | ||
1542 | static void i915_get_mem_freq(struct drm_device *dev) | 1474 | static void i915_pineview_get_mem_freq(struct drm_device *dev) |
1543 | { | 1475 | { |
1544 | drm_i915_private_t *dev_priv = dev->dev_private; | 1476 | drm_i915_private_t *dev_priv = dev->dev_private; |
1545 | u32 tmp; | 1477 | u32 tmp; |
1546 | 1478 | ||
1547 | if (!IS_PINEVIEW(dev)) | ||
1548 | return; | ||
1549 | |||
1550 | tmp = I915_READ(CLKCFG); | 1479 | tmp = I915_READ(CLKCFG); |
1551 | 1480 | ||
1552 | switch (tmp & CLKCFG_FSB_MASK) { | 1481 | switch (tmp & CLKCFG_FSB_MASK) { |
@@ -1575,7 +1504,524 @@ static void i915_get_mem_freq(struct drm_device *dev) | |||
1575 | dev_priv->mem_freq = 800; | 1504 | dev_priv->mem_freq = 800; |
1576 | break; | 1505 | break; |
1577 | } | 1506 | } |
1507 | |||
1508 | /* detect pineview DDR3 setting */ | ||
1509 | tmp = I915_READ(CSHRDDR3CTL); | ||
1510 | dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; | ||
1511 | } | ||
1512 | |||
1513 | static void i915_ironlake_get_mem_freq(struct drm_device *dev) | ||
1514 | { | ||
1515 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1516 | u16 ddrpll, csipll; | ||
1517 | |||
1518 | ddrpll = I915_READ16(DDRMPLL1); | ||
1519 | csipll = I915_READ16(CSIPLL0); | ||
1520 | |||
1521 | switch (ddrpll & 0xff) { | ||
1522 | case 0xc: | ||
1523 | dev_priv->mem_freq = 800; | ||
1524 | break; | ||
1525 | case 0x10: | ||
1526 | dev_priv->mem_freq = 1066; | ||
1527 | break; | ||
1528 | case 0x14: | ||
1529 | dev_priv->mem_freq = 1333; | ||
1530 | break; | ||
1531 | case 0x18: | ||
1532 | dev_priv->mem_freq = 1600; | ||
1533 | break; | ||
1534 | default: | ||
1535 | DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", | ||
1536 | ddrpll & 0xff); | ||
1537 | dev_priv->mem_freq = 0; | ||
1538 | break; | ||
1539 | } | ||
1540 | |||
1541 | dev_priv->r_t = dev_priv->mem_freq; | ||
1542 | |||
1543 | switch (csipll & 0x3ff) { | ||
1544 | case 0x00c: | ||
1545 | dev_priv->fsb_freq = 3200; | ||
1546 | break; | ||
1547 | case 0x00e: | ||
1548 | dev_priv->fsb_freq = 3733; | ||
1549 | break; | ||
1550 | case 0x010: | ||
1551 | dev_priv->fsb_freq = 4266; | ||
1552 | break; | ||
1553 | case 0x012: | ||
1554 | dev_priv->fsb_freq = 4800; | ||
1555 | break; | ||
1556 | case 0x014: | ||
1557 | dev_priv->fsb_freq = 5333; | ||
1558 | break; | ||
1559 | case 0x016: | ||
1560 | dev_priv->fsb_freq = 5866; | ||
1561 | break; | ||
1562 | case 0x018: | ||
1563 | dev_priv->fsb_freq = 6400; | ||
1564 | break; | ||
1565 | default: | ||
1566 | DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", | ||
1567 | csipll & 0x3ff); | ||
1568 | dev_priv->fsb_freq = 0; | ||
1569 | break; | ||
1570 | } | ||
1571 | |||
1572 | if (dev_priv->fsb_freq == 3200) { | ||
1573 | dev_priv->c_m = 0; | ||
1574 | } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { | ||
1575 | dev_priv->c_m = 1; | ||
1576 | } else { | ||
1577 | dev_priv->c_m = 2; | ||
1578 | } | ||
1579 | } | ||
1580 | |||
1581 | struct v_table { | ||
1582 | u8 vid; | ||
1583 | unsigned long vd; /* in .1 mil */ | ||
1584 | unsigned long vm; /* in .1 mil */ | ||
1585 | u8 pvid; | ||
1586 | }; | ||
1587 | |||
1588 | static struct v_table v_table[] = { | ||
1589 | { 0, 16125, 15000, 0x7f, }, | ||
1590 | { 1, 16000, 14875, 0x7e, }, | ||
1591 | { 2, 15875, 14750, 0x7d, }, | ||
1592 | { 3, 15750, 14625, 0x7c, }, | ||
1593 | { 4, 15625, 14500, 0x7b, }, | ||
1594 | { 5, 15500, 14375, 0x7a, }, | ||
1595 | { 6, 15375, 14250, 0x79, }, | ||
1596 | { 7, 15250, 14125, 0x78, }, | ||
1597 | { 8, 15125, 14000, 0x77, }, | ||
1598 | { 9, 15000, 13875, 0x76, }, | ||
1599 | { 10, 14875, 13750, 0x75, }, | ||
1600 | { 11, 14750, 13625, 0x74, }, | ||
1601 | { 12, 14625, 13500, 0x73, }, | ||
1602 | { 13, 14500, 13375, 0x72, }, | ||
1603 | { 14, 14375, 13250, 0x71, }, | ||
1604 | { 15, 14250, 13125, 0x70, }, | ||
1605 | { 16, 14125, 13000, 0x6f, }, | ||
1606 | { 17, 14000, 12875, 0x6e, }, | ||
1607 | { 18, 13875, 12750, 0x6d, }, | ||
1608 | { 19, 13750, 12625, 0x6c, }, | ||
1609 | { 20, 13625, 12500, 0x6b, }, | ||
1610 | { 21, 13500, 12375, 0x6a, }, | ||
1611 | { 22, 13375, 12250, 0x69, }, | ||
1612 | { 23, 13250, 12125, 0x68, }, | ||
1613 | { 24, 13125, 12000, 0x67, }, | ||
1614 | { 25, 13000, 11875, 0x66, }, | ||
1615 | { 26, 12875, 11750, 0x65, }, | ||
1616 | { 27, 12750, 11625, 0x64, }, | ||
1617 | { 28, 12625, 11500, 0x63, }, | ||
1618 | { 29, 12500, 11375, 0x62, }, | ||
1619 | { 30, 12375, 11250, 0x61, }, | ||
1620 | { 31, 12250, 11125, 0x60, }, | ||
1621 | { 32, 12125, 11000, 0x5f, }, | ||
1622 | { 33, 12000, 10875, 0x5e, }, | ||
1623 | { 34, 11875, 10750, 0x5d, }, | ||
1624 | { 35, 11750, 10625, 0x5c, }, | ||
1625 | { 36, 11625, 10500, 0x5b, }, | ||
1626 | { 37, 11500, 10375, 0x5a, }, | ||
1627 | { 38, 11375, 10250, 0x59, }, | ||
1628 | { 39, 11250, 10125, 0x58, }, | ||
1629 | { 40, 11125, 10000, 0x57, }, | ||
1630 | { 41, 11000, 9875, 0x56, }, | ||
1631 | { 42, 10875, 9750, 0x55, }, | ||
1632 | { 43, 10750, 9625, 0x54, }, | ||
1633 | { 44, 10625, 9500, 0x53, }, | ||
1634 | { 45, 10500, 9375, 0x52, }, | ||
1635 | { 46, 10375, 9250, 0x51, }, | ||
1636 | { 47, 10250, 9125, 0x50, }, | ||
1637 | { 48, 10125, 9000, 0x4f, }, | ||
1638 | { 49, 10000, 8875, 0x4e, }, | ||
1639 | { 50, 9875, 8750, 0x4d, }, | ||
1640 | { 51, 9750, 8625, 0x4c, }, | ||
1641 | { 52, 9625, 8500, 0x4b, }, | ||
1642 | { 53, 9500, 8375, 0x4a, }, | ||
1643 | { 54, 9375, 8250, 0x49, }, | ||
1644 | { 55, 9250, 8125, 0x48, }, | ||
1645 | { 56, 9125, 8000, 0x47, }, | ||
1646 | { 57, 9000, 7875, 0x46, }, | ||
1647 | { 58, 8875, 7750, 0x45, }, | ||
1648 | { 59, 8750, 7625, 0x44, }, | ||
1649 | { 60, 8625, 7500, 0x43, }, | ||
1650 | { 61, 8500, 7375, 0x42, }, | ||
1651 | { 62, 8375, 7250, 0x41, }, | ||
1652 | { 63, 8250, 7125, 0x40, }, | ||
1653 | { 64, 8125, 7000, 0x3f, }, | ||
1654 | { 65, 8000, 6875, 0x3e, }, | ||
1655 | { 66, 7875, 6750, 0x3d, }, | ||
1656 | { 67, 7750, 6625, 0x3c, }, | ||
1657 | { 68, 7625, 6500, 0x3b, }, | ||
1658 | { 69, 7500, 6375, 0x3a, }, | ||
1659 | { 70, 7375, 6250, 0x39, }, | ||
1660 | { 71, 7250, 6125, 0x38, }, | ||
1661 | { 72, 7125, 6000, 0x37, }, | ||
1662 | { 73, 7000, 5875, 0x36, }, | ||
1663 | { 74, 6875, 5750, 0x35, }, | ||
1664 | { 75, 6750, 5625, 0x34, }, | ||
1665 | { 76, 6625, 5500, 0x33, }, | ||
1666 | { 77, 6500, 5375, 0x32, }, | ||
1667 | { 78, 6375, 5250, 0x31, }, | ||
1668 | { 79, 6250, 5125, 0x30, }, | ||
1669 | { 80, 6125, 5000, 0x2f, }, | ||
1670 | { 81, 6000, 4875, 0x2e, }, | ||
1671 | { 82, 5875, 4750, 0x2d, }, | ||
1672 | { 83, 5750, 4625, 0x2c, }, | ||
1673 | { 84, 5625, 4500, 0x2b, }, | ||
1674 | { 85, 5500, 4375, 0x2a, }, | ||
1675 | { 86, 5375, 4250, 0x29, }, | ||
1676 | { 87, 5250, 4125, 0x28, }, | ||
1677 | { 88, 5125, 4000, 0x27, }, | ||
1678 | { 89, 5000, 3875, 0x26, }, | ||
1679 | { 90, 4875, 3750, 0x25, }, | ||
1680 | { 91, 4750, 3625, 0x24, }, | ||
1681 | { 92, 4625, 3500, 0x23, }, | ||
1682 | { 93, 4500, 3375, 0x22, }, | ||
1683 | { 94, 4375, 3250, 0x21, }, | ||
1684 | { 95, 4250, 3125, 0x20, }, | ||
1685 | { 96, 4125, 3000, 0x1f, }, | ||
1686 | { 97, 4125, 3000, 0x1e, }, | ||
1687 | { 98, 4125, 3000, 0x1d, }, | ||
1688 | { 99, 4125, 3000, 0x1c, }, | ||
1689 | { 100, 4125, 3000, 0x1b, }, | ||
1690 | { 101, 4125, 3000, 0x1a, }, | ||
1691 | { 102, 4125, 3000, 0x19, }, | ||
1692 | { 103, 4125, 3000, 0x18, }, | ||
1693 | { 104, 4125, 3000, 0x17, }, | ||
1694 | { 105, 4125, 3000, 0x16, }, | ||
1695 | { 106, 4125, 3000, 0x15, }, | ||
1696 | { 107, 4125, 3000, 0x14, }, | ||
1697 | { 108, 4125, 3000, 0x13, }, | ||
1698 | { 109, 4125, 3000, 0x12, }, | ||
1699 | { 110, 4125, 3000, 0x11, }, | ||
1700 | { 111, 4125, 3000, 0x10, }, | ||
1701 | { 112, 4125, 3000, 0x0f, }, | ||
1702 | { 113, 4125, 3000, 0x0e, }, | ||
1703 | { 114, 4125, 3000, 0x0d, }, | ||
1704 | { 115, 4125, 3000, 0x0c, }, | ||
1705 | { 116, 4125, 3000, 0x0b, }, | ||
1706 | { 117, 4125, 3000, 0x0a, }, | ||
1707 | { 118, 4125, 3000, 0x09, }, | ||
1708 | { 119, 4125, 3000, 0x08, }, | ||
1709 | { 120, 1125, 0, 0x07, }, | ||
1710 | { 121, 1000, 0, 0x06, }, | ||
1711 | { 122, 875, 0, 0x05, }, | ||
1712 | { 123, 750, 0, 0x04, }, | ||
1713 | { 124, 625, 0, 0x03, }, | ||
1714 | { 125, 500, 0, 0x02, }, | ||
1715 | { 126, 375, 0, 0x01, }, | ||
1716 | { 127, 0, 0, 0x00, }, | ||
1717 | }; | ||
1718 | |||
1719 | struct cparams { | ||
1720 | int i; | ||
1721 | int t; | ||
1722 | int m; | ||
1723 | int c; | ||
1724 | }; | ||
1725 | |||
1726 | static struct cparams cparams[] = { | ||
1727 | { 1, 1333, 301, 28664 }, | ||
1728 | { 1, 1066, 294, 24460 }, | ||
1729 | { 1, 800, 294, 25192 }, | ||
1730 | { 0, 1333, 276, 27605 }, | ||
1731 | { 0, 1066, 276, 27605 }, | ||
1732 | { 0, 800, 231, 23784 }, | ||
1733 | }; | ||
1734 | |||
1735 | unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) | ||
1736 | { | ||
1737 | u64 total_count, diff, ret; | ||
1738 | u32 count1, count2, count3, m = 0, c = 0; | ||
1739 | unsigned long now = jiffies_to_msecs(jiffies), diff1; | ||
1740 | int i; | ||
1741 | |||
1742 | diff1 = now - dev_priv->last_time1; | ||
1743 | |||
1744 | count1 = I915_READ(DMIEC); | ||
1745 | count2 = I915_READ(DDREC); | ||
1746 | count3 = I915_READ(CSIEC); | ||
1747 | |||
1748 | total_count = count1 + count2 + count3; | ||
1749 | |||
1750 | /* FIXME: handle per-counter overflow */ | ||
1751 | if (total_count < dev_priv->last_count1) { | ||
1752 | diff = ~0UL - dev_priv->last_count1; | ||
1753 | diff += total_count; | ||
1754 | } else { | ||
1755 | diff = total_count - dev_priv->last_count1; | ||
1756 | } | ||
1757 | |||
1758 | for (i = 0; i < ARRAY_SIZE(cparams); i++) { | ||
1759 | if (cparams[i].i == dev_priv->c_m && | ||
1760 | cparams[i].t == dev_priv->r_t) { | ||
1761 | m = cparams[i].m; | ||
1762 | c = cparams[i].c; | ||
1763 | break; | ||
1764 | } | ||
1765 | } | ||
1766 | |||
1767 | div_u64(diff, diff1); | ||
1768 | ret = ((m * diff) + c); | ||
1769 | div_u64(ret, 10); | ||
1770 | |||
1771 | dev_priv->last_count1 = total_count; | ||
1772 | dev_priv->last_time1 = now; | ||
1773 | |||
1774 | return ret; | ||
1775 | } | ||
1776 | |||
1777 | unsigned long i915_mch_val(struct drm_i915_private *dev_priv) | ||
1778 | { | ||
1779 | unsigned long m, x, b; | ||
1780 | u32 tsfs; | ||
1781 | |||
1782 | tsfs = I915_READ(TSFS); | ||
1783 | |||
1784 | m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); | ||
1785 | x = I915_READ8(TR1); | ||
1786 | |||
1787 | b = tsfs & TSFS_INTR_MASK; | ||
1788 | |||
1789 | return ((m * x) / 127) - b; | ||
1790 | } | ||
1791 | |||
1792 | static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) | ||
1793 | { | ||
1794 | unsigned long val = 0; | ||
1795 | int i; | ||
1796 | |||
1797 | for (i = 0; i < ARRAY_SIZE(v_table); i++) { | ||
1798 | if (v_table[i].pvid == pxvid) { | ||
1799 | if (IS_MOBILE(dev_priv->dev)) | ||
1800 | val = v_table[i].vm; | ||
1801 | else | ||
1802 | val = v_table[i].vd; | ||
1803 | } | ||
1804 | } | ||
1805 | |||
1806 | return val; | ||
1807 | } | ||
1808 | |||
1809 | void i915_update_gfx_val(struct drm_i915_private *dev_priv) | ||
1810 | { | ||
1811 | struct timespec now, diff1; | ||
1812 | u64 diff; | ||
1813 | unsigned long diffms; | ||
1814 | u32 count; | ||
1815 | |||
1816 | getrawmonotonic(&now); | ||
1817 | diff1 = timespec_sub(now, dev_priv->last_time2); | ||
1818 | |||
1819 | /* Don't divide by 0 */ | ||
1820 | diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; | ||
1821 | if (!diffms) | ||
1822 | return; | ||
1823 | |||
1824 | count = I915_READ(GFXEC); | ||
1825 | |||
1826 | if (count < dev_priv->last_count2) { | ||
1827 | diff = ~0UL - dev_priv->last_count2; | ||
1828 | diff += count; | ||
1829 | } else { | ||
1830 | diff = count - dev_priv->last_count2; | ||
1831 | } | ||
1832 | |||
1833 | dev_priv->last_count2 = count; | ||
1834 | dev_priv->last_time2 = now; | ||
1835 | |||
1836 | /* More magic constants... */ | ||
1837 | diff = diff * 1181; | ||
1838 | div_u64(diff, diffms * 10); | ||
1839 | dev_priv->gfx_power = diff; | ||
1840 | } | ||
1841 | |||
/**
 * i915_gfx_val - estimate current graphics power draw
 * @dev_priv: i915 device private
 *
 * Combine the voltage ID of the current P-state with the thermal value
 * from i915_mch_val() through a set of empirically derived correction
 * factors, refresh the cached counter-based estimate, and return the
 * sum.  The state2 comment below indicates the result is in mW —
 * NOTE(review): unit not otherwise verifiable from this file.
 */
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	/* Voltage ID for the current P-state: bits 30:24 of PXVFREQ[cur_delay]. */
	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	/* Refresh the counter-based estimate before combining. */
	i915_update_gfx_val(dev_priv);

	return dev_priv->gfx_power + state2;
}
1876 | |||
/* Global for the IPS driver to get at the current i915 device; set at
 * i915_driver_load() time and cleared on unload. */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 * Taken by every i915_gpu_* / i915_read_mch_val() entry point below.
 */
DEFINE_SPINLOCK(mchdev_lock);
1888 | |||
1889 | /** | ||
1890 | * i915_read_mch_val - return value for IPS use | ||
1891 | * | ||
1892 | * Calculate and return a value for the IPS driver to use when deciding whether | ||
1893 | * we have thermal and power headroom to increase CPU or GPU power budget. | ||
1894 | */ | ||
1895 | unsigned long i915_read_mch_val(void) | ||
1896 | { | ||
1897 | struct drm_i915_private *dev_priv; | ||
1898 | unsigned long chipset_val, graphics_val, ret = 0; | ||
1899 | |||
1900 | spin_lock(&mchdev_lock); | ||
1901 | if (!i915_mch_dev) | ||
1902 | goto out_unlock; | ||
1903 | dev_priv = i915_mch_dev; | ||
1904 | |||
1905 | chipset_val = i915_chipset_val(dev_priv); | ||
1906 | graphics_val = i915_gfx_val(dev_priv); | ||
1907 | |||
1908 | ret = chipset_val + graphics_val; | ||
1909 | |||
1910 | out_unlock: | ||
1911 | spin_unlock(&mchdev_lock); | ||
1912 | |||
1913 | return ret; | ||
1914 | } | ||
1915 | EXPORT_SYMBOL_GPL(i915_read_mch_val); | ||
1916 | |||
1917 | /** | ||
1918 | * i915_gpu_raise - raise GPU frequency limit | ||
1919 | * | ||
1920 | * Raise the limit; IPS indicates we have thermal headroom. | ||
1921 | */ | ||
1922 | bool i915_gpu_raise(void) | ||
1923 | { | ||
1924 | struct drm_i915_private *dev_priv; | ||
1925 | bool ret = true; | ||
1926 | |||
1927 | spin_lock(&mchdev_lock); | ||
1928 | if (!i915_mch_dev) { | ||
1929 | ret = false; | ||
1930 | goto out_unlock; | ||
1931 | } | ||
1932 | dev_priv = i915_mch_dev; | ||
1933 | |||
1934 | if (dev_priv->max_delay > dev_priv->fmax) | ||
1935 | dev_priv->max_delay--; | ||
1936 | |||
1937 | out_unlock: | ||
1938 | spin_unlock(&mchdev_lock); | ||
1939 | |||
1940 | return ret; | ||
1941 | } | ||
1942 | EXPORT_SYMBOL_GPL(i915_gpu_raise); | ||
1943 | |||
1944 | /** | ||
1945 | * i915_gpu_lower - lower GPU frequency limit | ||
1946 | * | ||
1947 | * IPS indicates we're close to a thermal limit, so throttle back the GPU | ||
1948 | * frequency maximum. | ||
1949 | */ | ||
1950 | bool i915_gpu_lower(void) | ||
1951 | { | ||
1952 | struct drm_i915_private *dev_priv; | ||
1953 | bool ret = true; | ||
1954 | |||
1955 | spin_lock(&mchdev_lock); | ||
1956 | if (!i915_mch_dev) { | ||
1957 | ret = false; | ||
1958 | goto out_unlock; | ||
1959 | } | ||
1960 | dev_priv = i915_mch_dev; | ||
1961 | |||
1962 | if (dev_priv->max_delay < dev_priv->min_delay) | ||
1963 | dev_priv->max_delay++; | ||
1964 | |||
1965 | out_unlock: | ||
1966 | spin_unlock(&mchdev_lock); | ||
1967 | |||
1968 | return ret; | ||
1578 | } | 1969 | } |
1970 | EXPORT_SYMBOL_GPL(i915_gpu_lower); | ||
1971 | |||
1972 | /** | ||
1973 | * i915_gpu_busy - indicate GPU business to IPS | ||
1974 | * | ||
1975 | * Tell the IPS driver whether or not the GPU is busy. | ||
1976 | */ | ||
1977 | bool i915_gpu_busy(void) | ||
1978 | { | ||
1979 | struct drm_i915_private *dev_priv; | ||
1980 | bool ret = false; | ||
1981 | |||
1982 | spin_lock(&mchdev_lock); | ||
1983 | if (!i915_mch_dev) | ||
1984 | goto out_unlock; | ||
1985 | dev_priv = i915_mch_dev; | ||
1986 | |||
1987 | ret = dev_priv->busy; | ||
1988 | |||
1989 | out_unlock: | ||
1990 | spin_unlock(&mchdev_lock); | ||
1991 | |||
1992 | return ret; | ||
1993 | } | ||
1994 | EXPORT_SYMBOL_GPL(i915_gpu_busy); | ||
1995 | |||
1996 | /** | ||
1997 | * i915_gpu_turbo_disable - disable graphics turbo | ||
1998 | * | ||
1999 | * Disable graphics turbo by resetting the max frequency and setting the | ||
2000 | * current frequency to the default. | ||
2001 | */ | ||
2002 | bool i915_gpu_turbo_disable(void) | ||
2003 | { | ||
2004 | struct drm_i915_private *dev_priv; | ||
2005 | bool ret = true; | ||
2006 | |||
2007 | spin_lock(&mchdev_lock); | ||
2008 | if (!i915_mch_dev) { | ||
2009 | ret = false; | ||
2010 | goto out_unlock; | ||
2011 | } | ||
2012 | dev_priv = i915_mch_dev; | ||
2013 | |||
2014 | dev_priv->max_delay = dev_priv->fstart; | ||
2015 | |||
2016 | if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) | ||
2017 | ret = false; | ||
2018 | |||
2019 | out_unlock: | ||
2020 | spin_unlock(&mchdev_lock); | ||
2021 | |||
2022 | return ret; | ||
2023 | } | ||
2024 | EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); | ||
1579 | 2025 | ||
1580 | /** | 2026 | /** |
1581 | * i915_driver_load - setup chip and create an initial config | 2027 | * i915_driver_load - setup chip and create an initial config |
@@ -1594,7 +2040,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1594 | resource_size_t base, size; | 2040 | resource_size_t base, size; |
1595 | int ret = 0, mmio_bar; | 2041 | int ret = 0, mmio_bar; |
1596 | uint32_t agp_size, prealloc_size, prealloc_start; | 2042 | uint32_t agp_size, prealloc_size, prealloc_start; |
1597 | |||
1598 | /* i915 has 4 more counters */ | 2043 | /* i915 has 4 more counters */ |
1599 | dev->counters += 4; | 2044 | dev->counters += 4; |
1600 | dev->types[6] = _DRM_STAT_IRQ; | 2045 | dev->types[6] = _DRM_STAT_IRQ; |
@@ -1672,6 +2117,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1672 | dev_priv->has_gem = 0; | 2117 | dev_priv->has_gem = 0; |
1673 | } | 2118 | } |
1674 | 2119 | ||
2120 | if (dev_priv->has_gem == 0 && | ||
2121 | drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
2122 | DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); | ||
2123 | ret = -ENODEV; | ||
2124 | goto out_iomapfree; | ||
2125 | } | ||
2126 | |||
1675 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | 2127 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
1676 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | 2128 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ |
1677 | if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { | 2129 | if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { |
@@ -1691,7 +2143,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1691 | goto out_workqueue_free; | 2143 | goto out_workqueue_free; |
1692 | } | 2144 | } |
1693 | 2145 | ||
1694 | i915_get_mem_freq(dev); | 2146 | if (IS_PINEVIEW(dev)) |
2147 | i915_pineview_get_mem_freq(dev); | ||
2148 | else if (IS_IRONLAKE(dev)) | ||
2149 | i915_ironlake_get_mem_freq(dev); | ||
1695 | 2150 | ||
1696 | /* On the 945G/GM, the chipset reports the MSI capability on the | 2151 | /* On the 945G/GM, the chipset reports the MSI capability on the |
1697 | * integrated graphics even though the support isn't actually there | 2152 | * integrated graphics even though the support isn't actually there |
@@ -1709,7 +2164,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1709 | 2164 | ||
1710 | spin_lock_init(&dev_priv->user_irq_lock); | 2165 | spin_lock_init(&dev_priv->user_irq_lock); |
1711 | spin_lock_init(&dev_priv->error_lock); | 2166 | spin_lock_init(&dev_priv->error_lock); |
1712 | dev_priv->user_irq_refcount = 0; | ||
1713 | dev_priv->trace_irq_seqno = 0; | 2167 | dev_priv->trace_irq_seqno = 0; |
1714 | 2168 | ||
1715 | ret = drm_vblank_init(dev, I915_NUM_PIPE); | 2169 | ret = drm_vblank_init(dev, I915_NUM_PIPE); |
@@ -1738,6 +2192,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1738 | 2192 | ||
1739 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, | 2193 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, |
1740 | (unsigned long) dev); | 2194 | (unsigned long) dev); |
2195 | |||
2196 | spin_lock(&mchdev_lock); | ||
2197 | i915_mch_dev = dev_priv; | ||
2198 | dev_priv->mchdev_lock = &mchdev_lock; | ||
2199 | spin_unlock(&mchdev_lock); | ||
2200 | |||
1741 | return 0; | 2201 | return 0; |
1742 | 2202 | ||
1743 | out_workqueue_free: | 2203 | out_workqueue_free: |
@@ -1759,6 +2219,10 @@ int i915_driver_unload(struct drm_device *dev) | |||
1759 | 2219 | ||
1760 | i915_destroy_error_state(dev); | 2220 | i915_destroy_error_state(dev); |
1761 | 2221 | ||
2222 | spin_lock(&mchdev_lock); | ||
2223 | i915_mch_dev = NULL; | ||
2224 | spin_unlock(&mchdev_lock); | ||
2225 | |||
1762 | destroy_workqueue(dev_priv->wq); | 2226 | destroy_workqueue(dev_priv->wq); |
1763 | del_timer_sync(&dev_priv->hangcheck_timer); | 2227 | del_timer_sync(&dev_priv->hangcheck_timer); |
1764 | 2228 | ||