Diffstat (limited to 'drivers/gpu/drm/i915/i915_dma.c')
 -rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 1238
 1 file changed, 555 insertions(+), 683 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2dd2c93ebfa3..296fbd66f0e1 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -34,14 +34,25 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "i915_trace.h"
+#include "../../../platform/x86/intel_ips.h"
 #include <linux/pci.h>
 #include <linux/vgaarb.h>
 #include <linux/acpi.h>
 #include <linux/pnp.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
+#include <acpi/video.h>
 
-extern int intel_max_stolen; /* from AGP driver */
+static void i915_write_hws_pga(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 addr;
+
+	addr = dev_priv->status_page_dmah->busaddr;
+	if (INTEL_INFO(dev)->gen >= 4)
+		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+	I915_WRITE(HWS_PGA, addr);
+}
 
 /**
  * Sets up the hardware status page for devices that need a physical address
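[Annotation, not part of the patch] The new i915_write_hws_pga() helper appears to fold the high bits of the status page's bus address into the HWS_PGA register on gen4+ parts: shifting by 28 and masking with 0xf0 moves address bits 32-35 into register bits 4-7. A minimal standalone sketch of that packing, assuming a 36-bit DMA address and hypothetical names:

	/* Illustrative only; mirrors the shift/mask in the helper above. */
	u32 hws_pga_value(u64 busaddr, int gen)
	{
		u32 addr = (u32)busaddr;		/* low 32 bits of the page address */
		if (gen >= 4)
			addr |= (busaddr >> 28) & 0xf0;	/* bits 35:32 -> register bits 7:4 */
		return addr;
	}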
@@ -50,6 +61,8 @@ extern int intel_max_stolen; /* from AGP driver */
 static int i915_init_phys_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
 	/* Program Hardware Status Page */
 	dev_priv->status_page_dmah =
 		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
@@ -58,17 +71,13 @@ static int i915_init_phys_hws(struct drm_device *dev)
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->render_ring.status_page.page_addr
-		= dev_priv->status_page_dmah->vaddr;
-	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+	ring->status_page.page_addr =
+		(void __force __iomem *)dev_priv->status_page_dmah->vaddr;
 
-	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
+	memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	if (IS_I965G(dev))
-		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
-						0xf0;
+	i915_write_hws_pga(dev);
 
-	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 	return 0;
 }
@@ -80,13 +89,15 @@ static int i915_init_phys_hws(struct drm_device *dev)
 static void i915_free_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
 	if (dev_priv->status_page_dmah) {
 		drm_pci_free(dev, dev_priv->status_page_dmah);
 		dev_priv->status_page_dmah = NULL;
 	}
 
-	if (dev_priv->render_ring.status_page.gfx_addr) {
-		dev_priv->render_ring.status_page.gfx_addr = 0;
+	if (ring->status_page.gfx_addr) {
+		ring->status_page.gfx_addr = 0;
 		drm_core_ioremapfree(&dev_priv->hws_map, dev);
 	}
 
@@ -98,7 +109,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
-	struct intel_ring_buffer *ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	/*
 	 * We should never lose context on the ring with modesetting
@@ -107,8 +118,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
 	ring->space = ring->head - (ring->tail + 8);
 	if (ring->space < 0)
 		ring->space += ring->size;
@@ -124,6 +135,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
 static int i915_dma_cleanup(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
 	/* Make sure interrupts are disabled here because the uninstall ioctl
 	 * may not have been called from userspace and after dev_private
 	 * is freed, it's too late.
@@ -132,9 +145,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
 		drm_irq_uninstall(dev);
 
 	mutex_lock(&dev->struct_mutex);
-	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-	if (HAS_BSD(dev))
-		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
 	mutex_unlock(&dev->struct_mutex);
 
 	/* Clear the HWS virtual address at teardown */
@@ -148,6 +160,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+	int ret;
 
 	master_priv->sarea = drm_getsarea(dev);
 	if (master_priv->sarea) {
@@ -158,33 +171,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	}
 
 	if (init->ring_size != 0) {
-		if (dev_priv->render_ring.gem_object != NULL) {
+		if (LP_RING(dev_priv)->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
-		dev_priv->render_ring.size = init->ring_size;
-
-		dev_priv->render_ring.map.offset = init->ring_start;
-		dev_priv->render_ring.map.size = init->ring_size;
-		dev_priv->render_ring.map.type = 0;
-		dev_priv->render_ring.map.flags = 0;
-		dev_priv->render_ring.map.mtrr = 0;
-
-		drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
-
-		if (dev_priv->render_ring.map.handle == NULL) {
+		ret = intel_render_ring_init_dri(dev,
+						 init->ring_start,
+						 init->ring_size);
+		if (ret) {
 			i915_dma_cleanup(dev);
-			DRM_ERROR("can not ioremap virtual address for"
-				  " ring buffer\n");
-			return -ENOMEM;
+			return ret;
 		}
 	}
 
-	dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
-
 	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
 	dev_priv->front_offset = init->front_offset;
@@ -202,12 +204,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 static int i915_dma_resume(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
-	struct intel_ring_buffer *ring;
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	ring = &dev_priv->render_ring;
-
 	if (ring->map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
@@ -222,9 +222,9 @@ static int i915_dma_resume(struct drm_device * dev)
 	DRM_DEBUG_DRIVER("hw status page @ %p\n",
 			 ring->status_page.page_addr);
 	if (ring->status_page.gfx_addr != 0)
-		ring->setup_status_page(dev, ring);
+		intel_ring_setup_status_page(ring);
 	else
-		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+		i915_write_hws_pga(dev);
 
 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 
@@ -264,7 +264,7 @@ static int i915_dma_init(struct drm_device *dev, void *data,
  * instruction detected will be given a size of zero, which is a
  * signal to abort the rest of the buffer.
  */
-static int do_validate_cmd(int cmd)
+static int validate_cmd(int cmd)
 {
 	switch (((cmd >> 29) & 0x7)) {
 	case 0x0:
@@ -322,40 +322,27 @@ static int do_validate_cmd(int cmd)
 	return 0;
 }
 
-static int validate_cmd(int cmd)
-{
-	int ret = do_validate_cmd(cmd);
-
-	/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
-
-	return ret;
-}
-
 static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	int i, ret;
 
-	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
+	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
 		return -EINVAL;
 
-	BEGIN_LP_RING((dwords+1)&~1);
-
 	for (i = 0; i < dwords;) {
-		int cmd, sz;
-
-		cmd = buffer[i];
-
-		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+		int sz = validate_cmd(buffer[i]);
+		if (sz == 0 || i + sz > dwords)
 			return -EINVAL;
-
-		OUT_RING(cmd);
-
-		while (++i, --sz) {
-			OUT_RING(buffer[i]);
-		}
+		i += sz;
 	}
 
+	ret = BEGIN_LP_RING((dwords+1)&~1);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < dwords; i++)
+		OUT_RING(buffer[i]);
 	if (dwords & 1)
 		OUT_RING(0);
 
@@ -366,34 +353,41 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 
 int
 i915_emit_box(struct drm_device *dev,
-	      struct drm_clip_rect *boxes,
-	      int i, int DR1, int DR4)
+	      struct drm_clip_rect *box,
+	      int DR1, int DR4)
 {
-	struct drm_clip_rect box = boxes[i];
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
-	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+	    box->y2 <= 0 || box->x2 <= 0) {
 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
-			  box.x1, box.y1, box.x2, box.y2);
+			  box->x1, box->y1, box->x2, box->y2);
 		return -EINVAL;
 	}
 
-	if (IS_I965G(dev)) {
-		BEGIN_LP_RING(4);
+	if (INTEL_INFO(dev)->gen >= 4) {
+		ret = BEGIN_LP_RING(4);
+		if (ret)
+			return ret;
+
 		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
-		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
-		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
 		OUT_RING(DR4);
-		ADVANCE_LP_RING();
 	} else {
-		BEGIN_LP_RING(6);
+		ret = BEGIN_LP_RING(6);
+		if (ret)
+			return ret;
+
 		OUT_RING(GFX_OP_DRAWRECT_INFO);
 		OUT_RING(DR1);
-		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
-		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
 		OUT_RING(DR4);
 		OUT_RING(0);
-		ADVANCE_LP_RING();
 	}
+	ADVANCE_LP_RING();
 
 	return 0;
 }
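[Annotation, not part of the patch] The hunks above and below follow one recurring pattern in this diff: BEGIN_LP_RING() now returns a status instead of silently assuming ring space is available, so every emitter must check it before writing dwords. A hedged sketch of the resulting idiom (illustrative only, not copied from any one function):

	ret = BEGIN_LP_RING(4);		/* reserve space on the low-priority ring; may fail */
	if (ret)
		return ret;		/* propagate the error instead of emitting blindly */
	OUT_RING(cmd0);			/* emit exactly the dwords that were reserved */
	OUT_RING(cmd1);
	OUT_RING(cmd2);
	OUT_RING(cmd3);
	ADVANCE_LP_RING();		/* commit the new tail pointer */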
@@ -413,12 +407,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
-	BEGIN_LP_RING(4);
+	if (BEGIN_LP_RING(4) == 0) {
 	OUT_RING(MI_STORE_DWORD_INDEX);
 	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	OUT_RING(dev_priv->counter);
 	OUT_RING(0);
 	ADVANCE_LP_RING();
+	}
 }
 
 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
@@ -440,7 +435,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
-			ret = i915_emit_box(dev, cliprects, i,
+			ret = i915_emit_box(dev, &cliprects[i],
 					    cmd->DR1, cmd->DR4);
 			if (ret)
 				return ret;
@@ -459,8 +454,9 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 				 drm_i915_batchbuffer_t * batch,
 				 struct drm_clip_rect *cliprects)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int nbox = batch->num_cliprects;
-	int i = 0, count;
+	int i, count, ret;
 
 	if ((batch->start | batch->used) & 0x7) {
 		DRM_ERROR("alignment");
@@ -470,44 +466,49 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 	i915_kernel_lost_context(dev);
 
 	count = nbox ? nbox : 1;
-
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
-			int ret = i915_emit_box(dev, cliprects, i,
+			ret = i915_emit_box(dev, &cliprects[i],
 					    batch->DR1, batch->DR4);
 			if (ret)
 				return ret;
 		}
 
 		if (!IS_I830(dev) && !IS_845G(dev)) {
-			BEGIN_LP_RING(2);
-			if (IS_I965G(dev)) {
+			ret = BEGIN_LP_RING(2);
+			if (ret)
+				return ret;
+
+			if (INTEL_INFO(dev)->gen >= 4) {
 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
 				OUT_RING(batch->start);
 			} else {
 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
 				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
 			}
-			ADVANCE_LP_RING();
 		} else {
-			BEGIN_LP_RING(4);
+			ret = BEGIN_LP_RING(4);
+			if (ret)
+				return ret;
+
 			OUT_RING(MI_BATCH_BUFFER);
 			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
 			OUT_RING(batch->start + batch->used - 4);
 			OUT_RING(0);
-			ADVANCE_LP_RING();
 		}
+		ADVANCE_LP_RING();
 	}
 
 
-	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
-		BEGIN_LP_RING(2);
+	if (IS_G4X(dev) || IS_GEN5(dev)) {
+		if (BEGIN_LP_RING(2) == 0) {
 		OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
 		OUT_RING(MI_NOOP);
 		ADVANCE_LP_RING();
+		}
 	}
-	i915_emit_breadcrumb(dev);
 
+	i915_emit_breadcrumb(dev);
 	return 0;
 }
 
@@ -516,6 +517,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv =
 		dev->primary->master->driver_priv;
+	int ret;
 
 	if (!master_priv->sarea_priv)
 		return -EINVAL;
@@ -527,12 +529,13 @@ static int i915_dispatch_flip(struct drm_device * dev)
 
 	i915_kernel_lost_context(dev);
 
-	BEGIN_LP_RING(2);
+	ret = BEGIN_LP_RING(10);
+	if (ret)
+		return ret;
+
 	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
 	OUT_RING(0);
-	ADVANCE_LP_RING();
 
-	BEGIN_LP_RING(6);
 	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
 	OUT_RING(0);
 	if (dev_priv->current_page == 0) {
@@ -543,33 +546,32 @@ static int i915_dispatch_flip(struct drm_device * dev)
 		dev_priv->current_page = 0;
 	}
 	OUT_RING(0);
-	ADVANCE_LP_RING();
 
-	BEGIN_LP_RING(2);
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
 	OUT_RING(0);
+
 	ADVANCE_LP_RING();
 
 	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
 
-	BEGIN_LP_RING(4);
+	if (BEGIN_LP_RING(4) == 0) {
 	OUT_RING(MI_STORE_DWORD_INDEX);
 	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	OUT_RING(dev_priv->counter);
 	OUT_RING(0);
 	ADVANCE_LP_RING();
+	}
 
 	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
 	return 0;
 }
 
-static int i915_quiescent(struct drm_device * dev)
+static int i915_quiescent(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
 
 	i915_kernel_lost_context(dev);
-	return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
-				      dev_priv->render_ring.size - 8);
+	return intel_wait_ring_idle(ring);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -765,6 +767,21 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_BSD:
 		value = HAS_BSD(dev);
 		break;
+	case I915_PARAM_HAS_BLT:
+		value = HAS_BLT(dev);
+		break;
+	case I915_PARAM_HAS_RELAXED_FENCING:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_COHERENT_RINGS:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_EXEC_CONSTANTS:
+		value = INTEL_INFO(dev)->gen >= 4;
+		break;
+	case I915_PARAM_HAS_RELAXED_DELTA:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -820,7 +837,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
-	struct intel_ring_buffer *ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	if (!I915_NEED_GFX_HWS(dev))
 		return -EINVAL;
@@ -853,8 +870,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 				" G33 hw status page\n");
 		return -ENOMEM;
 	}
-	ring->status_page.page_addr = dev_priv->hws_map.handle;
-	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+	ring->status_page.page_addr =
+		(void __force __iomem *)dev_priv->hws_map.handle;
+	memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
 	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
 
 	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
@@ -888,12 +906,12 @@ static int
 intel_alloc_mchbar_resource(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp_lo, temp_hi = 0;
 	u64 mchbar_addr;
 	int ret;
 
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
 	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
 	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -920,7 +938,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 		return ret;
 	}
 
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
 				       upper_32_bits(dev_priv->mch_res.start));
 
@@ -934,7 +952,7 @@ static void
 intel_setup_mchbar(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp;
 	bool enabled;
 
@@ -971,7 +989,7 @@ static void
 intel_teardown_mchbar(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp;
 
 	if (dev_priv->mchbar_need_disable) {
@@ -990,174 +1008,6 @@ intel_teardown_mchbar(struct drm_device *dev)
 		release_resource(&dev_priv->mch_res);
 }
 
-/**
- * i915_probe_agp - get AGP bootup configuration
- * @pdev: PCI device
- * @aperture_size: returns AGP aperture configured size
- * @preallocated_size: returns size of BIOS preallocated AGP space
- *
- * Since Intel integrated graphics are UMA, the BIOS has to set aside
- * some RAM for the framebuffer at early boot.  This code figures out
- * how much was set aside so we can use it for our own purposes.
- */
-static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
-			  uint32_t *preallocated_size,
-			  uint32_t *start)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u16 tmp = 0;
-	unsigned long overhead;
-	unsigned long stolen;
-
-	/* Get the fb aperture size and "stolen" memory amount. */
-	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
-
-	*aperture_size = 1024 * 1024;
-	*preallocated_size = 1024 * 1024;
-
-	switch (dev->pdev->device) {
-	case PCI_DEVICE_ID_INTEL_82830_CGC:
-	case PCI_DEVICE_ID_INTEL_82845G_IG:
-	case PCI_DEVICE_ID_INTEL_82855GM_IG:
-	case PCI_DEVICE_ID_INTEL_82865_IG:
-		if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
-			*aperture_size *= 64;
-		else
-			*aperture_size *= 128;
-		break;
-	default:
-		/* 9xx supports large sizes, just look at the length */
-		*aperture_size = pci_resource_len(dev->pdev, 2);
-		break;
-	}
-
-	/*
-	 * Some of the preallocated space is taken by the GTT
-	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
-	 */
-	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
-		overhead = 4096;
-	else
-		overhead = (*aperture_size / 1024) + 4096;
-
-	if (IS_GEN6(dev)) {
-		/* SNB has memory control reg at 0x50.w */
-		pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
-
-		switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
-		case INTEL_855_GMCH_GMS_DISABLED:
-			DRM_ERROR("video memory is disabled\n");
-			return -1;
-		case SNB_GMCH_GMS_STOLEN_32M:
-			stolen = 32 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_64M:
-			stolen = 64 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_96M:
-			stolen = 96 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_128M:
-			stolen = 128 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_160M:
-			stolen = 160 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_192M:
-			stolen = 192 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_224M:
-			stolen = 224 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_256M:
-			stolen = 256 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_288M:
-			stolen = 288 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_320M:
-			stolen = 320 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_352M:
-			stolen = 352 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_384M:
-			stolen = 384 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_416M:
-			stolen = 416 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_448M:
-			stolen = 448 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_480M:
-			stolen = 480 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_512M:
-			stolen = 512 * 1024 * 1024;
-			break;
-		default:
-			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
-				  tmp & SNB_GMCH_GMS_STOLEN_MASK);
-			return -1;
-		}
-	} else {
-		switch (tmp & INTEL_GMCH_GMS_MASK) {
-		case INTEL_855_GMCH_GMS_DISABLED:
-			DRM_ERROR("video memory is disabled\n");
-			return -1;
-		case INTEL_855_GMCH_GMS_STOLEN_1M:
-			stolen = 1 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_4M:
-			stolen = 4 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_8M:
-			stolen = 8 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_16M:
-			stolen = 16 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_32M:
-			stolen = 32 * 1024 * 1024;
-			break;
-		case INTEL_915G_GMCH_GMS_STOLEN_48M:
-			stolen = 48 * 1024 * 1024;
-			break;
-		case INTEL_915G_GMCH_GMS_STOLEN_64M:
-			stolen = 64 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_128M:
-			stolen = 128 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_256M:
-			stolen = 256 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_96M:
-			stolen = 96 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_160M:
-			stolen = 160 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_224M:
-			stolen = 224 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_352M:
-			stolen = 352 * 1024 * 1024;
-			break;
-		default:
-			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
-				  tmp & INTEL_GMCH_GMS_MASK);
-			return -1;
-		}
-	}
-
-	*preallocated_size = stolen - overhead;
-	*start = overhead;
-
-	return 0;
-}
-
 #define PTE_ADDRESS_MASK		0xfffff000
 #define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
 #define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
@@ -1167,75 +1017,47 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
 #define PTE_VALID			(1 << 0)
 
 /**
- * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+ *                       a physical one
  * @dev: drm device
- * @gtt_addr: address to translate
+ * @offset: address to translate
  *
- * Some chip functions require allocations from stolen space but need the
- * physical address of the memory in question.  We use this routine
- * to get a physical address suitable for register programming from a given
- * GTT address.
+ * Some chip functions require allocations from stolen space and need the
+ * physical address of the memory in question.
  */
-static unsigned long i915_gtt_to_phys(struct drm_device *dev,
-				      unsigned long gtt_addr)
+static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
 {
-	unsigned long *gtt;
-	unsigned long entry, phys;
-	int gtt_bar = IS_I9XX(dev) ? 0 : 1;
-	int gtt_offset, gtt_size;
-
-	if (IS_I965G(dev)) {
-		if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
-			gtt_offset = 2*1024*1024;
-			gtt_size = 2*1024*1024;
-		} else {
-			gtt_offset = 512*1024;
-			gtt_size = 512*1024;
-		}
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = dev_priv->bridge_dev;
+	u32 base;
+
+#if 0
+	/* On the machines I have tested the Graphics Base of Stolen Memory
+	 * is unreliable, so compute the base by subtracting the stolen memory
+	 * from the Top of Low Usable DRAM which is where the BIOS places
+	 * the graphics stolen memory.
+	 */
+	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+		/* top 32bits are reserved = 0 */
+		pci_read_config_dword(pdev, 0xA4, &base);
 	} else {
-		gtt_bar = 3;
-		gtt_offset = 0;
-		gtt_size = pci_resource_len(dev->pdev, gtt_bar);
-	}
-
-	gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
-			 gtt_size);
-	if (!gtt) {
-		DRM_ERROR("ioremap of GTT failed\n");
-		return 0;
-	}
-
-	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
-
-	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
-
-	/* Mask out these reserved bits on this hardware. */
-	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
-	    IS_I945G(dev) || IS_I945GM(dev)) {
-		entry &= ~PTE_ADDRESS_MASK_HIGH;
-	}
-
-	/* If it's not a mapping type we know, then bail. */
-	if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
-	    (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
-		iounmap(gtt);
-		return 0;
-	}
-
-	if (!(entry & PTE_VALID)) {
-		DRM_ERROR("bad GTT entry in stolen space\n");
-		iounmap(gtt);
-		return 0;
+		/* XXX presume 8xx is the same as i915 */
+		pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+	}
+#else
+	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+		u16 val;
+		pci_read_config_word(pdev, 0xb0, &val);
+		base = val >> 4 << 20;
+	} else {
+		u8 val;
+		pci_read_config_byte(pdev, 0x9c, &val);
+		base = val >> 3 << 27;
 	}
+	base -= dev_priv->mm.gtt->stolen_size;
+#endif
 
-	iounmap(gtt);
-
-	phys =(entry & PTE_ADDRESS_MASK) |
-		((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
-
-	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
-
-	return phys;
+	return base + offset;
 }
 
 static void i915_warn_stolen(struct drm_device *dev)
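[Annotation, not part of the patch] The shifts in the new i915_stolen_to_phys() appear to decode a "base of stolen memory" field from GMCH config space: on gen4+/G33 the 16-bit register at 0xb0 seems to carry the base in 1MB units in its upper 12 bits (hence val >> 4 << 20), while older parts use the 8-bit register at 0x9c with the base in 128MB units in its upper 5 bits (val >> 3 << 27). A small hedged sketch of just that conversion, using hypothetical helper names:

	/* Illustrative helpers mirroring the shifts used above. */
	static u32 base_from_bsm16(u16 val) { return (u32)(val >> 4) << 20; }	/* 1MB granularity */
	static u32 base_from_bsm8(u8 val)   { return (u32)(val >> 3) << 27; }	/* 128MB granularity */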
@@ -1251,54 +1073,35 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 	unsigned long cfb_base;
 	unsigned long ll_base = 0;
 
-	/* Leave 1M for line length buffer & misc. */
-	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
-	if (!compressed_fb) {
-		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
-		i915_warn_stolen(dev);
-		return;
-	}
-
-	compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
-	if (!compressed_fb) {
-		i915_warn_stolen(dev);
-		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
-		return;
-	}
-
-	cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
-	if (!cfb_base) {
-		DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
-		drm_mm_put_block(compressed_fb);
-	}
+	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+	if (compressed_fb)
+		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+	if (!compressed_fb)
+		goto err;
 
-	if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
-		compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
-						    4096, 0);
-		if (!compressed_llb) {
-			i915_warn_stolen(dev);
-			return;
-		}
+	cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+	if (!cfb_base)
+		goto err_fb;
 
-		compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
-		if (!compressed_llb) {
-			i915_warn_stolen(dev);
-			return;
-		}
+	if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+						    4096, 4096, 0);
+		if (compressed_llb)
+			compressed_llb = drm_mm_get_block(compressed_llb,
+							  4096, 4096);
+		if (!compressed_llb)
+			goto err_fb;
 
-		ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
-		if (!ll_base) {
-			DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
-			drm_mm_put_block(compressed_fb);
-			drm_mm_put_block(compressed_llb);
-		}
+		ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+		if (!ll_base)
+			goto err_llb;
 	}
 
 	dev_priv->cfb_size = size;
 
 	intel_disable_fbc(dev);
 	dev_priv->compressed_fb = compressed_fb;
-	if (IS_IRONLAKE_M(dev))
+	if (HAS_PCH_SPLIT(dev))
 		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
 	else if (IS_GM45(dev)) {
 		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1308,8 +1111,17 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 		dev_priv->compressed_llb = compressed_llb;
 	}
 
-	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
-		      ll_base, size >> 20);
+	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+		      cfb_base, ll_base, size >> 20);
+	return;
+
+err_llb:
+	drm_mm_put_block(compressed_llb);
+err_fb:
+	drm_mm_put_block(compressed_fb);
+err:
+	dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+	i915_warn_stolen(dev);
 }
 
 static void i915_cleanup_compression(struct drm_device *dev)
@@ -1340,14 +1152,16 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
 	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 	if (state == VGA_SWITCHEROO_ON) {
 		printk(KERN_INFO "i915: switched on\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		/* i915 resume handler doesn't set to D0 */
 		pci_set_power_state(dev->pdev, PCI_D0);
 		i915_resume(dev);
-		drm_kms_helper_poll_enable(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	} else {
 		printk(KERN_ERR "i915: switched off\n");
-		drm_kms_helper_poll_disable(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		i915_suspend(dev, pmm);
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
 
@@ -1362,26 +1176,20 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
 	return can_switch;
 }
 
-static int i915_load_modeset_init(struct drm_device *dev,
-				  unsigned long prealloc_start,
-				  unsigned long prealloc_size,
-				  unsigned long agp_size)
+static int i915_load_gem_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fb_bar = IS_I9XX(dev) ? 2 : 0;
-	int ret = 0;
-
-	dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
-		0xff000000;
+	unsigned long prealloc_size, gtt_size, mappable_size;
+	int ret;
 
-	/* Basic memrange allocator for stolen space (aka vram) */
-	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
-	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
+	prealloc_size = dev_priv->mm.gtt->stolen_size;
+	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
-	/* We're off and running w/KMS */
-	dev_priv->mm.suspended = 0;
+	/* Basic memrange allocator for stolen space */
+	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
 
-	/* Let GEM Manage from end of prealloc space to end of aperture.
+	/* Let GEM Manage all of the aperture.
 	 *
 	 * However, leave one page at the end still bound to the scratch page.
 	 * There are a number of places where the hardware apparently
@@ -1390,41 +1198,58 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	 * at the last page of the aperture.  One page should be enough to
 	 * keep any prefetching inside of the aperture.
 	 */
-	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
+	i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
 
 	mutex_lock(&dev->struct_mutex);
 	ret = i915_gem_init_ringbuffer(dev);
 	mutex_unlock(&dev->struct_mutex);
 	if (ret)
-		goto out;
+		return ret;
 
 	/* Try to set up FBC with a reasonable compressed buffer size */
 	if (I915_HAS_FBC(dev) && i915_powersave) {
 		int cfb_size;
 
-		/* Try to get an 8M buffer... */
-		if (prealloc_size > (9*1024*1024))
-			cfb_size = 8*1024*1024;
+		/* Leave 1M for line length buffer & misc. */
+
+		/* Try to get a 32M buffer... */
+		if (prealloc_size > (36*1024*1024))
+			cfb_size = 32*1024*1024;
 		else /* fall back to 7/8 of the stolen space */
 			cfb_size = prealloc_size * 7 / 8;
 		i915_setup_compression(dev, cfb_size);
 	}
 
-	/* Allow hardware batchbuffers unless told otherwise.
-	 */
+	/* Allow hardware batchbuffers unless told otherwise. */
 	dev_priv->allow_batchbuffer = 1;
+	return 0;
+}
+
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
-	ret = intel_init_bios(dev);
+	ret = intel_parse_bios(dev);
 	if (ret)
 		DRM_INFO("failed to find VBIOS tables\n");
 
-	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
+	/* If we have > 1 VGA cards, then we need to arbitrate access
+	 * to the common VGA resources.
+	 *
+	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+	 * then we do not take part in VGA arbitration and the
+	 * vga_client_register() fails with -ENODEV.
+	 */
 	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-	if (ret)
-		goto cleanup_ringbuffer;
+	if (ret && ret != -ENODEV)
+		goto out;
+
+	intel_register_dsm_handler();
 
 	ret = vga_switcheroo_register_client(dev->pdev,
 					     i915_switcheroo_set_state,
+					     NULL,
 					     i915_switcheroo_can_switch);
 	if (ret)
 		goto cleanup_vga_client;
@@ -1435,37 +1260,41 @@ static int i915_load_modeset_init(struct drm_device *dev,
 
 	intel_modeset_init(dev);
 
-	ret = drm_irq_install(dev);
+	ret = i915_load_gem_init(dev);
 	if (ret)
 		goto cleanup_vga_switcheroo;
 
+	intel_modeset_gem_init(dev);
+
+	ret = drm_irq_install(dev);
+	if (ret)
+		goto cleanup_gem;
+
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
 	dev->vblank_disable_allowed = 1;
 
-	/*
-	 * Initialize the hardware status page IRQ location.
-	 */
-
-	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
-
 	ret = intel_fbdev_init(dev);
 	if (ret)
 		goto cleanup_irq;
 
 	drm_kms_helper_poll_init(dev);
+
+	/* We're off and running w/KMS */
+	dev_priv->mm.suspended = 0;
+
 	return 0;
 
 cleanup_irq:
 	drm_irq_uninstall(dev);
+cleanup_gem:
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_cleanup_ringbuffer(dev);
+	mutex_unlock(&dev->struct_mutex);
 cleanup_vga_switcheroo:
 	vga_switcheroo_unregister_client(dev->pdev);
 cleanup_vga_client:
 	vga_client_register(dev->pdev, NULL, NULL, NULL);
-cleanup_ringbuffer:
-	mutex_lock(&dev->struct_mutex);
-	i915_gem_cleanup_ringbuffer(dev);
-	mutex_unlock(&dev->struct_mutex);
 out:
 	return ret;
 }
@@ -1601,152 +1430,12 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
 	}
 }
 
-struct v_table {
-	u8 vid;
-	unsigned long vd; /* in .1 mil */
-	unsigned long vm; /* in .1 mil */
-	u8 pvid;
-};
-
-static struct v_table v_table[] = {
-	{ 0, 16125, 15000, 0x7f, },
-	{ 1, 16000, 14875, 0x7e, },
-	{ 2, 15875, 14750, 0x7d, },
-	{ 3, 15750, 14625, 0x7c, },
-	{ 4, 15625, 14500, 0x7b, },
-	{ 5, 15500, 14375, 0x7a, },
-	{ 6, 15375, 14250, 0x79, },
-	{ 7, 15250, 14125, 0x78, },
-	{ 8, 15125, 14000, 0x77, },
-	{ 9, 15000, 13875, 0x76, },
-	{ 10, 14875, 13750, 0x75, },
-	{ 11, 14750, 13625, 0x74, },
-	{ 12, 14625, 13500, 0x73, },
-	{ 13, 14500, 13375, 0x72, },
-	{ 14, 14375, 13250, 0x71, },
-	{ 15, 14250, 13125, 0x70, },
-	{ 16, 14125, 13000, 0x6f, },
-	{ 17, 14000, 12875, 0x6e, },
-	{ 18, 13875, 12750, 0x6d, },
-	{ 19, 13750, 12625, 0x6c, },
-	{ 20, 13625, 12500, 0x6b, },
-	{ 21, 13500, 12375, 0x6a, },
-	{ 22, 13375, 12250, 0x69, },
-	{ 23, 13250, 12125, 0x68, },
-	{ 24, 13125, 12000, 0x67, },
-	{ 25, 13000, 11875, 0x66, },
-	{ 26, 12875, 11750, 0x65, },
-	{ 27, 12750, 11625, 0x64, },
-	{ 28, 12625, 11500, 0x63, },
-	{ 29, 12500, 11375, 0x62, },
-	{ 30, 12375, 11250, 0x61, },
-	{ 31, 12250, 11125, 0x60, },
-	{ 32, 12125, 11000, 0x5f, },
-	{ 33, 12000, 10875, 0x5e, },
-	{ 34, 11875, 10750, 0x5d, },
-	{ 35, 11750, 10625, 0x5c, },
-	{ 36, 11625, 10500, 0x5b, },
-	{ 37, 11500, 10375, 0x5a, },
-	{ 38, 11375, 10250, 0x59, },
-	{ 39, 11250, 10125, 0x58, },
-	{ 40, 11125, 10000, 0x57, },
-	{ 41, 11000, 9875, 0x56, },
-	{ 42, 10875, 9750, 0x55, },
-	{ 43, 10750, 9625, 0x54, },
-	{ 44, 10625, 9500, 0x53, },
-	{ 45, 10500, 9375, 0x52, },
-	{ 46, 10375, 9250, 0x51, },
-	{ 47, 10250, 9125, 0x50, },
-	{ 48, 10125, 9000, 0x4f, },
-	{ 49, 10000, 8875, 0x4e, },
-	{ 50, 9875, 8750, 0x4d, },
-	{ 51, 9750, 8625, 0x4c, },
-	{ 52, 9625, 8500, 0x4b, },
-	{ 53, 9500, 8375, 0x4a, },
-	{ 54, 9375, 8250, 0x49, },
-	{ 55, 9250, 8125, 0x48, },
-	{ 56, 9125, 8000, 0x47, },
-	{ 57, 9000, 7875, 0x46, },
-	{ 58, 8875, 7750, 0x45, },
-	{ 59, 8750, 7625, 0x44, },
-	{ 60, 8625, 7500, 0x43, },
-	{ 61, 8500, 7375, 0x42, },
-	{ 62, 8375, 7250, 0x41, },
-	{ 63, 8250, 7125, 0x40, },
-	{ 64, 8125, 7000, 0x3f, },
-	{ 65, 8000, 6875, 0x3e, },
-	{ 66, 7875, 6750, 0x3d, },
-	{ 67, 7750, 6625, 0x3c, },
-	{ 68, 7625, 6500, 0x3b, },
-	{ 69, 7500, 6375, 0x3a, },
-	{ 70, 7375, 6250, 0x39, },
-	{ 71, 7250, 6125, 0x38, },
-	{ 72, 7125, 6000, 0x37, },
-	{ 73, 7000, 5875, 0x36, },
-	{ 74, 6875, 5750, 0x35, },
-	{ 75, 6750, 5625, 0x34, },
-	{ 76, 6625, 5500, 0x33, },
-	{ 77, 6500, 5375, 0x32, },
-	{ 78, 6375, 5250, 0x31, },
-	{ 79, 6250, 5125, 0x30, },
-	{ 80, 6125, 5000, 0x2f, },
-	{ 81, 6000, 4875, 0x2e, },
-	{ 82, 5875, 4750, 0x2d, },
-	{ 83, 5750, 4625, 0x2c, },
-	{ 84, 5625, 4500, 0x2b, },
-	{ 85, 5500, 4375, 0x2a, },
-	{ 86, 5375, 4250, 0x29, },
-	{ 87, 5250, 4125, 0x28, },
-	{ 88, 5125, 4000, 0x27, },
-	{ 89, 5000, 3875, 0x26, },
-	{ 90, 4875, 3750, 0x25, },
-	{ 91, 4750, 3625, 0x24, },
-	{ 92, 4625, 3500, 0x23, },
-	{ 93, 4500, 3375, 0x22, },
-	{ 94, 4375, 3250, 0x21, },
-	{ 95, 4250, 3125, 0x20, },
-	{ 96, 4125, 3000, 0x1f, },
-	{ 97, 4125, 3000, 0x1e, },
-	{ 98, 4125, 3000, 0x1d, },
-	{ 99, 4125, 3000, 0x1c, },
-	{ 100, 4125, 3000, 0x1b, },
-	{ 101, 4125, 3000, 0x1a, },
-	{ 102, 4125, 3000, 0x19, },
-	{ 103, 4125, 3000, 0x18, },
-	{ 104, 4125, 3000, 0x17, },
-	{ 105, 4125, 3000, 0x16, },
-	{ 106, 4125, 3000, 0x15, },
-	{ 107, 4125, 3000, 0x14, },
-	{ 108, 4125, 3000, 0x13, },
-	{ 109, 4125, 3000, 0x12, },
-	{ 110, 4125, 3000, 0x11, },
-	{ 111, 4125, 3000, 0x10, },
-	{ 112, 4125, 3000, 0x0f, },
-	{ 113, 4125, 3000, 0x0e, },
-	{ 114, 4125, 3000, 0x0d, },
-	{ 115, 4125, 3000, 0x0c, },
-	{ 116, 4125, 3000, 0x0b, },
-	{ 117, 4125, 3000, 0x0a, },
-	{ 118, 4125, 3000, 0x09, },
-	{ 119, 4125, 3000, 0x08, },
-	{ 120, 1125, 0, 0x07, },
-	{ 121, 1000, 0, 0x06, },
-	{ 122, 875, 0, 0x05, },
-	{ 123, 750, 0, 0x04, },
-	{ 124, 625, 0, 0x03, },
-	{ 125, 500, 0, 0x02, },
-	{ 126, 375, 0, 0x01, },
-	{ 127, 0, 0, 0x00, },
-};
-
-struct cparams {
-	int i;
-	int t;
-	int m;
-	int c;
-};
-
-static struct cparams cparams[] = {
+static const struct cparams {
+	u16 i;
+	u16 t;
+	u16 m;
+	u16 c;
+} cparams[] = {
 	{ 1, 1333, 301, 28664 },
 	{ 1, 1066, 294, 24460 },
 	{ 1, 800, 294, 25192 },
@@ -1812,21 +1501,145 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv) | |||
1812 | return ((m * x) / 127) - b; | 1501 | return ((m * x) / 127) - b; |
1813 | } | 1502 | } |
1814 | 1503 | ||
1815 | static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) | 1504 | static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) |
1816 | { | 1505 | { |
1817 | unsigned long val = 0; | 1506 | static const struct v_table { |
1818 | int i; | 1507 | u16 vd; /* in .1 mil */ |
1819 | 1508 | u16 vm; /* in .1 mil */ | |
1820 | for (i = 0; i < ARRAY_SIZE(v_table); i++) { | 1509 | } v_table[] = { |
1821 | if (v_table[i].pvid == pxvid) { | 1510 | { 0, 0, }, |
1822 | if (IS_MOBILE(dev_priv->dev)) | 1511 | { 375, 0, }, |
1823 | val = v_table[i].vm; | 1512 | { 500, 0, }, |
1824 | else | 1513 | { 625, 0, }, |
1825 | val = v_table[i].vd; | 1514 | { 750, 0, }, |
1826 | } | 1515 | { 875, 0, }, |
1827 | } | 1516 | { 1000, 0, }, |
1828 | 1517 | { 1125, 0, }, | |
1829 | return val; | 1518 | { 4125, 3000, }, |
1519 | { 4125, 3000, }, | ||
1520 | { 4125, 3000, }, | ||
1521 | { 4125, 3000, }, | ||
1522 | { 4125, 3000, }, | ||
1523 | { 4125, 3000, }, | ||
1524 | { 4125, 3000, }, | ||
1525 | { 4125, 3000, }, | ||
1526 | { 4125, 3000, }, | ||
1527 | { 4125, 3000, }, | ||
1528 | { 4125, 3000, }, | ||
1529 | { 4125, 3000, }, | ||
1530 | { 4125, 3000, }, | ||
1531 | { 4125, 3000, }, | ||
1532 | { 4125, 3000, }, | ||
1533 | { 4125, 3000, }, | ||
1534 | { 4125, 3000, }, | ||
1535 | { 4125, 3000, }, | ||
1536 | { 4125, 3000, }, | ||
1537 | { 4125, 3000, }, | ||
1538 | { 4125, 3000, }, | ||
1539 | { 4125, 3000, }, | ||
1540 | { 4125, 3000, }, | ||
1541 | { 4125, 3000, }, | ||
1542 | { 4250, 3125, }, | ||
1543 | { 4375, 3250, }, | ||
1544 | { 4500, 3375, }, | ||
1545 | { 4625, 3500, }, | ||
1546 | { 4750, 3625, }, | ||
1547 | { 4875, 3750, }, | ||
1548 | { 5000, 3875, }, | ||
1549 | { 5125, 4000, }, | ||
1550 | { 5250, 4125, }, | ||
1551 | { 5375, 4250, }, | ||
1552 | { 5500, 4375, }, | ||
1553 | { 5625, 4500, }, | ||
1554 | { 5750, 4625, }, | ||
1555 | { 5875, 4750, }, | ||
1556 | { 6000, 4875, }, | ||
1557 | { 6125, 5000, }, | ||
1558 | { 6250, 5125, }, | ||
1559 | { 6375, 5250, }, | ||
1560 | { 6500, 5375, }, | ||
1561 | { 6625, 5500, }, | ||
1562 | { 6750, 5625, }, | ||
1563 | { 6875, 5750, }, | ||
1564 | { 7000, 5875, }, | ||
1565 | { 7125, 6000, }, | ||
1566 | { 7250, 6125, }, | ||
1567 | { 7375, 6250, }, | ||
1568 | { 7500, 6375, }, | ||
1569 | { 7625, 6500, }, | ||
1570 | { 7750, 6625, }, | ||
1571 | { 7875, 6750, }, | ||
1572 | { 8000, 6875, }, | ||
1573 | { 8125, 7000, }, | ||
1574 | { 8250, 7125, }, | ||
1575 | { 8375, 7250, }, | ||
1576 | { 8500, 7375, }, | ||
1577 | { 8625, 7500, }, | ||
1578 | { 8750, 7625, }, | ||
1579 | { 8875, 7750, }, | ||
1580 | { 9000, 7875, }, | ||
1581 | { 9125, 8000, }, | ||
1582 | { 9250, 8125, }, | ||
1583 | { 9375, 8250, }, | ||
1584 | { 9500, 8375, }, | ||
1585 | { 9625, 8500, }, | ||
1586 | { 9750, 8625, }, | ||
1587 | { 9875, 8750, }, | ||
1588 | { 10000, 8875, }, | ||
1589 | { 10125, 9000, }, | ||
1590 | { 10250, 9125, }, | ||
1591 | { 10375, 9250, }, | ||
1592 | { 10500, 9375, }, | ||
1593 | { 10625, 9500, }, | ||
1594 | { 10750, 9625, }, | ||
1595 | { 10875, 9750, }, | ||
1596 | { 11000, 9875, }, | ||
1597 | { 11125, 10000, }, | ||
1598 | { 11250, 10125, }, | ||
1599 | { 11375, 10250, }, | ||
1600 | { 11500, 10375, }, | ||
1601 | { 11625, 10500, }, | ||
1602 | { 11750, 10625, }, | ||
1603 | { 11875, 10750, }, | ||
1604 | { 12000, 10875, }, | ||
1605 | { 12125, 11000, }, | ||
1606 | { 12250, 11125, }, | ||
1607 | { 12375, 11250, }, | ||
1608 | { 12500, 11375, }, | ||
1609 | { 12625, 11500, }, | ||
1610 | { 12750, 11625, }, | ||
1611 | { 12875, 11750, }, | ||
1612 | { 13000, 11875, }, | ||
1613 | { 13125, 12000, }, | ||
1614 | { 13250, 12125, }, | ||
1615 | { 13375, 12250, }, | ||
1616 | { 13500, 12375, }, | ||
1617 | { 13625, 12500, }, | ||
1618 | { 13750, 12625, }, | ||
1619 | { 13875, 12750, }, | ||
1620 | { 14000, 12875, }, | ||
1621 | { 14125, 13000, }, | ||
1622 | { 14250, 13125, }, | ||
1623 | { 14375, 13250, }, | ||
1624 | { 14500, 13375, }, | ||
1625 | { 14625, 13500, }, | ||
1626 | { 14750, 13625, }, | ||
1627 | { 14875, 13750, }, | ||
1628 | { 15000, 13875, }, | ||
1629 | { 15125, 14000, }, | ||
1630 | { 15250, 14125, }, | ||
1631 | { 15375, 14250, }, | ||
1632 | { 15500, 14375, }, | ||
1633 | { 15625, 14500, }, | ||
1634 | { 15750, 14625, }, | ||
1635 | { 15875, 14750, }, | ||
1636 | { 16000, 14875, }, | ||
1637 | { 16125, 15000, }, | ||
1638 | }; | ||
1639 | if (dev_priv->info->is_mobile) | ||
1640 | return v_table[pxvid].vm; | ||
1641 | else | ||
1642 | return v_table[pxvid].vd; | ||
1830 | } | 1643 | } |
1831 | 1644 | ||
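The rework above replaces the removed 128-entry v_table, which carried a separate pvid column and was searched linearly, with a table indexed directly by pxvid; since the old table paired vid N with pvid 0x7f - N, the new rows are the old vd/vm pairs in reverse order. A minimal sketch of why the two lookups agree, assuming the caller already masks pxvid to the 0..0x7f range as the 128-entry table implies; the names below are illustrative, not from the driver:

    /* Illustrative only: equivalence of the old linear search and the
     * new direct index, assuming pvid == 0x7f - vid as the removed
     * table suggests. */
    struct old_row { unsigned char vid, pvid; unsigned short vd, vm; };
    struct new_row { unsigned short vd, vm; };

    static unsigned short old_lookup(const struct old_row *t, int n,
                                     unsigned char pxvid, int mobile)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (t[i].pvid == pxvid)
                            return mobile ? t[i].vm : t[i].vd;
            return 0;                       /* old code fell back to 0 */
    }

    static unsigned short new_lookup(const struct new_row *t,
                                     unsigned char pxvid, int mobile)
    {
            /* pxvid must already be masked to 0..0x7f by the caller */
            return mobile ? t[pxvid].vm : t[pxvid].vd;
    }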
1832 | void i915_update_gfx_val(struct drm_i915_private *dev_priv) | 1645 | void i915_update_gfx_val(struct drm_i915_private *dev_priv) |
@@ -1907,7 +1720,7 @@ static struct drm_i915_private *i915_mch_dev; | |||
1907 | * - dev_priv->fmax | 1720 | * - dev_priv->fmax |
1908 | * - dev_priv->gpu_busy | 1721 | * - dev_priv->gpu_busy |
1909 | */ | 1722 | */ |
1910 | DEFINE_SPINLOCK(mchdev_lock); | 1723 | static DEFINE_SPINLOCK(mchdev_lock); |
1911 | 1724 | ||
1912 | /** | 1725 | /** |
1913 | * i915_read_mch_val - return value for IPS use | 1726 | * i915_read_mch_val - return value for IPS use |
@@ -2047,6 +1860,26 @@ out_unlock: | |||
2047 | EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); | 1860 | EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); |
2048 | 1861 | ||
2049 | /** | 1862 | /** |
1863 | * Tells the intel_ips driver that the i915 driver is now loaded, if | ||
1864 | * IPS got loaded first. | ||
1865 | * | ||
1866 | * This awkward dance is so that neither module has to depend on the | ||
1867 | * other in order for IPS to do the appropriate communication of | ||
1868 | * GPU turbo limits to i915. | ||
1869 | */ | ||
1870 | static void | ||
1871 | ips_ping_for_i915_load(void) | ||
1872 | { | ||
1873 | void (*link)(void); | ||
1874 | |||
1875 | link = symbol_get(ips_link_to_i915_driver); | ||
1876 | if (link) { | ||
1877 | link(); | ||
1878 | symbol_put(ips_link_to_i915_driver); | ||
1879 | } | ||
1880 | } | ||
1881 | |||
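ips_ping_for_i915_load() above resolves ips_link_to_i915_driver at run time rather than link time, which is what lets the two modules avoid a hard dependency on each other. A minimal sketch of that symbol_get()/symbol_put() pattern with hypothetical symbol and function names; in practice the provider and consumer live in separate modules and share a header declaring the symbol:

    #include <linux/module.h>

    /* Provider side: export a hook other modules may look up at run time. */
    void example_link_notify(void)
    {
            /* react to the peer driver having loaded */
    }
    EXPORT_SYMBOL_GPL(example_link_notify);

    /* Consumer side: only call the hook if the provider is actually loaded;
     * symbol_get() also pins the provider module until symbol_put(). */
    static void example_ping_provider(void)
    {
            void (*link)(void);

            link = symbol_get(example_link_notify);
            if (link) {
                    link();
                    symbol_put(example_link_notify);
            }
    }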
1882 | /** | ||
2050 | * i915_driver_load - setup chip and create an initial config | 1883 | * i915_driver_load - setup chip and create an initial config |
2051 | * @dev: DRM device | 1884 | * @dev: DRM device |
2052 | * @flags: startup flags | 1885 | * @flags: startup flags |
@@ -2060,9 +1893,9 @@ EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); | |||
2060 | int i915_driver_load(struct drm_device *dev, unsigned long flags) | 1893 | int i915_driver_load(struct drm_device *dev, unsigned long flags) |
2061 | { | 1894 | { |
2062 | struct drm_i915_private *dev_priv; | 1895 | struct drm_i915_private *dev_priv; |
2063 | resource_size_t base, size; | ||
2064 | int ret = 0, mmio_bar; | 1896 | int ret = 0, mmio_bar; |
2065 | uint32_t agp_size, prealloc_size, prealloc_start; | 1897 | uint32_t agp_size; |
1898 | |||
2066 | /* i915 has 4 more counters */ | 1899 | /* i915 has 4 more counters */ |
2067 | dev->counters += 4; | 1900 | dev->counters += 4; |
2068 | dev->types[6] = _DRM_STAT_IRQ; | 1901 | dev->types[6] = _DRM_STAT_IRQ; |
@@ -2078,11 +1911,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2078 | dev_priv->dev = dev; | 1911 | dev_priv->dev = dev; |
2079 | dev_priv->info = (struct intel_device_info *) flags; | 1912 | dev_priv->info = (struct intel_device_info *) flags; |
2080 | 1913 | ||
2081 | /* Add register map (needed for suspend/resume) */ | ||
2082 | mmio_bar = IS_I9XX(dev) ? 0 : 1; | ||
2083 | base = pci_resource_start(dev->pdev, mmio_bar); | ||
2084 | size = pci_resource_len(dev->pdev, mmio_bar); | ||
2085 | |||
2086 | if (i915_get_bridge_dev(dev)) { | 1914 | if (i915_get_bridge_dev(dev)) { |
2087 | ret = -EIO; | 1915 | ret = -EIO; |
2088 | goto free_priv; | 1916 | goto free_priv; |
@@ -2092,16 +1920,36 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2092 | if (IS_GEN2(dev)) | 1920 | if (IS_GEN2(dev)) |
2093 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | 1921 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); |
2094 | 1922 | ||
2095 | dev_priv->regs = ioremap(base, size); | 1923 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) |
1924 | * using 32bit addressing, overwriting memory if HWS is located | ||
1925 | * above 4GB. | ||
1926 | * | ||
1927 | * The documentation also mentions an issue with undefined | ||
1928 | * behaviour if any general state is accessed within a page above 4GB, | ||
1929 | * which also needs to be handled carefully. | ||
1930 | */ | ||
1931 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | ||
1932 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); | ||
1933 | |||
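DMA_BIT_MASK(n) is just the low n bits set, so these two calls cap the bus addresses that coherent allocations (such as the physical status page) may use: roughly 1 GiB for gen2 and 4 GiB for Broadwater/Crestline, matching the erratum described above. Illustrative values from the generic definition, not taken from this file:

    DMA_BIT_MASK(30) == (1ULL << 30) - 1 == 0x3fffffff   /* gen2: below ~1 GiB */
    DMA_BIT_MASK(32) == (1ULL << 32) - 1 == 0xffffffff   /* 965G/GM: below 4 GiB */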
1934 | mmio_bar = IS_GEN2(dev) ? 1 : 0; | ||
1935 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); | ||
2096 | if (!dev_priv->regs) { | 1936 | if (!dev_priv->regs) { |
2097 | DRM_ERROR("failed to map registers\n"); | 1937 | DRM_ERROR("failed to map registers\n"); |
2098 | ret = -EIO; | 1938 | ret = -EIO; |
2099 | goto put_bridge; | 1939 | goto put_bridge; |
2100 | } | 1940 | } |
2101 | 1941 | ||
1942 | dev_priv->mm.gtt = intel_gtt_get(); | ||
1943 | if (!dev_priv->mm.gtt) { | ||
1944 | DRM_ERROR("Failed to initialize GTT\n"); | ||
1945 | ret = -ENODEV; | ||
1946 | goto out_rmmap; | ||
1947 | } | ||
1948 | |||
1949 | agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | ||
1950 | |||
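gtt_mappable_entries is, as the name suggests, the number of GTT page table entries covering the CPU-mappable aperture, so shifting by PAGE_SHIFT turns an entry count into a byte size; the result stands in for the old aper_size * 1024 * 1024 arithmetic in the io_mapping and MTRR setup below. A worked example with assumed numbers:

    /* e.g. 65536 mappable entries with 4 KiB pages (PAGE_SHIFT == 12): */
    agp_size = 65536 << 12;          /* 268435456 bytes == 256 MiB aperture */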
2102 | dev_priv->mm.gtt_mapping = | 1951 | dev_priv->mm.gtt_mapping = |
2103 | io_mapping_create_wc(dev->agp->base, | 1952 | io_mapping_create_wc(dev->agp->base, agp_size); |
2104 | dev->agp->agp_info.aper_size * 1024*1024); | ||
2105 | if (dev_priv->mm.gtt_mapping == NULL) { | 1953 | if (dev_priv->mm.gtt_mapping == NULL) { |
2106 | ret = -EIO; | 1954 | ret = -EIO; |
2107 | goto out_rmmap; | 1955 | goto out_rmmap; |
@@ -2113,72 +1961,60 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2113 | * MTRR if present. Even if a UC MTRR isn't present. | 1961 | * MTRR if present. Even if a UC MTRR isn't present. |
2114 | */ | 1962 | */ |
2115 | dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, | 1963 | dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, |
2116 | dev->agp->agp_info.aper_size * | 1964 | agp_size, |
2117 | 1024 * 1024, | ||
2118 | MTRR_TYPE_WRCOMB, 1); | 1965 | MTRR_TYPE_WRCOMB, 1); |
2119 | if (dev_priv->mm.gtt_mtrr < 0) { | 1966 | if (dev_priv->mm.gtt_mtrr < 0) { |
2120 | DRM_INFO("MTRR allocation failed. Graphics " | 1967 | DRM_INFO("MTRR allocation failed. Graphics " |
2121 | "performance may suffer.\n"); | 1968 | "performance may suffer.\n"); |
2122 | } | 1969 | } |
2123 | 1970 | ||
2124 | ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start); | 1971 | /* The i915 workqueue is primarily used for batched retirement of |
2125 | if (ret) | 1972 | * requests (and thus managing bo) once the task has been completed |
2126 | goto out_iomapfree; | 1973 | * by the GPU. i915_gem_retire_requests() is called directly when we |
2127 | 1974 | * need high-priority retirement, such as waiting for an explicit | |
2128 | if (prealloc_size > intel_max_stolen) { | 1975 | * bo. |
2129 | DRM_INFO("detected %dM stolen memory, trimming to %dM\n", | 1976 | * |
2130 | prealloc_size >> 20, intel_max_stolen >> 20); | 1977 | * It is also used for periodic low-priority events, such as |
2131 | prealloc_size = intel_max_stolen; | 1978 | * idle-timers and recording error state. |
2132 | } | 1979 | * |
2133 | 1980 | * All tasks on the workqueue are expected to acquire the dev mutex | |
2134 | dev_priv->wq = create_singlethread_workqueue("i915"); | 1981 | * so there is no point in running more than one instance of the |
1982 | * workqueue at any time: max_active = 1 and NON_REENTRANT. | ||
1983 | */ | ||
1984 | dev_priv->wq = alloc_workqueue("i915", | ||
1985 | WQ_UNBOUND | WQ_NON_REENTRANT, | ||
1986 | 1); | ||
2135 | if (dev_priv->wq == NULL) { | 1987 | if (dev_priv->wq == NULL) { |
2136 | DRM_ERROR("Failed to create our workqueue.\n"); | 1988 | DRM_ERROR("Failed to create our workqueue.\n"); |
2137 | ret = -ENOMEM; | 1989 | ret = -ENOMEM; |
2138 | goto out_iomapfree; | 1990 | goto out_mtrrfree; |
2139 | } | 1991 | } |
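The comment above spells out why the driver-private workqueue is limited to a single in-flight work item. A minimal, self-contained sketch of that allocation pattern with hypothetical names (not the driver's own work items):

    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;
    static struct work_struct example_work;

    static void example_work_fn(struct work_struct *work)
    {
            /* at most one instance of work on example_wq runs at a time */
    }

    static int example_setup(void)
    {
            /* max_active = 1, as in the i915 allocation above */
            example_wq = alloc_workqueue("example",
                                         WQ_UNBOUND | WQ_NON_REENTRANT, 1);
            if (!example_wq)
                    return -ENOMEM;

            INIT_WORK(&example_work, example_work_fn);
            queue_work(example_wq, &example_work);
            return 0;
    }

    static void example_teardown(void)
    {
            destroy_workqueue(example_wq);   /* waits for queued work first */
    }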
2140 | 1992 | ||
2141 | /* enable GEM by default */ | 1993 | /* enable GEM by default */ |
2142 | dev_priv->has_gem = 1; | 1994 | dev_priv->has_gem = 1; |
2143 | 1995 | ||
2144 | if (prealloc_size > agp_size * 3 / 4) { | 1996 | intel_irq_init(dev); |
2145 | DRM_ERROR("Detected broken video BIOS with %d/%dkB of video " | ||
2146 | "memory stolen.\n", | ||
2147 | prealloc_size / 1024, agp_size / 1024); | ||
2148 | DRM_ERROR("Disabling GEM. (try reducing stolen memory or " | ||
2149 | "updating the BIOS to fix).\n"); | ||
2150 | dev_priv->has_gem = 0; | ||
2151 | } | ||
2152 | |||
2153 | if (dev_priv->has_gem == 0 && | ||
2154 | drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
2155 | DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); | ||
2156 | ret = -ENODEV; | ||
2157 | goto out_iomapfree; | ||
2158 | } | ||
2159 | |||
2160 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | ||
2161 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | ||
2162 | if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { | ||
2163 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ | ||
2164 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | ||
2165 | } | ||
2166 | 1997 | ||
2167 | /* Try to make sure MCHBAR is enabled before poking at it */ | 1998 | /* Try to make sure MCHBAR is enabled before poking at it */ |
2168 | intel_setup_mchbar(dev); | 1999 | intel_setup_mchbar(dev); |
2000 | intel_setup_gmbus(dev); | ||
2001 | intel_opregion_setup(dev); | ||
2002 | |||
2003 | /* Make sure the bios did its job and set up vital registers */ | ||
2004 | intel_setup_bios(dev); | ||
2169 | 2005 | ||
2170 | i915_gem_load(dev); | 2006 | i915_gem_load(dev); |
2171 | 2007 | ||
2172 | /* Init HWS */ | 2008 | /* Init HWS */ |
2173 | if (!I915_NEED_GFX_HWS(dev)) { | 2009 | if (!I915_NEED_GFX_HWS(dev)) { |
2174 | ret = i915_init_phys_hws(dev); | 2010 | ret = i915_init_phys_hws(dev); |
2175 | if (ret != 0) | 2011 | if (ret) |
2176 | goto out_workqueue_free; | 2012 | goto out_gem_unload; |
2177 | } | 2013 | } |
2178 | 2014 | ||
2179 | if (IS_PINEVIEW(dev)) | 2015 | if (IS_PINEVIEW(dev)) |
2180 | i915_pineview_get_mem_freq(dev); | 2016 | i915_pineview_get_mem_freq(dev); |
2181 | else if (IS_IRONLAKE(dev)) | 2017 | else if (IS_GEN5(dev)) |
2182 | i915_ironlake_get_mem_freq(dev); | 2018 | i915_ironlake_get_mem_freq(dev); |
2183 | 2019 | ||
2184 | /* On the 945G/GM, the chipset reports the MSI capability on the | 2020 | /* On the 945G/GM, the chipset reports the MSI capability on the |
@@ -2195,16 +2031,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2195 | if (!IS_I945G(dev) && !IS_I945GM(dev)) | 2031 | if (!IS_I945G(dev) && !IS_I945GM(dev)) |
2196 | pci_enable_msi(dev->pdev); | 2032 | pci_enable_msi(dev->pdev); |
2197 | 2033 | ||
2198 | spin_lock_init(&dev_priv->user_irq_lock); | 2034 | spin_lock_init(&dev_priv->irq_lock); |
2199 | spin_lock_init(&dev_priv->error_lock); | 2035 | spin_lock_init(&dev_priv->error_lock); |
2200 | dev_priv->trace_irq_seqno = 0; | 2036 | spin_lock_init(&dev_priv->rps_lock); |
2201 | 2037 | ||
2202 | ret = drm_vblank_init(dev, I915_NUM_PIPE); | 2038 | if (IS_MOBILE(dev) || !IS_GEN2(dev)) |
2039 | dev_priv->num_pipe = 2; | ||
2040 | else | ||
2041 | dev_priv->num_pipe = 1; | ||
2203 | 2042 | ||
2204 | if (ret) { | 2043 | ret = drm_vblank_init(dev, dev_priv->num_pipe); |
2205 | (void) i915_driver_unload(dev); | 2044 | if (ret) |
2206 | return ret; | 2045 | goto out_gem_unload; |
2207 | } | ||
2208 | 2046 | ||
2209 | /* Start out suspended */ | 2047 | /* Start out suspended */ |
2210 | dev_priv->mm.suspended = 1; | 2048 | dev_priv->mm.suspended = 1; |
@@ -2212,16 +2050,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2212 | intel_detect_pch(dev); | 2050 | intel_detect_pch(dev); |
2213 | 2051 | ||
2214 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 2052 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
2215 | ret = i915_load_modeset_init(dev, prealloc_start, | 2053 | ret = i915_load_modeset_init(dev); |
2216 | prealloc_size, agp_size); | ||
2217 | if (ret < 0) { | 2054 | if (ret < 0) { |
2218 | DRM_ERROR("failed to init modeset\n"); | 2055 | DRM_ERROR("failed to init modeset\n"); |
2219 | goto out_workqueue_free; | 2056 | goto out_gem_unload; |
2220 | } | 2057 | } |
2221 | } | 2058 | } |
2222 | 2059 | ||
2223 | /* Must be done after probing outputs */ | 2060 | /* Must be done after probing outputs */ |
2224 | intel_opregion_init(dev, 0); | 2061 | intel_opregion_init(dev); |
2062 | acpi_video_register(); | ||
2225 | 2063 | ||
2226 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, | 2064 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, |
2227 | (unsigned long) dev); | 2065 | (unsigned long) dev); |
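setup_timer() binds i915_hangcheck_elapsed to the timer with the device as its argument; the timer is armed elsewhere and torn down with del_timer_sync() in the unload path below. A minimal sketch of that setup_timer()-style lifecycle with hypothetical names and an arbitrary period:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list example_timer;

    static void example_timer_fn(unsigned long data)
    {
            /* inspect the state behind 'data', then re-arm if still needed */
            mod_timer(&example_timer, jiffies + msecs_to_jiffies(1500));
    }

    static void example_timer_start(unsigned long data)
    {
            setup_timer(&example_timer, example_timer_fn, data);
            mod_timer(&example_timer, jiffies + msecs_to_jiffies(1500));
    }

    static void example_timer_stop(void)
    {
            del_timer_sync(&example_timer);  /* waits for a running callback */
    }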
@@ -2231,17 +2069,29 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2231 | dev_priv->mchdev_lock = &mchdev_lock; | 2069 | dev_priv->mchdev_lock = &mchdev_lock; |
2232 | spin_unlock(&mchdev_lock); | 2070 | spin_unlock(&mchdev_lock); |
2233 | 2071 | ||
2234 | /* XXX Prevent module unload due to memory corruption bugs. */ | 2072 | ips_ping_for_i915_load(); |
2235 | __module_get(THIS_MODULE); | ||
2236 | 2073 | ||
2237 | return 0; | 2074 | return 0; |
2238 | 2075 | ||
2239 | out_workqueue_free: | 2076 | out_gem_unload: |
2077 | if (dev_priv->mm.inactive_shrinker.shrink) | ||
2078 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); | ||
2079 | |||
2080 | if (dev->pdev->msi_enabled) | ||
2081 | pci_disable_msi(dev->pdev); | ||
2082 | |||
2083 | intel_teardown_gmbus(dev); | ||
2084 | intel_teardown_mchbar(dev); | ||
2240 | destroy_workqueue(dev_priv->wq); | 2085 | destroy_workqueue(dev_priv->wq); |
2241 | out_iomapfree: | 2086 | out_mtrrfree: |
2087 | if (dev_priv->mm.gtt_mtrr >= 0) { | ||
2088 | mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, | ||
2089 | dev->agp->agp_info.aper_size * 1024 * 1024); | ||
2090 | dev_priv->mm.gtt_mtrr = -1; | ||
2091 | } | ||
2242 | io_mapping_free(dev_priv->mm.gtt_mapping); | 2092 | io_mapping_free(dev_priv->mm.gtt_mapping); |
2243 | out_rmmap: | 2093 | out_rmmap: |
2244 | iounmap(dev_priv->regs); | 2094 | pci_iounmap(dev->pdev, dev_priv->regs); |
2245 | put_bridge: | 2095 | put_bridge: |
2246 | pci_dev_put(dev_priv->bridge_dev); | 2096 | pci_dev_put(dev_priv->bridge_dev); |
2247 | free_priv: | 2097 | free_priv: |
@@ -2252,15 +2102,23 @@ free_priv: | |||
2252 | int i915_driver_unload(struct drm_device *dev) | 2102 | int i915_driver_unload(struct drm_device *dev) |
2253 | { | 2103 | { |
2254 | struct drm_i915_private *dev_priv = dev->dev_private; | 2104 | struct drm_i915_private *dev_priv = dev->dev_private; |
2255 | 2105 | int ret; | |
2256 | i915_destroy_error_state(dev); | ||
2257 | 2106 | ||
2258 | spin_lock(&mchdev_lock); | 2107 | spin_lock(&mchdev_lock); |
2259 | i915_mch_dev = NULL; | 2108 | i915_mch_dev = NULL; |
2260 | spin_unlock(&mchdev_lock); | 2109 | spin_unlock(&mchdev_lock); |
2261 | 2110 | ||
2262 | destroy_workqueue(dev_priv->wq); | 2111 | if (dev_priv->mm.inactive_shrinker.shrink) |
2263 | del_timer_sync(&dev_priv->hangcheck_timer); | 2112 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); |
2113 | |||
2114 | mutex_lock(&dev->struct_mutex); | ||
2115 | ret = i915_gpu_idle(dev); | ||
2116 | if (ret) | ||
2117 | DRM_ERROR("failed to idle hardware: %d\n", ret); | ||
2118 | mutex_unlock(&dev->struct_mutex); | ||
2119 | |||
2120 | /* Cancel the retire work handler, which should be idle now. */ | ||
2121 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); | ||
2264 | 2122 | ||
2265 | io_mapping_free(dev_priv->mm.gtt_mapping); | 2123 | io_mapping_free(dev_priv->mm.gtt_mapping); |
2266 | if (dev_priv->mm.gtt_mtrr >= 0) { | 2124 | if (dev_priv->mm.gtt_mtrr >= 0) { |
@@ -2269,7 +2127,10 @@ int i915_driver_unload(struct drm_device *dev) | |||
2269 | dev_priv->mm.gtt_mtrr = -1; | 2127 | dev_priv->mm.gtt_mtrr = -1; |
2270 | } | 2128 | } |
2271 | 2129 | ||
2130 | acpi_video_unregister(); | ||
2131 | |||
2272 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 2132 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
2133 | intel_fbdev_fini(dev); | ||
2273 | intel_modeset_cleanup(dev); | 2134 | intel_modeset_cleanup(dev); |
2274 | 2135 | ||
2275 | /* | 2136 | /* |
@@ -2281,55 +2142,66 @@ int i915_driver_unload(struct drm_device *dev) | |||
2281 | dev_priv->child_dev = NULL; | 2142 | dev_priv->child_dev = NULL; |
2282 | dev_priv->child_dev_num = 0; | 2143 | dev_priv->child_dev_num = 0; |
2283 | } | 2144 | } |
2284 | drm_irq_uninstall(dev); | 2145 | |
2285 | vga_switcheroo_unregister_client(dev->pdev); | 2146 | vga_switcheroo_unregister_client(dev->pdev); |
2286 | vga_client_register(dev->pdev, NULL, NULL, NULL); | 2147 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
2287 | } | 2148 | } |
2288 | 2149 | ||
2150 | /* Free error state after interrupts are fully disabled. */ | ||
2151 | del_timer_sync(&dev_priv->hangcheck_timer); | ||
2152 | cancel_work_sync(&dev_priv->error_work); | ||
2153 | i915_destroy_error_state(dev); | ||
2154 | |||
2289 | if (dev->pdev->msi_enabled) | 2155 | if (dev->pdev->msi_enabled) |
2290 | pci_disable_msi(dev->pdev); | 2156 | pci_disable_msi(dev->pdev); |
2291 | 2157 | ||
2292 | if (dev_priv->regs != NULL) | 2158 | intel_opregion_fini(dev); |
2293 | iounmap(dev_priv->regs); | ||
2294 | |||
2295 | intel_opregion_free(dev, 0); | ||
2296 | 2159 | ||
2297 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 2160 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
2298 | i915_gem_free_all_phys_object(dev); | 2161 | /* Flush any outstanding unpin_work. */ |
2162 | flush_workqueue(dev_priv->wq); | ||
2299 | 2163 | ||
2300 | mutex_lock(&dev->struct_mutex); | 2164 | mutex_lock(&dev->struct_mutex); |
2165 | i915_gem_free_all_phys_object(dev); | ||
2301 | i915_gem_cleanup_ringbuffer(dev); | 2166 | i915_gem_cleanup_ringbuffer(dev); |
2302 | mutex_unlock(&dev->struct_mutex); | 2167 | mutex_unlock(&dev->struct_mutex); |
2303 | if (I915_HAS_FBC(dev) && i915_powersave) | 2168 | if (I915_HAS_FBC(dev) && i915_powersave) |
2304 | i915_cleanup_compression(dev); | 2169 | i915_cleanup_compression(dev); |
2305 | drm_mm_takedown(&dev_priv->vram); | 2170 | drm_mm_takedown(&dev_priv->mm.stolen); |
2306 | i915_gem_lastclose(dev); | ||
2307 | 2171 | ||
2308 | intel_cleanup_overlay(dev); | 2172 | intel_cleanup_overlay(dev); |
2173 | |||
2174 | if (!I915_NEED_GFX_HWS(dev)) | ||
2175 | i915_free_hws(dev); | ||
2309 | } | 2176 | } |
2310 | 2177 | ||
2178 | if (dev_priv->regs != NULL) | ||
2179 | pci_iounmap(dev->pdev, dev_priv->regs); | ||
2180 | |||
2181 | intel_teardown_gmbus(dev); | ||
2311 | intel_teardown_mchbar(dev); | 2182 | intel_teardown_mchbar(dev); |
2312 | 2183 | ||
2184 | destroy_workqueue(dev_priv->wq); | ||
2185 | |||
2313 | pci_dev_put(dev_priv->bridge_dev); | 2186 | pci_dev_put(dev_priv->bridge_dev); |
2314 | kfree(dev->dev_private); | 2187 | kfree(dev->dev_private); |
2315 | 2188 | ||
2316 | return 0; | 2189 | return 0; |
2317 | } | 2190 | } |
2318 | 2191 | ||
2319 | int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) | 2192 | int i915_driver_open(struct drm_device *dev, struct drm_file *file) |
2320 | { | 2193 | { |
2321 | struct drm_i915_file_private *i915_file_priv; | 2194 | struct drm_i915_file_private *file_priv; |
2322 | 2195 | ||
2323 | DRM_DEBUG_DRIVER("\n"); | 2196 | DRM_DEBUG_DRIVER("\n"); |
2324 | i915_file_priv = (struct drm_i915_file_private *) | 2197 | file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); |
2325 | kmalloc(sizeof(*i915_file_priv), GFP_KERNEL); | 2198 | if (!file_priv) |
2326 | |||
2327 | if (!i915_file_priv) | ||
2328 | return -ENOMEM; | 2199 | return -ENOMEM; |
2329 | 2200 | ||
2330 | file_priv->driver_priv = i915_file_priv; | 2201 | file->driver_priv = file_priv; |
2331 | 2202 | ||
2332 | INIT_LIST_HEAD(&i915_file_priv->mm.request_list); | 2203 | spin_lock_init(&file_priv->mm.lock); |
2204 | INIT_LIST_HEAD(&file_priv->mm.request_list); | ||
2333 | 2205 | ||
2334 | return 0; | 2206 | return 0; |
2335 | } | 2207 | } |
@@ -2351,7 +2223,7 @@ void i915_driver_lastclose(struct drm_device * dev) | |||
2351 | drm_i915_private_t *dev_priv = dev->dev_private; | 2223 | drm_i915_private_t *dev_priv = dev->dev_private; |
2352 | 2224 | ||
2353 | if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { | 2225 | if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { |
2354 | drm_fb_helper_restore(); | 2226 | intel_fb_restore_mode(dev); |
2355 | vga_switcheroo_process_delayed_switch(); | 2227 | vga_switcheroo_process_delayed_switch(); |
2356 | return; | 2228 | return; |
2357 | } | 2229 | } |
@@ -2372,11 +2244,11 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) | |||
2372 | i915_mem_release(dev, file_priv, dev_priv->agp_heap); | 2244 | i915_mem_release(dev, file_priv, dev_priv->agp_heap); |
2373 | } | 2245 | } |
2374 | 2246 | ||
2375 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) | 2247 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) |
2376 | { | 2248 | { |
2377 | struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; | 2249 | struct drm_i915_file_private *file_priv = file->driver_priv; |
2378 | 2250 | ||
2379 | kfree(i915_file_priv); | 2251 | kfree(file_priv); |
2380 | } | 2252 | } |
2381 | 2253 | ||
2382 | struct drm_ioctl_desc i915_ioctls[] = { | 2254 | struct drm_ioctl_desc i915_ioctls[] = { |