Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_edid.c                 |  47
-rw-r--r--  drivers/gpu/drm/drm_mm.c                   |   3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c            |  30
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c          |   7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c     |   7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h     |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h      |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c      | 113
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c         |   6
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c          |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c     |  58
-rw-r--r--  drivers/gpu/drm/radeon/atom.c              |   7
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c       |   2
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c     |   3
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c           |   9
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h            |   9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c   |   9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c    |  44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c |   5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c         |  10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h        |   3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c     |  36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h     |   4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c       | 107
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c             |   9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c               |   6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c               |  18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c        |  49
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c    | 108
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c         |   3
-rw-r--r--  drivers/gpu/vga/Kconfig                    |   8
-rw-r--r--  drivers/gpu/vga/vgaarb.c                   |  29
32 files changed, 466 insertions(+), 288 deletions(-)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f665b05592f3..ab6c97330412 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 	return mode;
 }
 
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded.  Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size.  Technically we
+ * should be checking refresh rate too.  Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+			    struct detailed_pixel_timing *pt)
+{
+	int i;
+	static const struct {
+		int w, h;
+	} cea_interlaced[] = {
+		{ 1920, 1080 },
+		{  720,  480 },
+		{ 1440,  480 },
+		{ 2880,  480 },
+		{  720,  576 },
+		{ 1440,  576 },
+		{ 2880,  576 },
+	};
+	static const int n_sizes =
+		sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+		return;
+
+	for (i = 0; i < n_sizes; i++) {
+		if ((mode->hdisplay == cea_interlaced[i].w) &&
+		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
+			mode->vdisplay *= 2;
+			mode->vsync_start *= 2;
+			mode->vsync_end *= 2;
+			mode->vtotal *= 2;
+			mode->vtotal |= 1;
+		}
+	}
+
+	mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
 	drm_mode_set_name(mode);
 
-	if (pt->misc & DRM_EDID_PT_INTERLACED)
-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	drm_mode_do_interlace_quirk(mode, pt);
 
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
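
The quirk above exists because some EDID detailed timings for CEA interlaced formats encode field height rather than frame height. A minimal user-space sketch of its effect follows (standalone C; the sample vertical timings are hypothetical, and the kernel operates on struct drm_display_mode rather than this reduced struct):

#include <stdio.h>

/* Simplified stand-in for the vertical fields of drm_display_mode. */
struct mode { int hdisplay, vdisplay, vsync_start, vsync_end, vtotal; };

/* CEA frame sizes whose EDID detailed timings may carry field height. */
static const struct { int w, h; } cea_interlaced[] = {
	{ 1920, 1080 }, { 720, 480 }, { 1440, 480 }, { 2880, 480 },
	{ 720, 576 }, { 1440, 576 }, { 2880, 576 },
};

static void interlace_quirk(struct mode *m)
{
	unsigned i;

	for (i = 0; i < sizeof(cea_interlaced)/sizeof(cea_interlaced[0]); i++) {
		if (m->hdisplay == cea_interlaced[i].w &&
		    m->vdisplay == cea_interlaced[i].h / 2) {
			m->vdisplay *= 2;	/* field height -> frame height */
			m->vsync_start *= 2;
			m->vsync_end *= 2;
			m->vtotal *= 2;
			m->vtotal |= 1;		/* interlaced vtotal is odd */
		}
	}
}

int main(void)
{
	struct mode m = { 1920, 540, 542, 547, 562 };	/* hypothetical 1080i timing */

	interlace_quirk(&m);
	printf("%dx%d, vtotal=%d\n", m.hdisplay, m.vdisplay, m.vtotal);
	return 0;
}

Note the final |= 1: an interlaced frame has an odd total line count (here 1125, the classic 1080i value).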
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cdec32977129..2ac074c8f5d2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 			wasted += alignment - tmp;
 		}
 
-		if (entry->size >= size + wasted) {
+		if (entry->size >= size + wasted &&
+		    (entry->start + wasted + size) <= end) {
 			if (!best_match)
 				return entry;
 			if (entry->size < best_size) {
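
The drm_mm change is a one-line bug fix: the ranged search previously verified only that a hole was large enough, not that the resulting allocation stayed below the caller's end limit. A small standalone C sketch of the corrected predicate (hole values hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* A free hole [start, start+size) in the allocator's address space. */
struct hole { unsigned long start, size; };

/* Corrected check: the hole must fit size+wasted bytes AND the chosen
 * placement must not run past the caller's range limit 'end'. */
static bool fits_in_range(const struct hole *h, unsigned long size,
			  unsigned long wasted, unsigned long end)
{
	return h->size >= size + wasted &&
	       h->start + wasted + size <= end;
}

int main(void)
{
	struct hole h = { .start = 4000, .size = 1024 };

	/* Big enough, but would end at 4512 > 4096: must be rejected. */
	printf("%d\n", fits_in_range(&h, 512, 0, 4096));	/* 0 */
	printf("%d\n", fits_in_range(&h, 512, 0, 8192));	/* 1 */
	return 0;
}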
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 79beffcf5936..cf4cb3e9a0c2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -176,6 +176,8 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 
 static int i915_drm_freeze(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	pci_save_state(dev->pdev);
 
 	/* If KMS is active, we do the leavevt stuff here */
@@ -191,17 +193,12 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 	i915_save_state(dev);
 
-	return 0;
-}
-
-static void i915_drm_suspend(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	intel_opregion_free(dev, 1);
 
 	/* Modeset on resume, not lid events */
 	dev_priv->modeset_on_lid = 0;
+
+	return 0;
 }
 
 static int i915_suspend(struct drm_device *dev, pm_message_t state)
@@ -221,8 +218,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 	if (error)
 		return error;
 
-	i915_drm_suspend(dev);
-
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
@@ -237,6 +232,10 @@ static int i915_drm_thaw(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int error = 0;
 
+	i915_restore_state(dev);
+
+	intel_opregion_init(dev, 1);
+
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_lock(&dev->struct_mutex);
@@ -263,10 +262,6 @@ static int i915_resume(struct drm_device *dev)
 
 	pci_set_master(dev->pdev);
 
-	i915_restore_state(dev);
-
-	intel_opregion_init(dev, 1);
-
 	return i915_drm_thaw(dev);
 }
 
@@ -423,8 +418,6 @@ static int i915_pm_suspend(struct device *dev)
 	if (error)
 		return error;
 
-	i915_drm_suspend(drm_dev);
-
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, PCI_D3hot);
 
@@ -464,13 +457,8 @@ static int i915_pm_poweroff(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	int error;
-
-	error = i915_drm_freeze(drm_dev);
-	if (!error)
-		i915_drm_suspend(drm_dev);
 
-	return error;
+	return i915_drm_freeze(drm_dev);
 }
 
 const struct dev_pm_ops i915_pm_ops = {
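
The i915 rework folds the old i915_drm_suspend() into i915_drm_freeze() and moves the matching restore into i915_drm_thaw(), so suspend, hibernate poweroff, resume and thaw all pass through one freeze/thaw pair. A rough user-space model of that symmetry (illustrative names only, not the real dev_pm_ops signatures):

#include <stdio.h>

/* Illustrative stand-ins for the shared freeze/thaw helpers. */
static int drm_freeze(void)
{
	puts("save state + opregion_free");	/* done once, for every path */
	return 0;
}

static int drm_thaw(void)
{
	puts("restore state + opregion_init");
	return 0;
}

/* Each PM callback is now just a thin wrapper around the shared pair. */
static int pm_suspend(void)  { return drm_freeze(); }
static int pm_poweroff(void) { return drm_freeze(); }
static int pm_resume(void)   { return drm_thaw(); }

int main(void)
{
	pm_suspend();
	pm_resume();
	pm_poweroff();
	return 0;
}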
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b1d0acbae4e4..c2e8a45780d5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -636,6 +636,13 @@ static const struct dmi_system_id bad_lid_status[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
 		},
 	},
+	{
+		.ident = "Clevo M5x0N",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+			DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
+		},
+	},
 	{ }
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 2cd0fad17dac..0e9cd1d49130 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -5861,13 +5861,12 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nvbios *bios = &dev_priv->VBIOS;
 	struct init_exec iexec = { true, false };
-	unsigned long flags;
 
-	spin_lock_irqsave(&bios->lock, flags);
+	mutex_lock(&bios->lock);
 	bios->display.output = dcbent;
 	parse_init_table(bios, table, &iexec);
 	bios->display.output = NULL;
-	spin_unlock_irqrestore(&bios->lock, flags);
+	mutex_unlock(&bios->lock);
 }
 
 static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,7 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
 	struct nvbios *bios = &dev_priv->VBIOS;
 
 	memset(bios, 0, sizeof(struct nvbios));
-	spin_lock_init(&bios->lock);
+	mutex_init(&bios->lock);
 	bios->dev = dev;
 
 	if (!NVShadowVBIOS(dev, bios->data))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 68446fd4146b..fd94bd6dc264 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,7 +205,7 @@ struct nvbios {
 	struct drm_device *dev;
 	struct nouveau_bios_info pub;
 
-	spinlock_t lock;
+	struct mutex lock;
 
 	uint8_t data[NV_PROM_SIZE];
 	unsigned int length;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 5445cefdd03e..1c15ef37b71c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -583,6 +583,7 @@ struct drm_nouveau_private {
 	uint64_t vm_end;
 	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 	int vm_vram_pt_nr;
+	uint64_t vram_sys_base;
 
 	/* the mtrr covering the FB */
 	int fb_mtrr;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 8f3a12f614ed..2dc09dbd817d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 		     uint32_t flags, uint64_t phys)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj **pgt;
-	unsigned psz, pfl, pages;
-
-	if (virt >= dev_priv->vm_gart_base &&
-	    (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
-		psz = 12;
-		pgt = &dev_priv->gart_info.sg_ctxdma;
-		pfl = 0x21;
-		virt -= dev_priv->vm_gart_base;
-	} else
-	if (virt >= dev_priv->vm_vram_base &&
-	    (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
-		psz = 16;
-		pgt = dev_priv->vm_vram_pt;
-		pfl = 0x01;
-		virt -= dev_priv->vm_vram_base;
-	} else {
-		NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
-			 virt, virt + size - 1);
-		return -EINVAL;
-	}
+	struct nouveau_gpuobj *pgt;
+	unsigned block;
+	int i;
 
-	pages = size >> psz;
+	virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
+	size = (size >> 16) << 1;
+
+	phys |= ((uint64_t)flags << 32);
+	phys |= 1;
+	if (dev_priv->vram_sys_base) {
+		phys += dev_priv->vram_sys_base;
+		phys |= 0x30;
+	}
 
 	dev_priv->engine.instmem.prepare_access(dev, true);
-	if (flags & 0x80000000) {
-		while (pages--) {
-			struct nouveau_gpuobj *pt = pgt[virt >> 29];
-			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+	while (size) {
+		unsigned offset_h = upper_32_bits(phys);
+		unsigned offset_l = lower_32_bits(phys);
+		unsigned pte, end;
+
+		for (i = 7; i >= 0; i--) {
+			block = 1 << (i + 1);
+			if (size >= block && !(virt & (block - 1)))
+				break;
+		}
+		offset_l |= (i << 7);
 
-			nv_wo32(dev, pt, pte++, 0x00000000);
-			nv_wo32(dev, pt, pte++, 0x00000000);
+		phys += block << 15;
+		size -= block;
 
-			virt += (1 << psz);
-		}
-	} else {
-		while (pages--) {
-			struct nouveau_gpuobj *pt = pgt[virt >> 29];
-			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
-			unsigned offset_h = upper_32_bits(phys) & 0xff;
-			unsigned offset_l = lower_32_bits(phys);
+		while (block) {
+			pgt = dev_priv->vm_vram_pt[virt >> 14];
+			pte = virt & 0x3ffe;
 
-			nv_wo32(dev, pt, pte++, offset_l | pfl);
-			nv_wo32(dev, pt, pte++, offset_h | flags);
+			end = pte + block;
+			if (end > 16384)
+				end = 16384;
+			block -= (end - pte);
+			virt += (end - pte);
 
-			phys += (1 << psz);
-			virt += (1 << psz);
+			while (pte < end) {
+				nv_wo32(dev, pgt, pte++, offset_l);
+				nv_wo32(dev, pgt, pte++, offset_h);
+			}
 		}
 	}
 	dev_priv->engine.instmem.finish_access(dev);
@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 void
 nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 {
-	nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *pgt;
+	unsigned pages, pte, end;
+
+	virt -= dev_priv->vm_vram_base;
+	pages = (size >> 16) << 1;
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	while (pages) {
+		pgt = dev_priv->vm_vram_pt[virt >> 29];
+		pte = (virt & 0x1ffe0000ULL) >> 15;
+
+		end = pte + pages;
+		if (end > 16384)
+			end = 16384;
+		pages -= (end - pte);
+		virt += (end - pte) << 15;
+
+		while (pte < end)
+			nv_wo32(dev, pgt, pte++, 0);
+	}
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv_wr32(dev, 0x100c80, 0x00050001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+		return;
+	}
+
+	nv_wr32(dev, 0x100c80, 0x00000001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+	}
 }
 
 /*
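
The rewritten nv50_mem_vm_bind_linear() maps VRAM in the largest naturally aligned power-of-two runs it can, encoding the chosen run size into bits 7 and up of the low PTE word. The standalone C sketch below isolates just that block-selection loop (inputs hypothetical; units are the 64 KiB-page-derived PTE slots used in the diff):

#include <stdio.h>

/* Pick the largest block of 2^(i+1) page-table entries (i = 7..0) that
 * both fits in the remaining size and is naturally aligned at virt.
 * Mirrors the for (i = 7; i >= 0; i--) loop in nv50_mem_vm_bind_linear(). */
static int pick_block(unsigned virt, unsigned size, unsigned *block)
{
	int i;

	for (i = 7; i >= 0; i--) {
		*block = 1u << (i + 1);
		if (size >= *block && !(virt & (*block - 1)))
			break;
	}
	return i;	/* encoded into bits 7+ of the low PTE word */
}

int main(void)
{
	unsigned block;

	/* Aligned start, large remainder: takes the maximum 256-entry block. */
	printf("i=%d block=%u\n", pick_block(0, 1024, &block), block);
	/* Misaligned start: falls back to the smallest 2-entry block. */
	printf("i=%d block=%u\n", pick_block(2, 1024, &block), block);
	return 0;
}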
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index d0e038d28948..1d73b15d70da 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 			    struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
-	uint8_t saved_seq1, saved_pi, saved_rpc1;
+	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
 	uint8_t saved_palette0[3], saved_palette_mask;
 	uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
 	int i;
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 	/* only implemented for head A for now */
 	NVSetOwner(dev, 0);
 
+	saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
+
 	saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
 	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
 
@@ -203,6 +206,7 @@ out:
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
 	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
 
 	if (blue == 0x18) {
 		NV_INFO(dev, "Load detected on head A\n");
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 58b917c3341b..21ac6e49b6ee 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
 					    nouveau_encoder(encoder)->restore.output);
 
 	nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+	nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
 }
 
 static int nv17_tv_create_resources(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 94400f777e7f..f0dc4e36ef05 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
 	for (i = 0x1700; i <= 0x1710; i += 4)
 		priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
 
+	if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
+		dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
+	else
+		dev_priv->vram_sys_base = 0;
+
 	/* Reserve the last MiB of VRAM, we should probably try to avoid
 	 * setting up the below tables over the top of the VBIOS image at
 	 * some point.
@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev)
 	 * We map the entire fake channel into the start of the PRAMIN BAR
 	 */
 	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
 				     0, &priv->pramin_pt);
 	if (ret)
 		return ret;
 
-	for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
-		if (v < (c_offset + c_size))
-			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
-		else
-			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+	v = c_offset | 1;
+	if (dev_priv->vram_sys_base) {
+		v += dev_priv->vram_sys_base;
+		v |= 0x30;
+	}
+
+	i = 0;
+	while (v < dev_priv->vram_sys_base + c_offset + c_size) {
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+		v += 0x1000;
+		i += 8;
+	}
+
+	while (i < pt_size) {
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
 		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+		i += 8;
 	}
 
 	BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	uint32_t pte, pte_end, vram;
+	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
+	uint32_t pte, pte_end;
+	uint64_t vram;
 
 	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
 		return -EINVAL;
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
 		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
 
-	pte = (gpuobj->im_pramin->start >> 12) << 3;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	pte = (gpuobj->im_pramin->start >> 12) << 1;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 	vram = gpuobj->im_backing_start;
 
 	NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
 		 gpuobj->im_pramin->start, pte, pte_end);
 	NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
 
+	vram |= 1;
+	if (dev_priv->vram_sys_base) {
+		vram += dev_priv->vram_sys_base;
+		vram |= 0x30;
+	}
+
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-
-		pte += 8;
+		nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
+		nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
 		vram += NV50_INSTMEM_PAGE_SIZE;
 	}
 	dev_priv->engine.instmem.finish_access(dev);
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	if (gpuobj->im_bound == 0)
 		return -EINVAL;
 
-	pte = (gpuobj->im_pramin->start >> 12) << 3;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	pte = (gpuobj->im_pramin->start >> 12) << 1;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-		pte += 8;
+		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
 	}
 	dev_priv->engine.instmem.finish_access(dev);
 
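
nouveau_mem.c and nv50_instmem.c now build PTEs identically: set the present bit, and on the IGP chipsets (0xaa/0xac) whose "VRAM" is carved from system memory, rebase the address by vram_sys_base and set the 0x30 target bits before splitting the value into two 32-bit words. A small standalone C sketch of that packing (addresses hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Build the two 32-bit PTE words used by the NV50 code after this
 * change: bit 0 is the present bit, and on IGPs the address is
 * rebased by vram_sys_base with the 0x30 target bits set. */
static void pack_pte(uint64_t phys, uint64_t vram_sys_base,
		     uint32_t *lo, uint32_t *hi)
{
	uint64_t v = phys | 1;		/* present */

	if (vram_sys_base) {		/* IGP: stolen system memory */
		v += vram_sys_base;
		v |= 0x30;
	}
	*lo = (uint32_t)v;		/* lower_32_bits() */
	*hi = (uint32_t)(v >> 32);	/* upper_32_bits() */
}

int main(void)
{
	uint32_t lo, hi;

	pack_pte(0x20000, 0, &lo, &hi);			/* discrete card */
	printf("lo=0x%08x hi=0x%08x\n", lo, hi);
	pack_pte(0x20000, 0x1c0000000ULL, &lo, &hi);	/* hypothetical IGP base */
	printf("lo=0x%08x hi=0x%08x\n", lo, hi);
	return 0;
}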
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index e3b44562d265..7f152f66f196 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
 
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <asm/unaligned.h>
 
 #define ATOM_DEBUG
 
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
 	case ATOM_ARG_PS:
 		idx = U8(*ptr);
 		(*ptr)++;
-		val = le32_to_cpu(ctx->ps[idx]);
+		/* get_unaligned_le32 avoids unaligned accesses from atombios
+		 * tables, noticed on a DEC Alpha. */
+		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
 		if (print)
 			DEBUG("PS[0x%02X,0x%04X]", idx, val);
 		break;
@@ -640,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
 	uint8_t count = U8((*ptr)++);
 	SDEBUG("   count: %d\n", count);
 	if (arg == ATOM_UNIT_MICROSEC)
-		schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+		udelay(count);
 	else
 		schedule_timeout_uninterruptible(msecs_to_jiffies(count));
 }
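
Two independent fixes here: the ATOM parameter space is byte-packed by the BIOS, so a plain 32-bit load can fault on strict-alignment machines such as the Alpha named in the comment, and microsecond delays are now busy-waited with udelay() because schedule_timeout_uninterruptible() only has jiffy granularity, far coarser than a microsecond. A user-space equivalent of the safe unaligned read (a sketch, not the kernel's implementation):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a little-endian u32 from a possibly unaligned address.  memcpy
 * lets the compiler emit whatever byte/word loads the CPU allows. */
static uint32_t get_unaligned_le32(const void *p)
{
	uint8_t b[4];

	memcpy(b, p, 4);
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	/* 1-byte offset into the buffer: unaligned on purpose. */
	uint8_t buf[8] = { 0, 0x78, 0x56, 0x34, 0x12 };

	printf("0x%08x\n", get_unaligned_le32(buf + 1));	/* 0x12345678 */
	return 0;
}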
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index b32eeea5bb8b..99915a682d59 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -350,7 +350,7 @@ retry:
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
 	if (args.ucReplyStatus && !args.ucDataOutLen) {
-		if (args.ucReplyStatus == 0x20 && retry_count < 10)
+		if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
 			goto retry;
 		DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
 			  req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
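
The aux-channel change is easy to miss: retry_count was compared but never incremented, so a persistently busy (0x20) reply looped forever. Post-incrementing inside the comparison bounds the loop. A tiny standalone C model of the pattern:

#include <stdio.h>

int main(void)
{
	int retry_count = 0;
	int reply_status = 0x20;	/* hypothetical: channel stays busy */

retry:
	/* retry_count++ < 10 both tests and advances the counter, so a
	 * permanently busy channel gives up after ten retries instead of
	 * spinning forever as the pre-fix 'retry_count < 10' did. */
	if (reply_status == 0x20 && retry_count++ < 10)
		goto retry;

	/* retry_count is now 11: ten retries taken, final test failed. */
	printf("stopped with retry_count=%d\n", retry_count);
	return 0;
}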
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index af1c3ca8a4cb..446b765ac72a 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
 void r600_vb_ib_put(struct radeon_device *rdev)
 {
 	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
-	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 }
 
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 6d5a711c2e91..75bcf35a0931 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev,
 
 	gb_tiling_config |= R600_BANK_SWAPS(1);
 
-	backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
-							dev_priv->r600_max_backends,
-							(0xff << dev_priv->r600_max_backends) & 0xff);
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
+		backend_map = 0x28;
+	else
+		backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+								dev_priv->r600_max_backends,
+								(0xff << dev_priv->r600_max_backends) & 0xff);
 	gb_tiling_config |= R600_BACKEND_MAP(backend_map);
 
 	cc_gc_shader_pipe_config =
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index f57480ba1355..c0356bb193e5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -96,6 +96,7 @@ extern int radeon_audio;
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE		16
 #define RADEON_DEBUGFS_MAX_NUM_FILES	32
 #define RADEONFB_CONN_LIMIT		4
@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
  */
 struct radeon_ib {
 	struct list_head	list;
-	unsigned long		idx;
+	unsigned		idx;
 	uint64_t		gpu_addr;
 	struct radeon_fence	*fence;
 	uint32_t		*ptr;
 	uint32_t		length_dw;
+	bool			free;
 };
 
 /*
@@ -377,10 +379,9 @@ struct radeon_ib {
 struct radeon_ib_pool {
 	struct mutex		mutex;
 	struct radeon_bo	*robj;
-	struct list_head	scheduled_ibs;
 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
 	bool			ready;
-	DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+	unsigned		head_id;
 };
 
 struct radeon_cp {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 2dcda6115874..4d8831548a5f 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -206,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 		*connector_type = DRM_MODE_CONNECTOR_DVID;
 	}
 
+	/* Asrock RS600 board lists the DVI port as HDMI */
+	if ((dev->pdev->device == 0x7941) &&
+	    (dev->pdev->subsystem_vendor == 0x1849) &&
+	    (dev->pdev->subsystem_device == 0x7941)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+	}
+
 	/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
 	if ((dev->pdev->device == 0x7941) &&
 	    (dev->pdev->subsystem_vendor == 0x147b) &&
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e7b19440102e..22d476160d52 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1279,47 +1279,47 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 	rdev->mode_info.connector_table = radeon_connector_table;
 	if (rdev->mode_info.connector_table == CT_NONE) {
 #ifdef CONFIG_PPC_PMAC
-		if (machine_is_compatible("PowerBook3,3")) {
+		if (of_machine_is_compatible("PowerBook3,3")) {
 			/* powerbook with VGA */
 			rdev->mode_info.connector_table = CT_POWERBOOK_VGA;
-		} else if (machine_is_compatible("PowerBook3,4") ||
-			   machine_is_compatible("PowerBook3,5")) {
+		} else if (of_machine_is_compatible("PowerBook3,4") ||
+			   of_machine_is_compatible("PowerBook3,5")) {
 			/* powerbook with internal tmds */
 			rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL;
-		} else if (machine_is_compatible("PowerBook5,1") ||
-			   machine_is_compatible("PowerBook5,2") ||
-			   machine_is_compatible("PowerBook5,3") ||
-			   machine_is_compatible("PowerBook5,4") ||
-			   machine_is_compatible("PowerBook5,5")) {
+		} else if (of_machine_is_compatible("PowerBook5,1") ||
+			   of_machine_is_compatible("PowerBook5,2") ||
+			   of_machine_is_compatible("PowerBook5,3") ||
+			   of_machine_is_compatible("PowerBook5,4") ||
+			   of_machine_is_compatible("PowerBook5,5")) {
 			/* powerbook with external single link tmds (sil164) */
 			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
-		} else if (machine_is_compatible("PowerBook5,6")) {
+		} else if (of_machine_is_compatible("PowerBook5,6")) {
 			/* powerbook with external dual or single link tmds */
 			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
-		} else if (machine_is_compatible("PowerBook5,7") ||
-			   machine_is_compatible("PowerBook5,8") ||
-			   machine_is_compatible("PowerBook5,9")) {
+		} else if (of_machine_is_compatible("PowerBook5,7") ||
+			   of_machine_is_compatible("PowerBook5,8") ||
+			   of_machine_is_compatible("PowerBook5,9")) {
 			/* PowerBook6,2 ? */
 			/* powerbook with external dual link tmds (sil1178?) */
 			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
-		} else if (machine_is_compatible("PowerBook4,1") ||
-			   machine_is_compatible("PowerBook4,2") ||
-			   machine_is_compatible("PowerBook4,3") ||
-			   machine_is_compatible("PowerBook6,3") ||
-			   machine_is_compatible("PowerBook6,5") ||
-			   machine_is_compatible("PowerBook6,7")) {
+		} else if (of_machine_is_compatible("PowerBook4,1") ||
+			   of_machine_is_compatible("PowerBook4,2") ||
+			   of_machine_is_compatible("PowerBook4,3") ||
+			   of_machine_is_compatible("PowerBook6,3") ||
+			   of_machine_is_compatible("PowerBook6,5") ||
+			   of_machine_is_compatible("PowerBook6,7")) {
 			/* ibook */
 			rdev->mode_info.connector_table = CT_IBOOK;
-		} else if (machine_is_compatible("PowerMac4,4")) {
+		} else if (of_machine_is_compatible("PowerMac4,4")) {
 			/* emac */
 			rdev->mode_info.connector_table = CT_EMAC;
-		} else if (machine_is_compatible("PowerMac10,1")) {
+		} else if (of_machine_is_compatible("PowerMac10,1")) {
 			/* mini with internal tmds */
 			rdev->mode_info.connector_table = CT_MINI_INTERNAL;
-		} else if (machine_is_compatible("PowerMac10,2")) {
+		} else if (of_machine_is_compatible("PowerMac10,2")) {
 			/* mini with external tmds */
 			rdev->mode_info.connector_table = CT_MINI_EXTERNAL;
-		} else if (machine_is_compatible("PowerMac12,1")) {
+		} else if (of_machine_is_compatible("PowerMac12,1")) {
 			/* PowerMac8,1 ? */
 			/* imac g5 isight */
 			rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 238188540017..65f81942f399 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -780,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
 	 * connected and the DVI port disconnected. If the edid doesn't
 	 * say HDMI, vice versa.
 	 */
-	if (radeon_connector->shared_ddc && connector_status_connected) {
+	if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
 		struct drm_device *dev = connector->dev;
 		struct drm_connector *list_connector;
 		struct radeon_connector *list_radeon_connector;
@@ -1060,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 		return;
 	}
 	if (radeon_connector->ddc_bus && i2c_bus->valid) {
-		if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
-			   sizeof(struct radeon_i2c_bus_rec)) == 0) {
+		if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
 			radeon_connector->shared_ddc = true;
 			shared_ddc = true;
 		}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1190148cf5e6..e9d085021c1f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 						  &p->validated);
 		}
 	}
-	return radeon_bo_list_validate(&p->validated, p->ib->fence);
+	return radeon_bo_list_validate(&p->validated);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
 	unsigned i;
 
-	if (error && parser->ib) {
-		radeon_bo_list_unvalidate(&parser->validated,
-					  parser->ib->fence);
-	} else {
-		radeon_bo_list_unreserve(&parser->validated);
+	if (!error && parser->ib) {
+		radeon_bo_list_fence(&parser->validated, parser->ib->fence);
 	}
+	radeon_bo_list_unreserve(&parser->validated);
 	for (i = 0; i < parser->nrelocs; i++) {
 		if (parser->relocs[i].gobj) {
 			mutex_lock(&parser->rdev->ddev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index e13785282a82..c57ad606504d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -106,9 +106,10 @@
  * 1.29- R500 3D cmd buffer support
  * 1.30- Add support for occlusion queries
  * 1.31- Add support for num Z pipes from GET_PARAM
+ * 1.32- fixes for rv740 setup
  */
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		31
+#define DRIVER_MINOR		32
 #define DRIVER_PATCHLEVEL	0
 
 enum radeon_cp_microcode_version {
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d72a71bff218..f1da370928eb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
 	}
 }
 
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
 {
 	struct radeon_bo_list *lobj;
 	struct radeon_bo *bo;
-	struct radeon_fence *old_fence = NULL;
 	int r;
 
 	r = radeon_bo_list_reserve(head);
@@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
 		}
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;
-		if (fence) {
-			old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-			bo->tbo.sync_obj = radeon_fence_ref(fence);
-			bo->tbo.sync_obj_arg = NULL;
-		}
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
-		}
 	}
 	return 0;
 }
 
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
 {
 	struct radeon_bo_list *lobj;
-	struct radeon_fence *old_fence;
-
-	if (fence)
-		list_for_each_entry(lobj, head, list) {
-			old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
-			if (old_fence == fence) {
-				lobj->bo->tbo.sync_obj = NULL;
-				radeon_fence_unref(&old_fence);
-			}
+	struct radeon_bo *bo;
+	struct radeon_fence *old_fence = NULL;
+
+	list_for_each_entry(lobj, head, list) {
+		bo = lobj->bo;
+		spin_lock(&bo->tbo.lock);
+		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+		bo->tbo.sync_obj = radeon_fence_ref(fence);
+		bo->tbo.sync_obj_arg = NULL;
+		spin_unlock(&bo->tbo.lock);
+		if (old_fence) {
+			radeon_fence_unref(&old_fence);
 		}
-	radeon_bo_list_unreserve(head);
+	}
 }
 
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index a02f18011ad1..7ab43de1e244 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 				 struct list_head *head);
 extern int radeon_bo_list_reserve(struct list_head *head);
 extern void radeon_bo_list_unreserve(struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, void *fence);
-extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_list_validate(struct list_head *head);
+extern void radeon_bo_list_fence(struct list_head *head, void *fence);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 				struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 4d12b2d17b4d..6579eb4c1f28 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 {
 	struct radeon_fence *fence;
 	struct radeon_ib *nib;
-	unsigned long i;
-	int r = 0;
+	int r = 0, i, c;
 
 	*ib = NULL;
 	r = radeon_fence_create(rdev, &fence);
 	if (r) {
-		DRM_ERROR("failed to create fence for new IB\n");
+		dev_err(rdev->dev, "failed to create fence for new IB\n");
 		return r;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-	if (i < RADEON_IB_POOL_SIZE) {
-		set_bit(i, rdev->ib_pool.alloc_bm);
-		rdev->ib_pool.ibs[i].length_dw = 0;
-		*ib = &rdev->ib_pool.ibs[i];
-		mutex_unlock(&rdev->ib_pool.mutex);
-		goto out;
+	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+		i &= (RADEON_IB_POOL_SIZE - 1);
+		if (rdev->ib_pool.ibs[i].free) {
+			nib = &rdev->ib_pool.ibs[i];
+			break;
+		}
 	}
-	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-		/* we go do nothings here */
+	if (nib == NULL) {
+		/* This should never happen, it means we allocated all
+		 * IB and haven't scheduled one yet, return EBUSY to
+		 * userspace hoping that on ioctl recall we get better
+		 * luck
+		 */
+		dev_err(rdev->dev, "no free indirect buffer !\n");
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("all IB allocated none scheduled.\n");
-		r = -EINVAL;
-		goto out;
+		radeon_fence_unref(&fence);
+		return -EBUSY;
 	}
-	/* get the first ib on the scheduled list */
-	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-			 struct radeon_ib, list);
-	if (nib->fence == NULL) {
-		/* we go do nothings here */
+	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	nib->free = false;
+	if (nib->fence) {
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-		r = -EINVAL;
-		goto out;
-	}
-	mutex_unlock(&rdev->ib_pool.mutex);
-
-	r = radeon_fence_wait(nib->fence, false);
-	if (r) {
-		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-			  (unsigned long)nib->gpu_addr, nib->length_dw);
-		DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
-		goto out;
+		r = radeon_fence_wait(nib->fence, false);
+		if (r) {
+			dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+			mutex_lock(&rdev->ib_pool.mutex);
+			nib->free = true;
+			mutex_unlock(&rdev->ib_pool.mutex);
+			radeon_fence_unref(&fence);
+			return r;
+		}
+		mutex_lock(&rdev->ib_pool.mutex);
 	}
 	radeon_fence_unref(&nib->fence);
-
+	nib->fence = fence;
 	nib->length_dw = 0;
-
-	/* scheduled list is accessed here */
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_del(&nib->list);
-	INIT_LIST_HEAD(&nib->list);
 	mutex_unlock(&rdev->ib_pool.mutex);
-
 	*ib = nib;
-out:
-	if (r) {
-		radeon_fence_unref(&fence);
-	} else {
-		(*ib)->fence = fence;
-	}
-	return r;
+	return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 	if (tmp == NULL) {
 		return;
 	}
-	mutex_lock(&rdev->ib_pool.mutex);
-	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-		/* IB is scheduled & not signaled don't do anythings */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		return;
-	}
-	list_del(&tmp->list);
-	INIT_LIST_HEAD(&tmp->list);
-	if (tmp->fence)
+	if (!tmp->fence->emited)
 		radeon_fence_unref(&tmp->fence);
-
-	tmp->length_dw = 0;
-	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+	mutex_lock(&rdev->ib_pool.mutex);
+	tmp->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
 
@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
 	if (!ib->length_dw || !rdev->cp.ready) {
 		/* TODO: Nothings in the ib we should report. */
-		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
 		return -EINVAL;
 	}
 
@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_ring_ib_execute(rdev, ib);
 	radeon_fence_emit(rdev, ib->fence);
 	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+	/* once scheduled IB is considered free and protected by the fence */
+	ib->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ring_unlock_commit(rdev);
 	return 0;
@@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 	if (rdev->ib_pool.robj)
 		return 0;
 	/* Allocate 1M object buffer */
-	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
 	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
 			     true, RADEON_GEM_DOMAIN_GTT,
 			     &rdev->ib_pool.robj);
@@ -195,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 		rdev->ib_pool.ibs[i].ptr = ptr + offset;
 		rdev->ib_pool.ibs[i].idx = i;
 		rdev->ib_pool.ibs[i].length_dw = 0;
-		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+		rdev->ib_pool.ibs[i].free = true;
 	}
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+	rdev->ib_pool.head_id = 0;
 	rdev->ib_pool.ready = true;
 	DRM_INFO("radeon: ib pool ready.\n");
 	if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
 		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
 		if (likely(r == 0)) {
@@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
 	if (ib == NULL) {
 		return 0;
 	}
-	seq_printf(m, "IB %04lu\n", ib->idx);
+	seq_printf(m, "IB %04u\n", ib->idx);
 	seq_printf(m, "IB fence %p\n", ib->fence);
 	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
 	for (i = 0; i < ib->length_dw; i++) {
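
The ring rework replaces the allocation bitmap and scheduled-IB list with a per-IB free flag and a head_id cursor; the "must be a power of 2" comment added in radeon.h exists because the scan wraps indices with a mask rather than a modulo. A standalone C sketch of that scan (pool contents hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define IB_POOL_SIZE 16	/* must be a power of 2 for the & mask to wrap */

struct ib { unsigned idx; bool free; };

static struct ib pool[IB_POOL_SIZE];
static unsigned head_id;

/* Scan at most IB_POOL_SIZE slots starting at head_id, wrapping with a
 * mask, and claim the first free one -- the loop structure used by the
 * reworked radeon_ib_get(). */
static struct ib *ib_get(void)
{
	unsigned i, c;

	for (i = head_id, c = 0; c < IB_POOL_SIZE; c++, i++) {
		i &= (IB_POOL_SIZE - 1);
		if (pool[i].free) {
			pool[i].free = false;
			head_id = (pool[i].idx + 1) & (IB_POOL_SIZE - 1);
			return &pool[i];
		}
	}
	return NULL;	/* all busy: the driver returns -EBUSY here */
}

int main(void)
{
	unsigned i;

	for (i = 0; i < IB_POOL_SIZE; i++)
		pool[i] = (struct ib){ .idx = i, .free = true };
	pool[0].free = pool[1].free = false;	/* hypothetical in-flight IBs */

	printf("got IB %u\n", ib_get()->idx);	/* 2 */
	printf("got IB %u\n", ib_get()->idx);	/* 3 */
	return 0;
}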
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 5943d561fd1e..03021674d097 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
 	gb_tiling_config |= BANK_SWAPS(1);
 
-	backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
-							rdev->config.rv770.max_backends,
-							(0xff << rdev->config.rv770.max_backends) & 0xff);
+	if (rdev->family == CHIP_RV740)
+		backend_map = 0x28;
+	else
+		backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
+								rdev->config.rv770.max_backends,
+								(0xff << rdev->config.rv770.max_backends) & 0xff);
 	gb_tiling_config |= BACKEND_MAP(backend_map);
 
 	cc_gc_shader_pipe_config =
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1a3e909b7bba..c7320ce4567d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1020,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 					struct ttm_mem_reg *mem)
 {
 	int i;
+	struct drm_mm_node *node = mem->mm_node;
+
+	if (node && placement->lpfn != 0 &&
+	    (node->start < placement->fpfn ||
+	     node->start + node->size > placement->lpfn))
+		return -1;
 
 	for (i = 0; i < placement->num_placement; i++) {
 		if ((placement->placement[i] & mem->placement &
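
Before this check, ttm_bo_mem_compat() compared only placement flags, so a buffer already resident in the right memory type but outside a requested [fpfn, lpfn) window was treated as compatible and never migrated. A standalone C sketch of the added bounds test (page numbers hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* An allocated node: start page and size in pages. */
struct mm_node { unsigned long start, size; };

/* The added check: with an upper bound set (lpfn != 0), the node must
 * lie entirely inside [fpfn, lpfn) to be considered compatible. */
static bool range_compat(const struct mm_node *node,
			 unsigned long fpfn, unsigned long lpfn)
{
	if (node && lpfn != 0 &&
	    (node->start < fpfn || node->start + node->size > lpfn))
		return false;
	return true;
}

int main(void)
{
	struct mm_node n = { .start = 240, .size = 32 };

	printf("%d\n", range_compat(&n, 0, 256));	/* 0: runs past page 256 */
	printf("%d\n", range_compat(&n, 0, 512));	/* 1: fits below page 512 */
	return 0;
}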
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e2123af7775a..3d47a2c12322 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -196,14 +196,15 @@ EXPORT_SYMBOL(ttm_tt_populate);
 
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
-					  enum ttm_caching_state c_state)
+					  enum ttm_caching_state c_old,
+					  enum ttm_caching_state c_new)
 {
 	int ret = 0;
 
 	if (PageHighMem(p))
 		return 0;
 
-	if (get_page_memtype(p) != -1) {
+	if (c_old != tt_cached) {
 		/* p isn't in the default caching state, set it to
 		 * writeback first to free its current memtype. */
 
@@ -212,16 +213,17 @@ static inline int ttm_tt_set_page_caching(struct page *p,
 		return ret;
 	}
 
-	if (c_state == tt_wc)
+	if (c_new == tt_wc)
 		ret = set_memory_wc((unsigned long) page_address(p), 1);
-	else if (c_state == tt_uncached)
+	else if (c_new == tt_uncached)
 		ret = set_pages_uc(p, 1);
 
 	return ret;
 }
 #else /* CONFIG_X86 */
 static inline int ttm_tt_set_page_caching(struct page *p,
-					  enum ttm_caching_state c_state)
+					  enum ttm_caching_state c_old,
+					  enum ttm_caching_state c_new)
 {
 	return 0;
 }
@@ -254,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
 	for (i = 0; i < ttm->num_pages; ++i) {
 		cur_page = ttm->pages[i];
 		if (likely(cur_page != NULL)) {
-			ret = ttm_tt_set_page_caching(cur_page, c_state);
+			ret = ttm_tt_set_page_caching(cur_page,
+						      ttm->caching_state,
+						      c_state);
 			if (unlikely(ret != 0))
 				goto out_err;
 		}
@@ -268,7 +272,7 @@ out_err:
 	for (j = 0; j < i; ++j) {
 		cur_page = ttm->pages[j];
 		if (likely(cur_page != NULL)) {
-			(void)ttm_tt_set_page_caching(cur_page,
+			(void)ttm_tt_set_page_caching(cur_page, c_state,
 						      ttm->caching_state);
 		}
 	}
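
Passing both the old and the new caching state lets ttm_tt_set_page_caching() decide locally whether a page must transition through writeback first, instead of relying on get_page_memtype(), which is not reliably tracked for these pages. A condensed standalone C model of the transition rule (states and outputs illustrative):

#include <stdio.h>

enum caching { tt_uncached, tt_wc, tt_cached };

/* Model of the fixed logic: a page leaves a non-default state via
 * writeback before the new attribute is applied; no per-page memtype
 * lookup is needed because the caller passes the old state. */
static const char *set_page_caching(enum caching c_old, enum caching c_new)
{
	if (c_old != tt_cached)
		printf("  set_memory_wb() first\n");	/* free current memtype */
	if (c_new == tt_wc)
		return "set_memory_wc()";
	if (c_new == tt_uncached)
		return "set_pages_uc()";
	return "(already writeback)";
}

int main(void)
{
	printf("%s\n", set_page_caching(tt_cached, tt_wc));
	printf("%s\n", set_page_caching(tt_wc, tt_uncached));
	return 0;
}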
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a6e8f687fa64..0c9c0811f42d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	 */
 
 		DRM_INFO("It appears like vesafb is loaded. "
-			 "Ignore above error if any. Entering stealth mode.\n");
+			 "Ignore above error if any.\n");
 		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
 			goto out_no_device;
 		}
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-	} else {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			goto out_no_device;
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-		vmw_fb_init(dev_priv);
 	}
+	ret = vmw_request_device(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_no_device;
+	vmw_kms_init(dev_priv);
+	vmw_overlay_init(dev_priv);
+	vmw_fb_init(dev_priv);
 
 	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
 	register_pm_notifier(&dev_priv->pm_nb);
@@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev)
406 403
407 unregister_pm_notifier(&dev_priv->pm_nb); 404 unregister_pm_notifier(&dev_priv->pm_nb);
408 405
409 if (!dev_priv->stealth) { 406 vmw_fb_close(dev_priv);
410 vmw_fb_close(dev_priv); 407 vmw_kms_close(dev_priv);
411 vmw_kms_close(dev_priv); 408 vmw_overlay_close(dev_priv);
412 vmw_overlay_close(dev_priv); 409 vmw_release_device(dev_priv);
413 vmw_release_device(dev_priv); 410 if (dev_priv->stealth)
414 pci_release_regions(dev->pdev);
415 } else {
416 vmw_kms_close(dev_priv);
417 vmw_overlay_close(dev_priv);
418 pci_release_region(dev->pdev, 2); 411 pci_release_region(dev->pdev, 2);
419 } 412 else
413 pci_release_regions(dev->pdev);
414
420 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 415 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
421 drm_irq_uninstall(dev_priv->dev); 416 drm_irq_uninstall(dev_priv->dev);
422 if (dev->devname == vmw_devname) 417 if (dev->devname == vmw_devname)
@@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
585 int ret = 0; 580 int ret = 0;
586 581
587 DRM_INFO("Master set.\n"); 582 DRM_INFO("Master set.\n");
588 if (dev_priv->stealth) {
589 ret = vmw_request_device(dev_priv);
590 if (unlikely(ret != 0))
591 return ret;
592 }
593 583
594 if (active) { 584 if (active) {
595 BUG_ON(active != &dev_priv->fbdev_master); 585 BUG_ON(active != &dev_priv->fbdev_master);
@@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
649 639
650 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 640 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
651 641
652 if (dev_priv->stealth) {
653 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
654 if (unlikely(ret != 0))
655 DRM_ERROR("Unable to clean VRAM on master drop.\n");
656 vmw_release_device(dev_priv);
657 }
658 dev_priv->active_master = &dev_priv->fbdev_master; 642 dev_priv->active_master = &dev_priv->fbdev_master;
659 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 643 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
660 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 644 ttm_vt_unlock(&dev_priv->fbdev_master.lock);
661 645
662 if (!dev_priv->stealth) 646 vmw_fb_on(dev_priv);
663 vmw_fb_on(dev_priv);
664} 647}
665 648
666 649
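
With the stealth special cases gone, the flag only decides which PCI resources are reserved and released; device, KMS, overlay and fb setup now run unconditionally. A condensed sketch of what remains mode-dependent (the non-stealth pci_request_regions() call is assumed from the matching release and lies outside this diff):

	/* probe: the reservation is the only difference between modes */
	if (dev_priv->stealth)
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
	else
		ret = pci_request_regions(dev->pdev, "vmwgfx probe");

	/* remove: release mirrors whichever reservation was taken */
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
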
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index d69caf92ffe7..0897359b3e4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
182 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); 182 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
183} 183}
184 184
185static int vmw_cmd_dma(struct vmw_private *dev_priv, 185static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
186 struct vmw_sw_context *sw_context, 186 struct vmw_sw_context *sw_context,
187 SVGA3dCmdHeader *header) 187 SVGAGuestPtr *ptr,
188 struct vmw_dma_buffer **vmw_bo_p)
188{ 189{
189 uint32_t handle;
190 struct vmw_dma_buffer *vmw_bo = NULL; 190 struct vmw_dma_buffer *vmw_bo = NULL;
191 struct ttm_buffer_object *bo; 191 struct ttm_buffer_object *bo;
192 struct vmw_surface *srf = NULL; 192 uint32_t handle = ptr->gmrId;
193 struct vmw_dma_cmd {
194 SVGA3dCmdHeader header;
195 SVGA3dCmdSurfaceDMA dma;
196 } *cmd;
197 struct vmw_relocation *reloc; 193 struct vmw_relocation *reloc;
198 int ret;
199 uint32_t cur_validate_node; 194 uint32_t cur_validate_node;
200 struct ttm_validate_buffer *val_buf; 195 struct ttm_validate_buffer *val_buf;
196 int ret;
201 197
202 cmd = container_of(header, struct vmw_dma_cmd, header);
203 handle = cmd->dma.guest.ptr.gmrId;
204 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); 198 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
205 if (unlikely(ret != 0)) { 199 if (unlikely(ret != 0)) {
206 DRM_ERROR("Could not find or use GMR region.\n"); 200 DRM_ERROR("Could not find or use GMR region.\n");
@@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
209 bo = &vmw_bo->base; 203 bo = &vmw_bo->base;
210 204
211 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { 205 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
212 		DRM_ERROR("Max number of DMA commands per submission" 206 		DRM_ERROR("Max number of relocations per submission"
213 " exceeded\n"); 207 " exceeded\n");
214 ret = -EINVAL; 208 ret = -EINVAL;
215 goto out_no_reloc; 209 goto out_no_reloc;
216 } 210 }
217 211
218 reloc = &sw_context->relocs[sw_context->cur_reloc++]; 212 reloc = &sw_context->relocs[sw_context->cur_reloc++];
219 reloc->location = &cmd->dma.guest.ptr; 213 reloc->location = ptr;
220 214
221 cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); 215 cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
222 if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { 216 if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
@@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
234 list_add_tail(&val_buf->head, &sw_context->validate_nodes); 228 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
235 ++sw_context->cur_val_buf; 229 ++sw_context->cur_val_buf;
236 } 230 }
231 *vmw_bo_p = vmw_bo;
232 return 0;
233
234out_no_reloc:
235 vmw_dmabuf_unreference(&vmw_bo);
236 	*vmw_bo_p = NULL;
237 return ret;
238}
239
240static int vmw_cmd_end_query(struct vmw_private *dev_priv,
241 struct vmw_sw_context *sw_context,
242 SVGA3dCmdHeader *header)
243{
244 struct vmw_dma_buffer *vmw_bo;
245 struct vmw_query_cmd {
246 SVGA3dCmdHeader header;
247 SVGA3dCmdEndQuery q;
248 } *cmd;
249 int ret;
250
251 cmd = container_of(header, struct vmw_query_cmd, header);
252 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
253 if (unlikely(ret != 0))
254 return ret;
255
256 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
257 &cmd->q.guestResult,
258 &vmw_bo);
259 if (unlikely(ret != 0))
260 return ret;
261
262 vmw_dmabuf_unreference(&vmw_bo);
263 return 0;
264}
237 265
266static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
267 struct vmw_sw_context *sw_context,
268 SVGA3dCmdHeader *header)
269{
270 struct vmw_dma_buffer *vmw_bo;
271 struct vmw_query_cmd {
272 SVGA3dCmdHeader header;
273 SVGA3dCmdWaitForQuery q;
274 } *cmd;
275 int ret;
276
277 cmd = container_of(header, struct vmw_query_cmd, header);
278 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
279 if (unlikely(ret != 0))
280 return ret;
281
282 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
283 &cmd->q.guestResult,
284 &vmw_bo);
285 if (unlikely(ret != 0))
286 return ret;
287
288 vmw_dmabuf_unreference(&vmw_bo);
289 return 0;
290}
291
292
293static int vmw_cmd_dma(struct vmw_private *dev_priv,
294 struct vmw_sw_context *sw_context,
295 SVGA3dCmdHeader *header)
296{
297 struct vmw_dma_buffer *vmw_bo = NULL;
298 struct ttm_buffer_object *bo;
299 struct vmw_surface *srf = NULL;
300 struct vmw_dma_cmd {
301 SVGA3dCmdHeader header;
302 SVGA3dCmdSurfaceDMA dma;
303 } *cmd;
304 int ret;
305
306 cmd = container_of(header, struct vmw_dma_cmd, header);
307 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
308 &cmd->dma.guest.ptr,
309 &vmw_bo);
310 if (unlikely(ret != 0))
311 return ret;
312
313 bo = &vmw_bo->base;
238 ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, 314 ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
239 cmd->dma.host.sid, &srf); 315 cmd->dma.host.sid, &srf);
240 if (ret) { 316 if (ret) {
@@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
379 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), 455 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
380 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), 456 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
381 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), 457 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
382 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check), 458 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
383 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check), 459 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
384 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), 460 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
385 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, 461 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
386 &vmw_cmd_blt_surf_screen_check) 462 &vmw_cmd_blt_surf_screen_check)
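
Beyond fixing END_QUERY/WAIT_FOR_QUERY validation, the extracted helper gives later command handlers a single entry point. A hypothetical sketch of a new handler reusing it (SVGA3dCmdFoo and its guestPtr field are invented for illustration; only the helper's contract is taken from the hunks above):

	static int vmw_cmd_foo(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
	{
		struct vmw_dma_buffer *vmw_bo;
		struct vmw_foo_cmd {
			SVGA3dCmdHeader header;
			SVGA3dCmdFoo body;
		} *cmd = container_of(header, struct vmw_foo_cmd, header);
		int ret;

		/* Looks up the GMR-backed buffer, records a relocation for
		 * the pointer and queues the buffer for validation; on
		 * success it returns a reference the caller must drop. */
		ret = vmw_translate_guest_ptr(dev_priv, sw_context,
					      &cmd->body.guestPtr, &vmw_bo);
		if (unlikely(ret != 0))
			return ret;

		vmw_dmabuf_unreference(&vmw_bo);
		return 0;
	}
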
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 4f4f6432be8b..a93367041cdc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
559 info->pixmap.scan_align = 1; 559 info->pixmap.scan_align = 1;
560#endif 560#endif
561 561
562 info->aperture_base = vmw_priv->vram_start;
563 info->aperture_size = vmw_priv->vram_size;
564
562 /* 565 /*
563 * Dirty & Deferred IO 566 * Dirty & Deferred IO
564 */ 567 */
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 790e675b13eb..0920492cea0a 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -8,3 +8,11 @@ config VGA_ARB
8 	 are accessed at the same time they need some kind of coordination. Please 8 	 are accessed at the same time they need some kind of coordination. Please
9 see Documentation/vgaarbiter.txt for more details. Select this to 9 see Documentation/vgaarbiter.txt for more details. Select this to
10 enable VGA arbiter. 10 enable VGA arbiter.
11
12config VGA_ARB_MAX_GPUS
13 int "Maximum number of GPUs"
14 default 16
15 depends on VGA_ARB
16 help
17 Reserves space in the kernel to maintain resource locking for
18 	  multiple GPUs. The overhead for each GPU is very small.
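
Kconfig emits the integer as CONFIG_VGA_ARB_MAX_GPUS in autoconf.h, so it is an ordinary compile-time constant; the vgaarb.c hunk below uses it to size the per-client card table. A sketch of the consumer side (struct layout assumed for illustration):

	#define MAX_USER_CARDS CONFIG_VGA_ARB_MAX_GPUS

	struct vga_arb_private {
		struct pci_dev *target;
		struct vga_arb_user_card cards[MAX_USER_CARDS];
		spinlock_t lock;
	};
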
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 24b56dc54597..8827814d0735 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -688,7 +688,7 @@ EXPORT_SYMBOL(vga_client_register);
688 * the arbiter. 688 * the arbiter.
689 */ 689 */
690 690
691#define MAX_USER_CARDS 16 691#define MAX_USER_CARDS CONFIG_VGA_ARB_MAX_GPUS
692#define PCI_INVALID_CARD ((struct pci_dev *)-1UL) 692#define PCI_INVALID_CARD ((struct pci_dev *)-1UL)
693 693
694/* 694/*
@@ -954,6 +954,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
954 } 954 }
955 955
956 } else if (strncmp(curr_pos, "target ", 7) == 0) { 956 } else if (strncmp(curr_pos, "target ", 7) == 0) {
957 struct pci_bus *pbus;
957 unsigned int domain, bus, devfn; 958 unsigned int domain, bus, devfn;
958 struct vga_device *vgadev; 959 struct vga_device *vgadev;
959 960
@@ -961,7 +962,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
961 remaining -= 7; 962 remaining -= 7;
962 pr_devel("client 0x%p called 'target'\n", priv); 963 pr_devel("client 0x%p called 'target'\n", priv);
963 /* if target is default */ 964 /* if target is default */
964 if (!strncmp(kbuf, "default", 7)) 965 if (!strncmp(curr_pos, "default", 7))
965 pdev = pci_dev_get(vga_default_device()); 966 pdev = pci_dev_get(vga_default_device());
966 else { 967 else {
967 if (!vga_pci_str_to_vars(curr_pos, remaining, 968 if (!vga_pci_str_to_vars(curr_pos, remaining,
@@ -969,18 +970,31 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
969 ret_val = -EPROTO; 970 ret_val = -EPROTO;
970 goto done; 971 goto done;
971 } 972 }
972 973 pr_devel("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
973 pdev = pci_get_bus_and_slot(bus, devfn); 974 domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
975
976 pbus = pci_find_bus(domain, bus);
977 pr_devel("vgaarb: pbus %p\n", pbus);
978 if (pbus == NULL) {
979 pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n",
980 domain, bus);
981 ret_val = -ENODEV;
982 goto done;
983 }
984 pdev = pci_get_slot(pbus, devfn);
985 pr_devel("vgaarb: pdev %p\n", pdev);
974 if (!pdev) { 986 if (!pdev) {
975 pr_info("vgaarb: invalid PCI address!\n"); 987 pr_err("vgaarb: invalid PCI address %x:%x\n",
988 bus, devfn);
976 ret_val = -ENODEV; 989 ret_val = -ENODEV;
977 goto done; 990 goto done;
978 } 991 }
979 } 992 }
980 993
981 vgadev = vgadev_find(pdev); 994 vgadev = vgadev_find(pdev);
995 pr_devel("vgaarb: vgadev %p\n", vgadev);
982 if (vgadev == NULL) { 996 if (vgadev == NULL) {
983 pr_info("vgaarb: this pci device is not a vga device\n"); 997 pr_err("vgaarb: this pci device is not a vga device\n");
984 pci_dev_put(pdev); 998 pci_dev_put(pdev);
985 ret_val = -ENODEV; 999 ret_val = -ENODEV;
986 goto done; 1000 goto done;
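
The switch from pci_get_bus_and_slot() to pci_find_bus() plus pci_get_slot() makes the target lookup domain-aware; the former always searches domain 0. A minimal sketch of the pattern (hypothetical wrapper, error reporting trimmed):

	static struct pci_dev *lookup_pdev(unsigned int domain,
					   unsigned int bus,
					   unsigned int devfn)
	{
		struct pci_bus *pbus = pci_find_bus(domain, bus);

		if (pbus == NULL)
			return NULL;		   /* no such domain/bus */
		return pci_get_slot(pbus, devfn);  /* holds a reference */
	}
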
@@ -998,7 +1012,8 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
998 } 1012 }
999 } 1013 }
1000 if (i == MAX_USER_CARDS) { 1014 if (i == MAX_USER_CARDS) {
1001 		pr_err("vgaarb: maximum user cards number reached!\n"); 1015 		pr_err("vgaarb: maximum number of user cards (%d) reached!\n",
1016 MAX_USER_CARDS);
1002 pci_dev_put(pdev); 1017 pci_dev_put(pdev);
1003 /* XXX: which value to return? */ 1018 /* XXX: which value to return? */
1004 ret_val = -ENOMEM; 1019 ret_val = -ENOMEM;