Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/ati_pcigart.c | 10
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 4
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 29
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 64
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 13
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 13
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 5
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 3
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 90
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 294
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 139
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 390
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 46
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 132
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 732
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 81
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 55
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 116
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 94
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 205
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 245
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 108
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 77
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 55
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 161
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 102
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 77
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c | 159
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fb.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 117
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c | 61
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fb.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 116
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 5
-rw-r--r--  drivers/gpu/drm/radeon/ObjectID.h | 801
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 109
-rw-r--r--  drivers/gpu/drm/radeon/atom.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 259
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 39
-rw-r--r--  drivers/gpu/drm/radeon/mkregtable.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 207
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 83
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 25
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 61
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 70
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 60
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 68
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 179
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 77
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_tv.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 56
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 107
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r200 | 2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r420 | 795
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rs600 | 68
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rv515 | 6
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 88
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 75
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 35
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 125
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 19
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 37
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 30
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 64
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 2
144 files changed, 5724 insertions(+), 3080 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 96eddd17e050..305c59003963 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -66,6 +66,8 @@ config DRM_RADEON
 
 	  If M is selected, the module will be called radeon.
 
+source "drivers/gpu/drm/radeon/Kconfig"
+
 config DRM_I810
 	tristate "Intel I810"
 	depends on DRM && AGP && AGP_INTEL
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index 628eae3e9b83..17be051b7aa3 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -39,8 +39,7 @@ static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
 				       struct drm_ati_pcigart_info *gart_info)
 {
 	gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
-						PAGE_SIZE,
-						gart_info->table_mask);
+						PAGE_SIZE);
 	if (gart_info->table_handle == NULL)
 		return -ENOMEM;
 
@@ -112,6 +111,13 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
 		DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
 
+		if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
+			DRM_ERROR("fail to set dma mask to 0x%Lx\n",
+				  (unsigned long long)gart_info->table_mask);
+			ret = 1;
+			goto done;
+		}
+
 		ret = drm_ati_alloc_pcigart_table(dev, gart_info);
 		if (ret) {
 			DRM_ERROR("cannot allocate PCI GART page!\n");
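
Read together with the drm_pci.c hunk later in this series, these two hunks capture an API contract change: drm_pci_alloc() no longer takes a maxaddr argument, so a caller that needs an addressing limit must set the device DMA mask itself before allocating. The new calling convention, condensed from this file (a sketch, not additional code):

    /* Bound future allocations via the device DMA mask... */
    if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
            DRM_ERROR("fail to set dma mask to 0x%Lx\n",
                      (unsigned long long)gart_info->table_mask);
            return 1;
    }

    /* ...then allocate with only size and alignment. */
    gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
                                            PAGE_SIZE);
    if (gart_info->table_handle == NULL)
            return -ENOMEM;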
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 3d09e304f6f4..8417cc4c43f1 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 		 * As we're limiting the address to 2^32-1 (or less),
 		 * casting it down to 32 bits is no problem, but we
 		 * need to point to a 64bit variable first. */
-		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
+		dmah = drm_pci_alloc(dev, map->size, map->size);
 		if (!dmah) {
 			kfree(map);
 			return -ENOMEM;
@@ -885,7 +885,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 
 	while (entry->buf_count < count) {
 
-		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
+		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
 
 		if (!dmah) {
 			/* Set count correctly so we free the proper amount. */
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5124401f266a..d91fb8c0b7b3 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -158,6 +158,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
 	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
 	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
 	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
+	{ DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
 };
 
 static struct drm_prop_enum_list drm_encoder_enum_list[] =
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 4231d6db72ec..7d0f00a935fa 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -216,7 +216,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
 EXPORT_SYMBOL(drm_helper_crtc_in_use);
 
 /**
- * drm_disable_unused_functions - disable unused objects
+ * drm_helper_disable_unused_functions - disable unused objects
  * @dev: DRM device
  *
  * LOCKING:
@@ -702,7 +702,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 		if (encoder->crtc != crtc)
 			continue;
 
-		DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
+		DRM_DEBUG("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
 			 mode->name, mode->base.id);
 		encoder_funcs = encoder->helper_private;
 		encoder_funcs->mode_set(encoder, mode, adjusted_mode);
@@ -1032,7 +1032,8 @@ bool drm_helper_initial_config(struct drm_device *dev)
 	/*
 	 * we shouldn't end up with no modes here.
 	 */
-	WARN(!count, "No connectors reported connected with modes\n");
+	if (count == 0)
+		printk(KERN_INFO "No connectors reported connected with modes\n");
 
 	drm_setup_crtcs(dev);
 
@@ -1162,6 +1163,9 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
 int drm_helper_resume_force_mode(struct drm_device *dev)
 {
 	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct drm_crtc_helper_funcs *crtc_funcs;
 	int ret;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1174,6 +1178,25 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
 
 		if (ret == false)
 			DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+
+		/* Turn off outputs that were already powered off */
+		if (drm_helper_choose_crtc_dpms(crtc)) {
+			list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+				if(encoder->crtc != crtc)
+					continue;
+
+				encoder_funcs = encoder->helper_private;
+				if (encoder_funcs->dpms)
+					(*encoder_funcs->dpms) (encoder,
+								drm_helper_choose_encoder_dpms(encoder));
+
+				crtc_funcs = crtc->helper_private;
+				if (crtc_funcs->dpms)
+					(*crtc_funcs->dpms) (crtc,
+							     drm_helper_choose_crtc_dpms(crtc));
+			}
+		}
 	}
 	/* disable the unused connectors while restoring the modesetting */
 	drm_helper_disable_unused_functions(dev);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5c9f79877cbf..ab6c97330412 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 	return mode;
 }
 
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded.  Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size.  Technically we
+ * should be checking refresh rate too.  Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+			    struct detailed_pixel_timing *pt)
+{
+	int i;
+	static const struct {
+		int w, h;
+	} cea_interlaced[] = {
+		{ 1920, 1080 },
+		{  720,  480 },
+		{ 1440,  480 },
+		{ 2880,  480 },
+		{  720,  576 },
+		{ 1440,  576 },
+		{ 2880,  576 },
+	};
+	static const int n_sizes =
+		sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+		return;
+
+	for (i = 0; i < n_sizes; i++) {
+		if ((mode->hdisplay == cea_interlaced[i].w) &&
+		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
+			mode->vdisplay *= 2;
+			mode->vsync_start *= 2;
+			mode->vsync_end *= 2;
+			mode->vtotal *= 2;
+			mode->vtotal |= 1;
+		}
+	}
+
+	mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)
@@ -633,8 +677,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 		return NULL;
 	}
 	if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
-		printk(KERN_WARNING "integrated sync not supported\n");
-		return NULL;
+		printk(KERN_WARNING "composite sync not supported\n");
 	}
 
 	/* it is incorrect if hsync/vsync width is zero */
@@ -681,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
 	drm_mode_set_name(mode);
 
-	if (pt->misc & DRM_EDID_PT_INTERLACED)
-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	drm_mode_do_interlace_quirk(mode, pt);
 
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
@@ -911,23 +953,27 @@ static int drm_cvt_modes(struct drm_connector *connector,
 	struct drm_device *dev = connector->dev;
 	struct cvt_timing *cvt;
 	const int rates[] = { 60, 85, 75, 60, 50 };
+	const u8 empty[3] = { 0, 0, 0 };
 
 	for (i = 0; i < 4; i++) {
 		int uninitialized_var(width), height;
 		cvt = &(timing->data.other_data.data.cvt[i]);
 
-		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
-		switch (cvt->code[1] & 0xc0) {
+		if (!memcmp(cvt->code, empty, 3))
+			continue;
+
+		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+		switch (cvt->code[1] & 0x0c) {
 		case 0x00:
 			width = height * 4 / 3;
 			break;
-		case 0x40:
+		case 0x04:
 			width = height * 16 / 9;
 			break;
-		case 0x80:
+		case 0x08:
 			width = height * 16 / 10;
 			break;
-		case 0xc0:
+		case 0x0c:
 			width = height * 15 / 9;
 			break;
 		}
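
The drm_cvt_modes() hunk fixes two decode bugs in the 3-byte CVT descriptor: the four high bits of the vertical size live in bits 7:4 of code[1] and belong at bits 11:8 of the assembled value (shift left by 4, not 8), and the aspect-ratio field sits in bits 3:2 (mask 0x0c), not bits 7:6. A standalone sketch of the corrected decode, with names local to this example:

    /* Decode one EDID CVT 3-byte code (sketch; layout per the fix above). */
    static void decode_cvt_code(const u8 code[3], int *width, int *height)
    {
            /* stored value is (vsize / 2) - 1, 12 bits wide */
            *height = (code[0] + ((code[1] & 0xf0) << 4) + 1) * 2;

            switch (code[1] & 0x0c) {       /* aspect ratio, bits 3:2 */
            case 0x00: *width = *height * 4 / 3;   break;
            case 0x04: *width = *height * 16 / 9;  break;
            case 0x08: *width = *height * 16 / 10; break;
            case 0x0c: *width = *height * 15 / 9;  break;
            }
    }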
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1b49fa055f4f..0f9e90552dc4 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -156,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
 			force = DRM_FORCE_ON;
 			break;
 		case 'D':
-			if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) ||
+			if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
 			    (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
 				force = DRM_FORCE_ON;
 			else
@@ -389,7 +389,7 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
 		break;
 	/* Display: Off; HSync: On, VSync: On */
 	case FB_BLANK_NORMAL:
-		drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
+		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
 		break;
 	/* Display: Off; HSync: Off, VSync: On */
 	case FB_BLANK_HSYNC_SUSPEND:
@@ -606,11 +606,10 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 		return -EINVAL;
 
 	/* Need to resize the fb object !!! */
-	if (var->xres > fb->width || var->yres > fb->height) {
-		DRM_ERROR("Requested width/height is greater than current fb "
-			  "object %dx%d > %dx%d\n", var->xres, var->yres,
-			  fb->width, fb->height);
-		DRM_ERROR("Need resizing code.\n");
+	if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
+		DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+			  "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
+			  fb->width, fb->height, fb->bits_per_pixel);
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e9dbb481c469..8bf3770f294e 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
 	if (IS_ERR(obj->filp))
 		goto free;
 
-	/* Basically we want to disable the OOM killer and handle ENOMEM
-	 * ourselves by sacrificing pages from cached buffers.
-	 * XXX shmem_file_[gs]et_gfp_mask()
-	 */
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
-			     GFP_HIGHUSER |
-			     __GFP_COLD |
-			     __GFP_FS |
-			     __GFP_RECLAIMABLE |
-			     __GFP_NORETRY |
-			     __GFP_NOWARN |
-			     __GFP_NOMEMALLOC);
-
 	kref_init(&obj->refcount);
 	kref_init(&obj->handlecount);
 	obj->size = size;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 7998ee66b317..b98384dbd9a7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -115,6 +115,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
 
 	dev->num_crtcs = 0;
 }
+EXPORT_SYMBOL(drm_vblank_cleanup);
 
 int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 {
@@ -163,7 +164,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 	}
 
 	dev->vblank_disable_allowed = 0;
-
 	return 0;
 
 err:
@@ -493,6 +493,9 @@ EXPORT_SYMBOL(drm_vblank_off);
  */
 void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
 {
+	/* vblank is not initialized (IRQ not installed ?) */
+	if (!dev->num_crtcs)
+		return;
 	/*
 	 * To avoid all the problems that might happen if interrupts
 	 * were enabled/disabled around or between these calls, we just
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cdec32977129..2ac074c8f5d2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 			wasted += alignment - tmp;
 		}
 
-		if (entry->size >= size + wasted) {
+		if (entry->size >= size + wasted &&
+		    (entry->start + wasted + size) <= end) {
 			if (!best_match)
 				return entry;
 			if (entry->size < best_size) {
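
The extra clause matters: drm_mm_search_free_in_range() previously checked only that a hole was large enough, so a hole starting inside [start, end) but extending past it could be returned, letting the allocation spill beyond the requested range. The invariant now enforced, restated as a sketch with hypothetical names:

    /* A hole is acceptable only if the aligned allocation also ends
     * at or before the range limit (the check added above). */
    static int hole_fits(unsigned long hole_start, unsigned long hole_size,
                         unsigned long wasted, unsigned long size,
                         unsigned long range_end)
    {
            return hole_size >= size + wasted &&
                   hole_start + wasted + size <= range_end;
    }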
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 6d81a02463a3..76d63394c776 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1,9 +1,4 @@
 /*
- * The list_sort function is (presumably) licensed under the GPL (see the
- * top level "COPYING" file for details).
- *
- * The remainder of this file is:
- *
  * Copyright © 1997-2003 by The XFree86 Project, Inc.
  * Copyright © 2007 Dave Airlie
  * Copyright © 2007-2008 Intel Corporation
@@ -36,6 +31,7 @@
  */
 
 #include <linux/list.h>
+#include <linux/list_sort.h>
 #include "drmP.h"
 #include "drm.h"
 #include "drm_crtc.h"
@@ -855,6 +851,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
 
 /**
  * drm_mode_compare - compare modes for favorability
+ * @priv: unused
  * @lh_a: list_head for first mode
  * @lh_b: list_head for second mode
  *
@@ -868,7 +865,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
  * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
  * positive if @lh_b is better than @lh_a.
  */
-static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
+static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
 {
 	struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
 	struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
@@ -885,85 +882,6 @@ static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
 	return diff;
 }
 
-/* FIXME: what we don't have a list sort function? */
-/* list sort from Mark J Roberts (mjr@znex.org) */
-void list_sort(struct list_head *head,
-	       int (*cmp)(struct list_head *a, struct list_head *b))
-{
-	struct list_head *p, *q, *e, *list, *tail, *oldhead;
-	int insize, nmerges, psize, qsize, i;
-
-	list = head->next;
-	list_del(head);
-	insize = 1;
-	for (;;) {
-		p = oldhead = list;
-		list = tail = NULL;
-		nmerges = 0;
-
-		while (p) {
-			nmerges++;
-			q = p;
-			psize = 0;
-			for (i = 0; i < insize; i++) {
-				psize++;
-				q = q->next == oldhead ? NULL : q->next;
-				if (!q)
-					break;
-			}
-
-			qsize = insize;
-			while (psize > 0 || (qsize > 0 && q)) {
-				if (!psize) {
-					e = q;
-					q = q->next;
-					qsize--;
-					if (q == oldhead)
-						q = NULL;
-				} else if (!qsize || !q) {
-					e = p;
-					p = p->next;
-					psize--;
-					if (p == oldhead)
-						p = NULL;
-				} else if (cmp(p, q) <= 0) {
-					e = p;
-					p = p->next;
-					psize--;
-					if (p == oldhead)
-						p = NULL;
-				} else {
-					e = q;
-					q = q->next;
-					qsize--;
-					if (q == oldhead)
-						q = NULL;
-				}
-				if (tail)
-					tail->next = e;
-				else
-					list = e;
-				e->prev = tail;
-				tail = e;
-			}
-			p = q;
-		}
-
-		tail->next = list;
-		list->prev = tail;
-
-		if (nmerges <= 1)
-			break;
-
-		insize *= 2;
-	}
-
-	head->next = list;
-	head->prev = list->prev;
-	list->prev->next = head;
-	list->prev = head;
-}
-
 /**
  * drm_mode_sort - sort mode list
  * @mode_list: list to sort
@@ -975,7 +893,7 @@ void list_sort(struct list_head *head,
  */
 void drm_mode_sort(struct list_head *mode_list)
 {
-	list_sort(mode_list, drm_mode_compare);
+	list_sort(NULL, mode_list, drm_mode_compare);
 }
 EXPORT_SYMBOL(drm_mode_sort);
 
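
drm_modes.c now delegates to the generic list_sort() that entered lib/ around this kernel release; the helper threads an opaque priv pointer through to the comparator, which is why drm_mode_compare() grows an unused first argument. The prototype being relied on, from <linux/list_sort.h> of this era:

    #include <linux/list_sort.h>

    /* Stable merge sort over a doubly linked list.  @priv is caller
     * context passed through to @cmp; drm_mode_sort() passes NULL. */
    void list_sort(void *priv, struct list_head *head,
                   int (*cmp)(void *priv, struct list_head *a,
                              struct list_head *b));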
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 577094fb1995..e68ebf92fa2a 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -47,8 +47,7 @@
 /**
  * \brief Allocate a PCI consistent memory block, for DMA.
  */
-drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
-				dma_addr_t maxaddr)
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
 {
 	drm_dma_handle_t *dmah;
 #if 1
@@ -63,11 +62,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
 	if (align > size)
 		return NULL;
 
-	if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
-		DRM_ERROR("Setting pci dma mask failed\n");
-		return NULL;
-	}
-
 	dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
 	if (!dmah)
 		return NULL;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 18476bf0b580..a894ade03093 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -272,7 +272,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
 		mem = kmap_atomic(pages[page], KM_USER0);
 		for (i = 0; i < PAGE_SIZE; i += 4)
 			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
-		kunmap_atomic(pages[page], KM_USER0);
+		kunmap_atomic(mem, KM_USER0);
 	}
 }
 
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
 		obj = obj_priv->obj;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			ret = i915_gem_object_get_pages(obj);
+			ret = i915_gem_object_get_pages(obj, 0);
 			if (ret) {
 				DRM_ERROR("Failed to get pages: %d\n", ret);
 				spin_unlock(&dev_priv->mm.active_list_lock);
@@ -386,34 +386,6 @@ out:
 	return 0;
 }
 
-static int i915_registers_info(struct seq_file *m, void *data) {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t reg;
-
-#define DUMP_RANGE(start, end) \
-	for (reg=start; reg < end; reg += 4) \
-		seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
-
-	DUMP_RANGE(0x00000, 0x00fff);	/* VGA registers */
-	DUMP_RANGE(0x02000, 0x02fff);	/* instruction, memory, interrupt control registers */
-	DUMP_RANGE(0x03000, 0x031ff);	/* FENCE and PPGTT control registers */
-	DUMP_RANGE(0x03200, 0x03fff);	/* frame buffer compression registers */
-	DUMP_RANGE(0x05000, 0x05fff);	/* I/O control registers */
-	DUMP_RANGE(0x06000, 0x06fff);	/* clock control registers */
-	DUMP_RANGE(0x07000, 0x07fff);	/* 3D internal debug registers */
-	DUMP_RANGE(0x07400, 0x088ff);	/* GPE debug registers */
-	DUMP_RANGE(0x0a000, 0x0afff);	/* display palette registers */
-	DUMP_RANGE(0x10000, 0x13fff);	/* MMIO MCHBAR */
-	DUMP_RANGE(0x30000, 0x3ffff);	/* overlay registers */
-	DUMP_RANGE(0x60000, 0x6ffff);	/* display engine pipeline registers */
-	DUMP_RANGE(0x70000, 0x72fff);	/* display and cursor registers */
-	DUMP_RANGE(0x73000, 0x73fff);	/* performance counters */
-
-	return 0;
-}
-
 static int
 i915_wedged_open(struct inode *inode,
 		 struct file *filp)
@@ -519,7 +491,6 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 }
 
 static struct drm_info_list i915_debugfs_list[] = {
-	{"i915_regs", i915_registers_info, 0},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 701bfeac7f57..2307f98349f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -123,7 +123,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	/* Program Hardware Status Page */
 	dev_priv->status_page_dmah =
-		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
 
 	if (!dev_priv->status_page_dmah) {
 		DRM_ERROR("Can not allocate hardware status page\n");
@@ -134,6 +134,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
 
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 
+	if (IS_I965G(dev))
+		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
+						0xf0;
+
 	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 	return 0;
@@ -731,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
 				    sizeof(struct drm_clip_rect), GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto fail_batch_free;
+		}
 
 		ret = copy_from_user(cliprects, cmdbuf->cliprects,
 				     cmdbuf->num_cliprects *
@@ -813,9 +819,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_PAGEFLIPPING:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_EXECBUF2:
+		/* depends on GEM */
+		value = dev_priv->has_gem;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
 		return -EINVAL;
 	}
 
@@ -1117,7 +1127,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_mm_node *compressed_fb, *compressed_llb;
-	unsigned long cfb_base, ll_base;
+	unsigned long cfb_base;
+	unsigned long ll_base = 0;
 
 	/* Leave 1M for line length buffer & misc. */
 	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
@@ -1200,14 +1211,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
 		0xff000000;
 
-	if (IS_MOBILE(dev) || IS_I9XX(dev))
-		dev_priv->cursor_needs_physical = true;
-	else
-		dev_priv->cursor_needs_physical = false;
-
-	if (IS_I965G(dev) || IS_G33(dev))
-		dev_priv->cursor_needs_physical = false;
-
 	/* Basic memrange allocator for stolen space (aka vram) */
 	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
 	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
@@ -1257,6 +1260,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	if (ret)
 		goto destroy_ringbuffer;
 
+	intel_modeset_init(dev);
+
 	ret = drm_irq_install(dev);
 	if (ret)
 		goto destroy_ringbuffer;
@@ -1271,8 +1276,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
 
 	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
 
-	intel_modeset_init(dev);
-
 	drm_helper_initial_config(dev);
 
 	return 0;
@@ -1360,7 +1363,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	resource_size_t base, size;
-	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+	int ret = 0, mmio_bar;
 	uint32_t agp_size, prealloc_size, prealloc_start;
 
 	/* i915 has 4 more counters */
@@ -1376,8 +1379,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	dev->dev_private = (void *)dev_priv;
 	dev_priv->dev = dev;
+	dev_priv->info = (struct intel_device_info *) flags;
 
 	/* Add register map (needed for suspend/resume) */
+	mmio_bar = IS_I9XX(dev) ? 0 : 1;
 	base = drm_get_resource_start(dev, mmio_bar);
 	size = drm_get_resource_len(dev, mmio_bar);
 
@@ -1652,6 +1657,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
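
The IS_I965G() hunk in i915_init_phys_hws() deserves a note: it appears to fold the high bits of a wider (seemingly 36-bit) status page address into the low byte of the HWS_PGA register value. Shifting right by 28 moves bits 35:32 down to bits 7:4, and the & 0xf0 mask keeps exactly those four bits. A worked sketch with a hypothetical address:

    u64 addr = 0x912345000ULL;      /* hypothetical 36-bit bus address */
    u32 reg  = lower_32_bits(addr); /* 0x12345000 */
    reg |= (addr >> 28) & 0xf0;     /* bits 35:32 land in 7:4: reg |= 0x90 */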
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 24286ca168fc..79beffcf5936 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -33,7 +33,6 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-#include "drm_pciids.h"
 #include <linux/console.h>
 #include "drm_crtc_helper.h"
 
@@ -46,88 +45,229 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0400);
 
+unsigned int i915_lvds_downclock = 0;
+module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+
 static struct drm_driver driver;
 
-static struct pci_device_id pciidlist[] = {
-	i915_PCI_IDS
+#define INTEL_VGA_DEVICE(id, info) {		\
+	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
+	.class_mask = 0xffff00,			\
+	.vendor = 0x8086,			\
+	.device = id,				\
+	.subvendor = PCI_ANY_ID,		\
+	.subdevice = PCI_ANY_ID,		\
+	.driver_data = (unsigned long) info }
+
+const static struct intel_device_info intel_i830_info = {
+	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_845g_info = {
+	.is_i8xx = 1,
+};
+
+const static struct intel_device_info intel_i85x_info = {
+	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_i865g_info = {
+	.is_i8xx = 1,
+};
+
+const static struct intel_device_info intel_i915g_info = {
+	.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i915gm_info = {
+	.is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+	.cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i945g_info = {
+	.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i945gm_info = {
+	.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+	.has_hotplug = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_i965g_info = {
+	.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_i965gm_info = {
+	.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
+	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_g33_info = {
+	.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_g45_info = {
+	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_pipe_cxsr = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_gm45_info = {
+	.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
+	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+	.has_pipe_cxsr = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_pineview_info = {
+	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_ironlake_d_info = {
+	.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_pipe_cxsr = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_ironlake_m_info = {
+	.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_rc6 = 1,
+	.has_hotplug = 1,
+};
+
+const static struct pci_device_id pciidlist[] = {
+	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
+	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
+	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+	INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
+	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
+	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
+	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
+	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
+	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
+	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
+	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
+	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
+	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
+	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
+	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
+	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
+	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
+	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
+	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
+	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+	{0, 0, 0}
 };
 
 #if defined(CONFIG_DRM_I915_KMS)
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!dev || !dev_priv) {
-		DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
-		DRM_ERROR("DRM not initialized, aborting suspend.\n");
-		return -ENODEV;
-	}
-
-	if (state.event == PM_EVENT_PRETHAW)
-		return 0;
-
 	pci_save_state(dev->pdev);
 
 	/* If KMS is active, we do the leavevt stuff here */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		if (i915_gem_idle(dev))
+		int error = i915_gem_idle(dev);
+		if (error) {
 			dev_err(&dev->pdev->dev,
-				"GEM idle failed, resume may fail\n");
+				"GEM idle failed, resume might fail\n");
+			return error;
+		}
 		drm_irq_uninstall(dev);
 	}
 
 	i915_save_state(dev);
 
+	return 0;
+}
+
+static void i915_drm_suspend(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	intel_opregion_free(dev, 1);
 
+	/* Modeset on resume, not lid events */
+	dev_priv->modeset_on_lid = 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+	int error;
+
+	if (!dev || !dev->dev_private) {
+		DRM_ERROR("dev: %p\n", dev);
+		DRM_ERROR("DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (state.event == PM_EVENT_PRETHAW)
+		return 0;
+
+	error = i915_drm_freeze(dev);
+	if (error)
+		return error;
+
+	i915_drm_suspend(dev);
+
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
 
-	/* Modeset on resume, not lid events */
-	dev_priv->modeset_on_lid = 0;
-
 	return 0;
 }
 
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-
-	if (pci_enable_device(dev->pdev))
-		return -1;
-	pci_set_master(dev->pdev);
-
-	i915_restore_state(dev);
-
-	intel_opregion_init(dev, 1);
+	int error = 0;
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_lock(&dev->struct_mutex);
 		dev_priv->mm.suspended = 0;
 
-		ret = i915_gem_init_ringbuffer(dev);
-		if (ret != 0)
-			ret = -1;
+		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
 		drm_irq_install(dev);
-	}
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
 	}
 
 	dev_priv->modeset_on_lid = 0;
 
-	return ret;
+	return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	pci_set_master(dev->pdev);
+
+	i915_restore_state(dev);
+
+	intel_opregion_init(dev, 1);
+
+	return i915_drm_thaw(dev);
 }
 
 /**
@@ -268,22 +408,80 @@ i915_pci_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
 
-	return i915_suspend(dev, state);
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	error = i915_drm_freeze(drm_dev);
+	if (error)
+		return error;
+
+	i915_drm_suspend(drm_dev);
+
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
 }
 
-static int
-i915_pci_resume(struct pci_dev *pdev)
+static int i915_pm_resume(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_resume(drm_dev);
+}
+
+static int i915_pm_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
 
-	return i915_resume(dev);
+	return i915_drm_freeze(drm_dev);
 }
 
+static int i915_pm_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_drm_thaw(drm_dev);
+}
+
+static int i915_pm_poweroff(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
+
+	error = i915_drm_freeze(drm_dev);
+	if (!error)
+		i915_drm_suspend(drm_dev);
+
+	return error;
+}
+
+const struct dev_pm_ops i915_pm_ops = {
+	.suspend = i915_pm_suspend,
+	.resume = i915_pm_resume,
+	.freeze = i915_pm_freeze,
+	.thaw = i915_pm_thaw,
+	.poweroff = i915_pm_poweroff,
+	.restore = i915_pm_resume,
+};
+
 static struct vm_operations_struct i915_gem_vm_ops = {
 	.fault = i915_gem_fault,
 	.open = drm_gem_vm_open,
@@ -303,8 +501,11 @@ static struct drm_driver driver = {
 	.lastclose = i915_driver_lastclose,
 	.preclose = i915_driver_preclose,
 	.postclose = i915_driver_postclose,
+
+	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
 	.suspend = i915_suspend,
 	.resume = i915_resume,
+
 	.device_is_agp = i915_driver_device_is_agp,
 	.enable_vblank = i915_enable_vblank,
 	.disable_vblank = i915_disable_vblank,
@@ -344,10 +545,7 @@ static struct drm_driver driver = {
 		.id_table = pciidlist,
 		.probe = i915_pci_probe,
 		.remove = i915_pci_remove,
-#ifdef CONFIG_PM
-		.resume = i915_pci_resume,
-		.suspend = i915_pci_suspend,
-#endif
+		.driver.pm = &i915_pm_ops,
 	},
 
 	.name = DRIVER_NAME,
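
The shape of this refactor: suspend is split into a freeze half (quiesce GEM, uninstall the IRQ, save register state) and a suspend half (free the opregion, power handling), and the driver switches from the legacy pci_driver .suspend/.resume callbacks to struct dev_pm_ops so each hibernation phase can invoke just the half it needs. The legacy i915_suspend/i915_resume entry points survive only for the non-DRIVER_MODESET (UMS) path. Condensed from the diff above, the phase-to-handler mapping is:

    const struct dev_pm_ops i915_pm_ops = {
            .suspend  = i915_pm_suspend,  /* S3: freeze + suspend + D3hot */
            .resume   = i915_pm_resume,
            .freeze   = i915_pm_freeze,   /* hibernate: quiesce only */
            .thaw     = i915_pm_thaw,     /* re-init after image write */
            .poweroff = i915_pm_poweroff, /* freeze + suspend, no D-state */
            .restore  = i915_pm_resume,   /* resume from hibernation image */
    };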
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fbecac72f5bb..b99b6a841d95 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -172,9 +172,31 @@ struct drm_i915_display_funcs {
 
 struct intel_overlay;
 
+struct intel_device_info {
+	u8 is_mobile : 1;
+	u8 is_i8xx : 1;
+	u8 is_i915g : 1;
+	u8 is_i9xx : 1;
+	u8 is_i945gm : 1;
+	u8 is_i965g : 1;
+	u8 is_i965gm : 1;
+	u8 is_g33 : 1;
+	u8 need_gfx_hws : 1;
+	u8 is_g4x : 1;
+	u8 is_pineview : 1;
+	u8 is_ironlake : 1;
+	u8 has_fbc : 1;
+	u8 has_rc6 : 1;
+	u8 has_pipe_cxsr : 1;
+	u8 has_hotplug : 1;
+	u8 cursor_needs_physical : 1;
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 
+	const struct intel_device_info *info;
+
 	int has_gem;
 
 	void __iomem *regs;
@@ -232,8 +254,6 @@ typedef struct drm_i915_private {
 	int hangcheck_count;
 	uint32_t last_acthd;
 
-	bool cursor_needs_physical;
-
 	struct drm_mm vram;
 
 	unsigned long cfb_size;
@@ -263,6 +283,7 @@ typedef struct drm_i915_private {
 	unsigned int lvds_use_ssc:1;
 	unsigned int edp_support:1;
 	int lvds_ssc_freq;
+	int edp_bpp;
 
 	struct notifier_block lid_notifier;
 
@@ -287,8 +308,6 @@ typedef struct drm_i915_private {
 	u32 saveDSPACNTR;
 	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
-	u32 saveRENDERSTANDBY;
-	u32 savePWRCTXA;
 	u32 saveHWS;
 	u32 savePIPEACONF;
 	u32 savePIPEBCONF;
@@ -474,6 +493,15 @@ typedef struct drm_i915_private {
 	struct list_head flushing_list;
 
 	/**
+	 * List of objects currently pending a GPU write flush.
+	 *
+	 * All elements on this list will belong to either the
+	 * active_list or flushing_list, last_rendering_seqno can
+	 * be used to differentiate between the two elements.
+	 */
+	struct list_head gpu_write_list;
+
+	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
@@ -561,6 +589,7 @@ typedef struct drm_i915_private {
 	u16 orig_clock;
 	int child_dev_num;
 	struct child_device_config *child_dev;
+	struct drm_connector *int_lvds_connector;
} drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
@@ -572,6 +601,8 @@ struct drm_i915_gem_object {
 
 	/** This object's place on the active/flushing/inactive lists */
 	struct list_head list;
+	/** This object's place on GPU write list */
+	struct list_head gpu_write_list;
 
 	/** This object's place on the fenced object LRU */
 	struct list_head fence_list;
@@ -703,6 +734,7 @@ extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
+extern unsigned int i915_lvds_downclock;
 
 extern void i915_save_display(struct drm_device *dev);
 extern void i915_restore_display(struct drm_device *dev);
@@ -794,6 +826,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_execbuffer2(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
 int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv);
 int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -843,12 +877,13 @@ int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptib
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
 int i915_gem_attach_phys_object(struct drm_device *dev,
 				struct drm_gem_object *obj, int id);
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj);
+int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
@@ -860,6 +895,9 @@ void i915_gem_shrinker_exit(void);
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
 void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
+bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
+		    int tiling_mode);
+bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
 
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -982,67 +1020,33 @@ extern void g4x_disable_fbc(struct drm_device *dev);
 extern int i915_wrap_ring(struct drm_device * dev);
 extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
986#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1024
987#define IS_I85X(dev) ((dev)->pci_device == 0x3582) 1025#define IS_I830(dev) ((dev)->pci_device == 0x3577)
988#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1026#define IS_845G(dev) ((dev)->pci_device == 0x2562)
989#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev)) 1027#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
990 1028#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
991#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) 1029#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
992#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1030#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
993#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1031#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
994#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\ 1032#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
995 (dev)->pci_device == 0x27AE) 1033#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
996#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ 1034#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
997 (dev)->pci_device == 0x2982 || \ 1035#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
998 (dev)->pci_device == 0x2992 || \ 1036#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
999 (dev)->pci_device == 0x29A2 || \ 1037#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1000 (dev)->pci_device == 0x2A02 || \ 1038#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
1001 (dev)->pci_device == 0x2A12 || \ 1039#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
1002 (dev)->pci_device == 0x2A42 || \ 1040#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1003 (dev)->pci_device == 0x2E02 || \ 1041#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1004 (dev)->pci_device == 0x2E12 || \
1005 (dev)->pci_device == 0x2E22 || \
1006 (dev)->pci_device == 0x2E32 || \
1007 (dev)->pci_device == 0x2E42 || \
1008 (dev)->pci_device == 0x0042 || \
1009 (dev)->pci_device == 0x0046)
1010
1011#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
1012 (dev)->pci_device == 0x2A12)
1013
1014#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
1015
1016#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
1017 (dev)->pci_device == 0x2E12 || \
1018 (dev)->pci_device == 0x2E22 || \
1019 (dev)->pci_device == 0x2E32 || \
1020 (dev)->pci_device == 0x2E42 || \
1021 IS_GM45(dev))
1022
1023#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
1024#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
1025#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
1026
1027#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
1028 (dev)->pci_device == 0x29B2 || \
1029 (dev)->pci_device == 0x29D2 || \
1030 (IS_PINEVIEW(dev)))
1031
1032#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) 1042#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
1033#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1043#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1034#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev)) 1044#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
1035 1045#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
1036#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ 1046#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1037 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
1038 IS_IRONLAKE(dev))
1039 1047
1040#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 1048#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1041 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
1042 IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
1043 1049
1044#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
1045 IS_IRONLAKE(dev))
1046/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1050/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1047 * rows, which changed the alignment requirements and fence programming. 1051 * rows, which changed the alignment requirements and fence programming.
1048 */ 1052 */
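
The block above replaces open-coded PCI-ID chains with a per-device feature struct reached through INTEL_INFO(dev). A hedged sketch of the pattern; the bitfield names are taken from the macros above, while the struct layout and the way the pointer gets wired up (typically via the PCI ID table's driver_data) are assumptions:

	struct intel_device_info {
		u8 is_mobile:1, is_i8xx:1, is_i915g:1, is_i945gm:1;
		u8 is_i965g:1, is_i965gm:1, is_g33:1, is_g4x:1;
		u8 is_pineview:1, is_ironlake:1, is_i9xx:1;
		u8 has_fbc:1, has_rc6:1, has_pipe_cxsr:1;
		u8 has_hotplug:1, need_gfx_hws:1;
	};

	/* one static instance per device family, e.g.: */
	static const struct intel_device_info intel_i945gm_info = {
		.is_i9xx = 1, .is_i945gm = 1, .is_mobile = 1,
		.has_hotplug = 1, .has_fbc = 1,
	};

dev_priv->info is then set once at load time, and every IS_*()/HAS_*() query becomes a single bit test instead of a growing chain of (dev)->pci_device comparisons.
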
@@ -1054,17 +1058,14 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1054#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) 1058#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1055#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ 1059#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
1056 !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) 1060 !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
1057#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) 1061#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1058/* dsparb controlled by hw only */ 1062/* dsparb controlled by hw only */
1059#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 1063#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1060 1064
1061#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev)) 1065#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
1062#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 1066#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1063#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \ 1067#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1064 (IS_I9XX(dev) || IS_GM45(dev)) && \ 1068#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
1065 !IS_PINEVIEW(dev) && \
1066 !IS_IRONLAKE(dev))
1067#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
1068 1069
1069#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1070#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1070 1071
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c463cf2050a..ec8a0d7ffa39 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
277 277
278 mutex_lock(&dev->struct_mutex); 278 mutex_lock(&dev->struct_mutex);
279 279
280 ret = i915_gem_object_get_pages(obj); 280 ret = i915_gem_object_get_pages(obj, 0);
281 if (ret != 0) 281 if (ret != 0)
282 goto fail_unlock; 282 goto fail_unlock;
283 283
@@ -321,40 +321,24 @@ fail_unlock:
321 return ret; 321 return ret;
322} 322}
323 323
324static inline gfp_t
325i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
326{
327 return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
328}
329
330static inline void
331i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
332{
333 mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
334}
335
336static int 324static int
337i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) 325i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
338{ 326{
339 int ret; 327 int ret;
340 328
341 ret = i915_gem_object_get_pages(obj); 329 ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
342 330
343 /* If we've insufficient memory to map in the pages, attempt 331 /* If we've insufficient memory to map in the pages, attempt
344 * to make some space by throwing out some old buffers. 332 * to make some space by throwing out some old buffers.
345 */ 333 */
346 if (ret == -ENOMEM) { 334 if (ret == -ENOMEM) {
347 struct drm_device *dev = obj->dev; 335 struct drm_device *dev = obj->dev;
348 gfp_t gfp;
349 336
350 ret = i915_gem_evict_something(dev, obj->size); 337 ret = i915_gem_evict_something(dev, obj->size);
351 if (ret) 338 if (ret)
352 return ret; 339 return ret;
353 340
354 gfp = i915_gem_object_get_page_gfp_mask(obj); 341 ret = i915_gem_object_get_pages(obj, 0);
355 i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
356 ret = i915_gem_object_get_pages(obj);
357 i915_gem_object_set_page_gfp_mask (obj, gfp);
358 } 342 }
359 343
360 return ret; 344 return ret;
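
The rewrite above drops the racy pattern of temporarily editing the shared mapping-wide gfp mask (the old i915_gem_object_set_page_gfp_mask() dance affected every concurrent allocation against that mapping) in favour of a gfp argument that stays local to one call. The resulting two-pass shape, used here and again in i915_gem_object_bind_to_gtt() below:

	/* pass 1: fail fast, no OOM killer, no allocation warnings */
	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
	if (ret == -ENOMEM) {
		/* make room, then pass 2: allow blocking reclaim */
		ret = i915_gem_evict_something(dev, obj->size);
		if (ret == 0)
			ret = i915_gem_object_get_pages(obj, 0);
	}
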
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
790 774
791 mutex_lock(&dev->struct_mutex); 775 mutex_lock(&dev->struct_mutex);
792 776
793 ret = i915_gem_object_get_pages(obj); 777 ret = i915_gem_object_get_pages(obj, 0);
794 if (ret != 0) 778 if (ret != 0)
795 goto fail_unlock; 779 goto fail_unlock;
796 780
@@ -1568,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1568 else 1552 else
1569 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 1553 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1570 1554
1555 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1556
1571 obj_priv->last_rendering_seqno = 0; 1557 obj_priv->last_rendering_seqno = 0;
1572 if (obj_priv->active) { 1558 if (obj_priv->active) {
1573 obj_priv->active = 0; 1559 obj_priv->active = 0;
@@ -1638,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1638 struct drm_i915_gem_object *obj_priv, *next; 1624 struct drm_i915_gem_object *obj_priv, *next;
1639 1625
1640 list_for_each_entry_safe(obj_priv, next, 1626 list_for_each_entry_safe(obj_priv, next,
1641 &dev_priv->mm.flushing_list, list) { 1627 &dev_priv->mm.gpu_write_list,
1628 gpu_write_list) {
1642 struct drm_gem_object *obj = obj_priv->obj; 1629 struct drm_gem_object *obj = obj_priv->obj;
1643 1630
1644 if ((obj->write_domain & flush_domains) == 1631 if ((obj->write_domain & flush_domains) ==
@@ -1646,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1646 uint32_t old_write_domain = obj->write_domain; 1633 uint32_t old_write_domain = obj->write_domain;
1647 1634
1648 obj->write_domain = 0; 1635 obj->write_domain = 0;
1636 list_del_init(&obj_priv->gpu_write_list);
1649 i915_gem_object_move_to_active(obj, seqno); 1637 i915_gem_object_move_to_active(obj, seqno);
1650 1638
1651 trace_i915_gem_object_change_domain(obj, 1639 trace_i915_gem_object_change_domain(obj,
@@ -2021,9 +2009,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2021 /* blow away mappings if mapped through GTT */ 2009 /* blow away mappings if mapped through GTT */
2022 i915_gem_release_mmap(obj); 2010 i915_gem_release_mmap(obj);
2023 2011
2024 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2025 i915_gem_clear_fence_reg(obj);
2026
2027 /* Move the object to the CPU domain to ensure that 2012 /* Move the object to the CPU domain to ensure that
2028 * any possible CPU writes while it's not in the GTT 2013 * any possible CPU writes while it's not in the GTT
2029 * are flushed when we go to remap it. This will 2014 * are flushed when we go to remap it. This will
@@ -2039,6 +2024,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2039 2024
2040 BUG_ON(obj_priv->active); 2025 BUG_ON(obj_priv->active);
2041 2026
2027 /* release the fence reg _after_ flushing */
2028 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2029 i915_gem_clear_fence_reg(obj);
2030
2042 if (obj_priv->agp_mem != NULL) { 2031 if (obj_priv->agp_mem != NULL) {
2043 drm_unbind_agp(obj_priv->agp_mem); 2032 drm_unbind_agp(obj_priv->agp_mem);
2044 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); 2033 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
@@ -2099,8 +2088,8 @@ static int
2099i915_gem_evict_everything(struct drm_device *dev) 2088i915_gem_evict_everything(struct drm_device *dev)
2100{ 2089{
2101 drm_i915_private_t *dev_priv = dev->dev_private; 2090 drm_i915_private_t *dev_priv = dev->dev_private;
2102 uint32_t seqno;
2103 int ret; 2091 int ret;
2092 uint32_t seqno;
2104 bool lists_empty; 2093 bool lists_empty;
2105 2094
2106 spin_lock(&dev_priv->mm.active_list_lock); 2095 spin_lock(&dev_priv->mm.active_list_lock);
@@ -2122,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
2122 if (ret) 2111 if (ret)
2123 return ret; 2112 return ret;
2124 2113
2114 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2115
2125 ret = i915_gem_evict_from_inactive_list(dev); 2116 ret = i915_gem_evict_from_inactive_list(dev);
2126 if (ret) 2117 if (ret)
2127 return ret; 2118 return ret;
@@ -2229,7 +2220,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2229} 2220}
2230 2221
2231int 2222int
2232i915_gem_object_get_pages(struct drm_gem_object *obj) 2223i915_gem_object_get_pages(struct drm_gem_object *obj,
2224 gfp_t gfpmask)
2233{ 2225{
2234 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2226 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2235 int page_count, i; 2227 int page_count, i;
@@ -2255,7 +2247,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
2255 inode = obj->filp->f_path.dentry->d_inode; 2247 inode = obj->filp->f_path.dentry->d_inode;
2256 mapping = inode->i_mapping; 2248 mapping = inode->i_mapping;
2257 for (i = 0; i < page_count; i++) { 2249 for (i = 0; i < page_count; i++) {
2258 page = read_mapping_page(mapping, i, NULL); 2250 page = read_cache_page_gfp(mapping, i,
2251 mapping_gfp_mask (mapping) |
2252 __GFP_COLD |
2253 gfpmask);
2259 if (IS_ERR(page)) { 2254 if (IS_ERR(page)) {
2260 ret = PTR_ERR(page); 2255 ret = PTR_ERR(page);
2261 i915_gem_object_put_pages(obj); 2256 i915_gem_object_put_pages(obj);
@@ -2578,12 +2573,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2578 drm_i915_private_t *dev_priv = dev->dev_private; 2573 drm_i915_private_t *dev_priv = dev->dev_private;
2579 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2574 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2580 struct drm_mm_node *free_space; 2575 struct drm_mm_node *free_space;
2581 bool retry_alloc = false; 2576 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2582 int ret; 2577 int ret;
2583 2578
2584 if (dev_priv->mm.suspended)
2585 return -EBUSY;
2586
2587 if (obj_priv->madv != I915_MADV_WILLNEED) { 2579 if (obj_priv->madv != I915_MADV_WILLNEED) {
2588 DRM_ERROR("Attempting to bind a purgeable object\n"); 2580 DRM_ERROR("Attempting to bind a purgeable object\n");
2589 return -EINVAL; 2581 return -EINVAL;
@@ -2625,15 +2617,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2625 DRM_INFO("Binding object of size %zd at 0x%08x\n", 2617 DRM_INFO("Binding object of size %zd at 0x%08x\n",
2626 obj->size, obj_priv->gtt_offset); 2618 obj->size, obj_priv->gtt_offset);
2627#endif 2619#endif
2628 if (retry_alloc) { 2620 ret = i915_gem_object_get_pages(obj, gfpmask);
2629 i915_gem_object_set_page_gfp_mask (obj,
2630 i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
2631 }
2632 ret = i915_gem_object_get_pages(obj);
2633 if (retry_alloc) {
2634 i915_gem_object_set_page_gfp_mask (obj,
2635 i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
2636 }
2637 if (ret) { 2621 if (ret) {
2638 drm_mm_put_block(obj_priv->gtt_space); 2622 drm_mm_put_block(obj_priv->gtt_space);
2639 obj_priv->gtt_space = NULL; 2623 obj_priv->gtt_space = NULL;
@@ -2643,9 +2627,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2643 ret = i915_gem_evict_something(dev, obj->size); 2627 ret = i915_gem_evict_something(dev, obj->size);
2644 if (ret) { 2628 if (ret) {
2645 /* now try to shrink everyone else */ 2629 /* now try to shrink everyone else */
2646 if (! retry_alloc) { 2630 if (gfpmask) {
2647 retry_alloc = true; 2631 gfpmask = 0;
2648 goto search_free; 2632 goto search_free;
2649 } 2633 }
2650 2634
2651 return ret; 2635 return ret;
@@ -2723,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2723 old_write_domain = obj->write_domain; 2707 old_write_domain = obj->write_domain;
2724 i915_gem_flush(dev, 0, obj->write_domain); 2708 i915_gem_flush(dev, 0, obj->write_domain);
2725 seqno = i915_add_request(dev, NULL, obj->write_domain); 2709 seqno = i915_add_request(dev, NULL, obj->write_domain);
2726 obj->write_domain = 0; 2710 BUG_ON(obj->write_domain);
2727 i915_gem_object_move_to_active(obj, seqno); 2711 i915_gem_object_move_to_active(obj, seqno);
2728 2712
2729 trace_i915_gem_object_change_domain(obj, 2713 trace_i915_gem_object_change_domain(obj,
@@ -2839,6 +2823,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2839 return 0; 2823 return 0;
2840} 2824}
2841 2825
2826/*
2827 * Prepare buffer for display plane. Use uninterruptible for possible flush
2828 * wait, as in modesetting process we're not supposed to be interrupted.
2829 */
2830int
2831i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2832{
2833 struct drm_device *dev = obj->dev;
2834 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2835 uint32_t old_write_domain, old_read_domains;
2836 int ret;
2837
2838 /* Not valid to be called on unbound objects. */
2839 if (obj_priv->gtt_space == NULL)
2840 return -EINVAL;
2841
2842 i915_gem_object_flush_gpu_write_domain(obj);
2843
2844 /* Wait on any GPU rendering and flushing to occur. */
2845 if (obj_priv->active) {
2846#if WATCH_BUF
2847 DRM_INFO("%s: object %p wait for seqno %08x\n",
2848 __func__, obj, obj_priv->last_rendering_seqno);
2849#endif
2850 ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
2851 if (ret != 0)
2852 return ret;
2853 }
2854
2855 old_write_domain = obj->write_domain;
2856 old_read_domains = obj->read_domains;
2857
2858 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2859
2860 i915_gem_object_flush_cpu_write_domain(obj);
2861
2862 /* It should now be out of any other write domains, and we can update
2863 * the domain values for our changes.
2864 */
2865 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2866 obj->read_domains |= I915_GEM_DOMAIN_GTT;
2867 obj->write_domain = I915_GEM_DOMAIN_GTT;
2868 obj_priv->dirty = 1;
2869
2870 trace_i915_gem_object_change_domain(obj,
2871 old_read_domains,
2872 old_write_domain);
2873
2874 return 0;
2875}
2876
2842/** 2877/**
2843 * Moves a single object to the CPU read, and possibly write domain. 2878 * Moves a single object to the CPU read, and possibly write domain.
2844 * 2879 *
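
i915_gem_object_set_to_display_plane() is the modeset-path sibling of i915_gem_object_set_to_gtt_domain(): it passes interruptible=0 to i915_do_wait_request() because a half-completed mode set cannot reasonably be restarted by a signal. A hedged sketch of the expected caller shape when pinning a scanout buffer (helper names as elsewhere in this patch):

	ret = i915_gem_object_pin(obj, alignment);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_display_plane(obj);
	if (ret) {
		i915_gem_object_unpin(obj);
		return ret;
	}
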
@@ -3198,7 +3233,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3198static int 3233static int
3199i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, 3234i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3200 struct drm_file *file_priv, 3235 struct drm_file *file_priv,
3201 struct drm_i915_gem_exec_object *entry, 3236 struct drm_i915_gem_exec_object2 *entry,
3202 struct drm_i915_gem_relocation_entry *relocs) 3237 struct drm_i915_gem_relocation_entry *relocs)
3203{ 3238{
3204 struct drm_device *dev = obj->dev; 3239 struct drm_device *dev = obj->dev;
@@ -3206,12 +3241,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3206 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3241 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3207 int i, ret; 3242 int i, ret;
3208 void __iomem *reloc_page; 3243 void __iomem *reloc_page;
3244 bool need_fence;
3245
3246 need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3247 obj_priv->tiling_mode != I915_TILING_NONE;
3248
3249 /* Check fence reg constraints and rebind if necessary */
3250 if (need_fence && !i915_obj_fenceable(dev, obj))
3251 i915_gem_object_unbind(obj);
3209 3252
3210 /* Choose the GTT offset for our buffer and put it there. */ 3253 /* Choose the GTT offset for our buffer and put it there. */
3211 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); 3254 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3212 if (ret) 3255 if (ret)
3213 return ret; 3256 return ret;
3214 3257
3258 /*
3259 * Pre-965 chips need a fence register set up in order to
3260 * properly handle blits to/from tiled surfaces.
3261 */
3262 if (need_fence) {
3263 ret = i915_gem_object_get_fence_reg(obj);
3264 if (ret != 0) {
3265 if (ret != -EBUSY && ret != -ERESTARTSYS)
3266 DRM_ERROR("Failure to install fence: %d\n",
3267 ret);
3268 i915_gem_object_unpin(obj);
3269 return ret;
3270 }
3271 }
3272
3215 entry->offset = obj_priv->gtt_offset; 3273 entry->offset = obj_priv->gtt_offset;
3216 3274
3217 /* Apply the relocations, using the GTT aperture to avoid cache 3275 /* Apply the relocations, using the GTT aperture to avoid cache
@@ -3373,7 +3431,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3373 */ 3431 */
3374static int 3432static int
3375i915_dispatch_gem_execbuffer(struct drm_device *dev, 3433i915_dispatch_gem_execbuffer(struct drm_device *dev,
3376 struct drm_i915_gem_execbuffer *exec, 3434 struct drm_i915_gem_execbuffer2 *exec,
3377 struct drm_clip_rect *cliprects, 3435 struct drm_clip_rect *cliprects,
3378 uint64_t exec_offset) 3436 uint64_t exec_offset)
3379{ 3437{
@@ -3463,7 +3521,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3463} 3521}
3464 3522
3465static int 3523static int
3466i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, 3524i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
3467 uint32_t buffer_count, 3525 uint32_t buffer_count,
3468 struct drm_i915_gem_relocation_entry **relocs) 3526 struct drm_i915_gem_relocation_entry **relocs)
3469{ 3527{
@@ -3478,8 +3536,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3478 } 3536 }
3479 3537
3480 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); 3538 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3481 if (*relocs == NULL) 3539 if (*relocs == NULL) {
3540 DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
3482 return -ENOMEM; 3541 return -ENOMEM;
3542 }
3483 3543
3484 for (i = 0; i < buffer_count; i++) { 3544 for (i = 0; i < buffer_count; i++) {
3485 struct drm_i915_gem_relocation_entry __user *user_relocs; 3545 struct drm_i915_gem_relocation_entry __user *user_relocs;
@@ -3503,13 +3563,16 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3503} 3563}
3504 3564
3505static int 3565static int
3506i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, 3566i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
3507 uint32_t buffer_count, 3567 uint32_t buffer_count,
3508 struct drm_i915_gem_relocation_entry *relocs) 3568 struct drm_i915_gem_relocation_entry *relocs)
3509{ 3569{
3510 uint32_t reloc_count = 0, i; 3570 uint32_t reloc_count = 0, i;
3511 int ret = 0; 3571 int ret = 0;
3512 3572
3573 if (relocs == NULL)
3574 return 0;
3575
3513 for (i = 0; i < buffer_count; i++) { 3576 for (i = 0; i < buffer_count; i++) {
3514 struct drm_i915_gem_relocation_entry __user *user_relocs; 3577 struct drm_i915_gem_relocation_entry __user *user_relocs;
3515 int unwritten; 3578 int unwritten;
@@ -3536,7 +3599,7 @@ err:
3536} 3599}
3537 3600
3538static int 3601static int
3539i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec, 3602i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
3540 uint64_t exec_offset) 3603 uint64_t exec_offset)
3541{ 3604{
3542 uint32_t exec_start, exec_len; 3605 uint32_t exec_start, exec_len;
@@ -3589,18 +3652,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
3589} 3652}
3590 3653
3591int 3654int
3592i915_gem_execbuffer(struct drm_device *dev, void *data, 3655i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3593 struct drm_file *file_priv) 3656 struct drm_file *file_priv,
3657 struct drm_i915_gem_execbuffer2 *args,
3658 struct drm_i915_gem_exec_object2 *exec_list)
3594{ 3659{
3595 drm_i915_private_t *dev_priv = dev->dev_private; 3660 drm_i915_private_t *dev_priv = dev->dev_private;
3596 struct drm_i915_gem_execbuffer *args = data;
3597 struct drm_i915_gem_exec_object *exec_list = NULL;
3598 struct drm_gem_object **object_list = NULL; 3661 struct drm_gem_object **object_list = NULL;
3599 struct drm_gem_object *batch_obj; 3662 struct drm_gem_object *batch_obj;
3600 struct drm_i915_gem_object *obj_priv; 3663 struct drm_i915_gem_object *obj_priv;
3601 struct drm_clip_rect *cliprects = NULL; 3664 struct drm_clip_rect *cliprects = NULL;
3602 struct drm_i915_gem_relocation_entry *relocs; 3665 struct drm_i915_gem_relocation_entry *relocs = NULL;
3603 int ret, ret2, i, pinned = 0; 3666 int ret = 0, ret2, i, pinned = 0;
3604 uint64_t exec_offset; 3667 uint64_t exec_offset;
3605 uint32_t seqno, flush_domains, reloc_index; 3668 uint32_t seqno, flush_domains, reloc_index;
3606 int pin_tries, flips; 3669 int pin_tries, flips;
@@ -3614,31 +3677,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3614 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); 3677 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3615 return -EINVAL; 3678 return -EINVAL;
3616 } 3679 }
3617 /* Copy in the exec list from userland */
3618 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3619 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); 3680 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3620 if (exec_list == NULL || object_list == NULL) { 3681 if (object_list == NULL) {
3621 DRM_ERROR("Failed to allocate exec or object list " 3682 DRM_ERROR("Failed to allocate object list for %d buffers\n",
3622 "for %d buffers\n",
3623 args->buffer_count); 3683 args->buffer_count);
3624 ret = -ENOMEM; 3684 ret = -ENOMEM;
3625 goto pre_mutex_err; 3685 goto pre_mutex_err;
3626 } 3686 }
3627 ret = copy_from_user(exec_list,
3628 (struct drm_i915_relocation_entry __user *)
3629 (uintptr_t) args->buffers_ptr,
3630 sizeof(*exec_list) * args->buffer_count);
3631 if (ret != 0) {
3632 DRM_ERROR("copy %d exec entries failed %d\n",
3633 args->buffer_count, ret);
3634 goto pre_mutex_err;
3635 }
3636 3687
3637 if (args->num_cliprects != 0) { 3688 if (args->num_cliprects != 0) {
3638 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), 3689 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3639 GFP_KERNEL); 3690 GFP_KERNEL);
3640 if (cliprects == NULL) 3691 if (cliprects == NULL) {
3692 ret = -ENOMEM;
3641 goto pre_mutex_err; 3693 goto pre_mutex_err;
3694 }
3642 3695
3643 ret = copy_from_user(cliprects, 3696 ret = copy_from_user(cliprects,
3644 (struct drm_clip_rect __user *) 3697 (struct drm_clip_rect __user *)
@@ -3680,6 +3733,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3680 if (object_list[i] == NULL) { 3733 if (object_list[i] == NULL) {
3681 DRM_ERROR("Invalid object handle %d at index %d\n", 3734 DRM_ERROR("Invalid object handle %d at index %d\n",
3682 exec_list[i].handle, i); 3735 exec_list[i].handle, i);
3736 /* prevent error path from reading uninitialized data */
3737 args->buffer_count = i + 1;
3683 ret = -EBADF; 3738 ret = -EBADF;
3684 goto err; 3739 goto err;
3685 } 3740 }
@@ -3688,6 +3743,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3688 if (obj_priv->in_execbuffer) { 3743 if (obj_priv->in_execbuffer) {
3689 DRM_ERROR("Object %p appears more than once in object list\n", 3744 DRM_ERROR("Object %p appears more than once in object list\n",
3690 object_list[i]); 3745 object_list[i]);
3746 /* prevent error path from reading uninitialized data */
3747 args->buffer_count = i + 1;
3691 ret = -EBADF; 3748 ret = -EBADF;
3692 goto err; 3749 goto err;
3693 } 3750 }
@@ -3801,16 +3858,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3801 i915_gem_flush(dev, 3858 i915_gem_flush(dev,
3802 dev->invalidate_domains, 3859 dev->invalidate_domains,
3803 dev->flush_domains); 3860 dev->flush_domains);
3804 if (dev->flush_domains) 3861 if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
3805 (void)i915_add_request(dev, file_priv, 3862 (void)i915_add_request(dev, file_priv,
3806 dev->flush_domains); 3863 dev->flush_domains);
3807 } 3864 }
3808 3865
3809 for (i = 0; i < args->buffer_count; i++) { 3866 for (i = 0; i < args->buffer_count; i++) {
3810 struct drm_gem_object *obj = object_list[i]; 3867 struct drm_gem_object *obj = object_list[i];
3868 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3811 uint32_t old_write_domain = obj->write_domain; 3869 uint32_t old_write_domain = obj->write_domain;
3812 3870
3813 obj->write_domain = obj->pending_write_domain; 3871 obj->write_domain = obj->pending_write_domain;
3872 if (obj->write_domain)
3873 list_move_tail(&obj_priv->gpu_write_list,
3874 &dev_priv->mm.gpu_write_list);
3875 else
3876 list_del_init(&obj_priv->gpu_write_list);
3877
3814 trace_i915_gem_object_change_domain(obj, 3878 trace_i915_gem_object_change_domain(obj,
3815 obj->read_domains, 3879 obj->read_domains,
3816 old_write_domain); 3880 old_write_domain);
@@ -3884,8 +3948,101 @@ err:
3884 3948
3885 mutex_unlock(&dev->struct_mutex); 3949 mutex_unlock(&dev->struct_mutex);
3886 3950
3951pre_mutex_err:
3952 /* Copy the updated relocations out regardless of current error
3953 * state. Failure to update the relocs would mean that the next
3954 * time userland calls execbuf, it would do so with presumed offset
3955 * state that didn't match the actual object state.
3956 */
3957 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3958 relocs);
3959 if (ret2 != 0) {
3960 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3961
3962 if (ret == 0)
3963 ret = ret2;
3964 }
3965
3966 drm_free_large(object_list);
3967 kfree(cliprects);
3968
3969 return ret;
3970}
3971
3972/*
3973 * Legacy execbuffer just creates an exec2 list from the original exec object
3974 * list array and passes it to the real function.
3975 */
3976int
3977i915_gem_execbuffer(struct drm_device *dev, void *data,
3978 struct drm_file *file_priv)
3979{
3980 struct drm_i915_gem_execbuffer *args = data;
3981 struct drm_i915_gem_execbuffer2 exec2;
3982 struct drm_i915_gem_exec_object *exec_list = NULL;
3983 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
3984 int ret, i;
3985
3986#if WATCH_EXEC
3987 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3988 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3989#endif
3990
3991 if (args->buffer_count < 1) {
3992 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3993 return -EINVAL;
3994 }
3995
3996 /* Copy in the exec list from userland */
3997 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3998 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
3999 if (exec_list == NULL || exec2_list == NULL) {
4000 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4001 args->buffer_count);
4002 drm_free_large(exec_list);
4003 drm_free_large(exec2_list);
4004 return -ENOMEM;
4005 }
4006 ret = copy_from_user(exec_list,
4007 (struct drm_i915_relocation_entry __user *)
4008 (uintptr_t) args->buffers_ptr,
4009 sizeof(*exec_list) * args->buffer_count);
4010 if (ret != 0) {
4011 DRM_ERROR("copy %d exec entries failed %d\n",
4012 args->buffer_count, ret);
4013 drm_free_large(exec_list);
4014 drm_free_large(exec2_list);
4015 return -EFAULT;
4016 }
4017
4018 for (i = 0; i < args->buffer_count; i++) {
4019 exec2_list[i].handle = exec_list[i].handle;
4020 exec2_list[i].relocation_count = exec_list[i].relocation_count;
4021 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
4022 exec2_list[i].alignment = exec_list[i].alignment;
4023 exec2_list[i].offset = exec_list[i].offset;
4024 if (!IS_I965G(dev))
4025 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
4026 else
4027 exec2_list[i].flags = 0;
4028 }
4029
4030 exec2.buffers_ptr = args->buffers_ptr;
4031 exec2.buffer_count = args->buffer_count;
4032 exec2.batch_start_offset = args->batch_start_offset;
4033 exec2.batch_len = args->batch_len;
4034 exec2.DR1 = args->DR1;
4035 exec2.DR4 = args->DR4;
4036 exec2.num_cliprects = args->num_cliprects;
4037 exec2.cliprects_ptr = args->cliprects_ptr;
4038 exec2.flags = 0;
4039
4040 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
3887 if (!ret) { 4041 if (!ret) {
3888 /* Copy the new buffer offsets back to the user's exec list. */ 4042 /* Copy the new buffer offsets back to the user's exec list. */
4043 for (i = 0; i < args->buffer_count; i++)
4044 exec_list[i].offset = exec2_list[i].offset;
4045 /* ... and back out to userspace */
3889 ret = copy_to_user((struct drm_i915_relocation_entry __user *) 4046 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3890 (uintptr_t) args->buffers_ptr, 4047 (uintptr_t) args->buffers_ptr,
3891 exec_list, 4048 exec_list,
@@ -3898,25 +4055,62 @@ err:
3898 } 4055 }
3899 } 4056 }
3900 4057
3901 /* Copy the updated relocations out regardless of current error 4058 drm_free_large(exec_list);
3902 * state. Failure to update the relocs would mean that the next 4059 drm_free_large(exec2_list);
3903 * time userland calls execbuf, it would do so with presumed offset 4060 return ret;
3904 * state that didn't match the actual object state. 4061}
3905 */
3906 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3907 relocs);
3908 if (ret2 != 0) {
3909 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3910 4062
3911 if (ret == 0) 4063int
3912 ret = ret2; 4064i915_gem_execbuffer2(struct drm_device *dev, void *data,
4065 struct drm_file *file_priv)
4066{
4067 struct drm_i915_gem_execbuffer2 *args = data;
4068 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4069 int ret;
4070
4071#if WATCH_EXEC
4072 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4073 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4074#endif
4075
4076 if (args->buffer_count < 1) {
4077 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4078 return -EINVAL;
3913 } 4079 }
3914 4080
3915pre_mutex_err: 4081 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
3916 drm_free_large(object_list); 4082 if (exec2_list == NULL) {
3917 drm_free_large(exec_list); 4083 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
3918 kfree(cliprects); 4084 args->buffer_count);
4085 return -ENOMEM;
4086 }
4087 ret = copy_from_user(exec2_list,
4088 (struct drm_i915_relocation_entry __user *)
4089 (uintptr_t) args->buffers_ptr,
4090 sizeof(*exec2_list) * args->buffer_count);
4091 if (ret != 0) {
4092 DRM_ERROR("copy %d exec entries failed %d\n",
4093 args->buffer_count, ret);
4094 drm_free_large(exec2_list);
4095 return -EFAULT;
4096 }
4097
4098 ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4099 if (!ret) {
4100 /* Copy the new buffer offsets back to the user's exec list. */
4101 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4102 (uintptr_t) args->buffers_ptr,
4103 exec2_list,
4104 sizeof(*exec2_list) * args->buffer_count);
4105 if (ret) {
4106 ret = -EFAULT;
4107 DRM_ERROR("failed to copy %d exec entries "
4108 "back to user (%d)\n",
4109 args->buffer_count, ret);
4110 }
4111 }
3919 4112
4113 drm_free_large(exec2_list);
3920 return ret; 4114 return ret;
3921} 4115}
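
For reference, a hedged userspace sketch of driving the new ioctl; the struct fields follow the conversion loop above, while the header path and the DRM_IOCTL_I915_GEM_EXECBUFFER2 macro name are assumptions about the matching i915_drm.h update:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>	/* header location assumed */

	int submit_batch(int fd, uint32_t batch_handle, uint32_t batch_len)
	{
		struct drm_i915_gem_exec_object2 obj;
		struct drm_i915_gem_execbuffer2 execbuf;

		memset(&obj, 0, sizeof(obj));
		obj.handle = batch_handle;
		/* per-object flag, new in exec2; only honored for tiled
		 * buffers on pre-965 parts */
		obj.flags = EXEC_OBJECT_NEEDS_FENCE;

		memset(&execbuf, 0, sizeof(execbuf));
		execbuf.buffers_ptr = (uintptr_t)&obj;
		execbuf.buffer_count = 1;
		execbuf.batch_len = batch_len;

		return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
	}

The legacy i915_gem_execbuffer() entry point above keeps old userspace working by forcing EXEC_OBJECT_NEEDS_FENCE on every object for pre-965 chips, matching the implicit fencing behaviour of the original ABI.
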
3922 4116
@@ -3933,19 +4127,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3933 if (ret) 4127 if (ret)
3934 return ret; 4128 return ret;
3935 } 4129 }
3936 /* 4130
3937 * Pre-965 chips need a fence register set up in order to
3938 * properly handle tiled surfaces.
3939 */
3940 if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
3941 ret = i915_gem_object_get_fence_reg(obj);
3942 if (ret != 0) {
3943 if (ret != -EBUSY && ret != -ERESTARTSYS)
3944 DRM_ERROR("Failure to install fence: %d\n",
3945 ret);
3946 return ret;
3947 }
3948 }
3949 obj_priv->pin_count++; 4131 obj_priv->pin_count++;
3950 4132
3951 /* If the object is not active and not pending a flush, 4133 /* If the object is not active and not pending a flush,
@@ -4203,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
4203 obj_priv->obj = obj; 4385 obj_priv->obj = obj;
4204 obj_priv->fence_reg = I915_FENCE_REG_NONE; 4386 obj_priv->fence_reg = I915_FENCE_REG_NONE;
4205 INIT_LIST_HEAD(&obj_priv->list); 4387 INIT_LIST_HEAD(&obj_priv->list);
4388 INIT_LIST_HEAD(&obj_priv->gpu_write_list);
4206 INIT_LIST_HEAD(&obj_priv->fence_list); 4389 INIT_LIST_HEAD(&obj_priv->fence_list);
4207 obj_priv->madv = I915_MADV_WILLNEED; 4390 obj_priv->madv = I915_MADV_WILLNEED;
4208 4391
@@ -4654,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
4654 spin_lock_init(&dev_priv->mm.active_list_lock); 4837 spin_lock_init(&dev_priv->mm.active_list_lock);
4655 INIT_LIST_HEAD(&dev_priv->mm.active_list); 4838 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4656 INIT_LIST_HEAD(&dev_priv->mm.flushing_list); 4839 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4840 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
4657 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 4841 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4658 INIT_LIST_HEAD(&dev_priv->mm.request_list); 4842 INIT_LIST_HEAD(&dev_priv->mm.request_list);
4659 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4843 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4708,7 +4892,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
4708 4892
4709 phys_obj->id = id; 4893 phys_obj->id = id;
4710 4894
4711 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff); 4895 phys_obj->handle = drm_pci_alloc(dev, size, 0);
4712 if (!phys_obj->handle) { 4896 if (!phys_obj->handle) {
4713 ret = -ENOMEM; 4897 ret = -ENOMEM;
4714 goto kfree_obj; 4898 goto kfree_obj;
@@ -4766,7 +4950,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
4766 if (!obj_priv->phys_obj) 4950 if (!obj_priv->phys_obj)
4767 return; 4951 return;
4768 4952
4769 ret = i915_gem_object_get_pages(obj); 4953 ret = i915_gem_object_get_pages(obj, 0);
4770 if (ret) 4954 if (ret)
4771 goto out; 4955 goto out;
4772 4956
@@ -4824,7 +5008,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4824 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; 5008 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4825 obj_priv->phys_obj->cur_obj = obj; 5009 obj_priv->phys_obj->cur_obj = obj;
4826 5010
4827 ret = i915_gem_object_get_pages(obj); 5011 ret = i915_gem_object_get_pages(obj, 0);
4828 if (ret) { 5012 if (ret) {
4829 DRM_ERROR("failed to get page list\n"); 5013 DRM_ERROR("failed to get page list\n");
4830 goto out; 5014 goto out;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 30d6af6c09bb..df278b2685bf 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -304,35 +304,39 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
304 304
305 305
306/** 306/**
307 * Returns the size of the fence for a tiled object of the given size. 307 * Returns whether an object is currently fenceable. If not, it may need
308 * to be unbound and have its pitch adjusted.
308 */ 309 */
309static int 310bool
310i915_get_fence_size(struct drm_device *dev, int size) 311i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
311{ 312{
312 int i; 313 struct drm_i915_gem_object *obj_priv = obj->driver_private;
313 int start;
314 314
315 if (IS_I965G(dev)) { 315 if (IS_I965G(dev)) {
316 /* The 965 can have fences at any page boundary. */ 316 /* The 965 can have fences at any page boundary. */
317 return ALIGN(size, 4096); 317 if (obj->size & 4095)
318 return false;
319 return true;
320 } else if (IS_I9XX(dev)) {
321 if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
322 return false;
318 } else { 323 } else {
319 /* Align the size to a power of two greater than the smallest 324 if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
320 * fence size. 325 return false;
321 */ 326 }
322 if (IS_I9XX(dev))
323 start = 1024 * 1024;
324 else
325 start = 512 * 1024;
326 327
327 for (i = start; i < size; i <<= 1) 328 /* Power of two sized... */
328 ; 329 if (obj->size & (obj->size - 1))
330 return false;
329 331
330 return i; 332 /* Objects must be size aligned as well */
331 } 333 if (obj_priv->gtt_offset & (obj->size - 1))
334 return false;
335 return true;
332} 336}
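
The pre-965 constraints above boil down to: a power-of-two object size, and a GTT offset naturally aligned to that size (plus the per-generation start mask). A standalone worked example of the arithmetic:

	#include <stdbool.h>
	#include <stdint.h>

	static bool fence_geometry_ok(uint32_t size, uint32_t gtt_offset)
	{
		if (size & (size - 1))		/* must be power-of-two sized */
			return false;
		if (gtt_offset & (size - 1))	/* must be size-aligned */
			return false;
		return true;
	}

	/* fence_geometry_ok(1 << 20, 0x00100000) -> true
	 * fence_geometry_ok(1 << 20, 0x00180000) -> false: a 1 MiB object
	 * at a 1.5 MiB offset cannot be covered by one fence register */
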
333 337
334/* Check pitch constraints for all chips & tiling formats */ 338/* Check pitch constraints for all chips & tiling formats */
335static bool 339bool
336i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) 340i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
337{ 341{
338 int tile_width; 342 int tile_width;
@@ -384,12 +388,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
384 if (stride & (stride - 1)) 388 if (stride & (stride - 1))
385 return false; 389 return false;
386 390
387 /* We don't handle the aperture area covered by the fence being bigger
388 * than the object size.
389 */
390 if (i915_get_fence_size(dev, size) != size)
391 return false;
392
393 return true; 391 return true;
394} 392}
395 393
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 85f4c5de97e2..a17d6bdfe63e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,7 +274,6 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
274 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 274 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
275 int ret = IRQ_NONE; 275 int ret = IRQ_NONE;
276 u32 de_iir, gt_iir, de_ier, pch_iir; 276 u32 de_iir, gt_iir, de_ier, pch_iir;
277 u32 new_de_iir, new_gt_iir, new_pch_iir;
278 struct drm_i915_master_private *master_priv; 277 struct drm_i915_master_private *master_priv;
279 278
280 /* disable master interrupt before clearing iir */ 279 /* disable master interrupt before clearing iir */
@@ -286,49 +285,58 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
286 gt_iir = I915_READ(GTIIR); 285 gt_iir = I915_READ(GTIIR);
287 pch_iir = I915_READ(SDEIIR); 286 pch_iir = I915_READ(SDEIIR);
288 287
289 for (;;) { 288 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
290 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) 289 goto done;
291 break;
292 290
293 ret = IRQ_HANDLED; 291 ret = IRQ_HANDLED;
294 292
295 /* should clear PCH hotplug event before clear CPU irq */ 293 if (dev->primary->master) {
296 I915_WRITE(SDEIIR, pch_iir); 294 master_priv = dev->primary->master->driver_priv;
297 new_pch_iir = I915_READ(SDEIIR); 295 if (master_priv->sarea_priv)
296 master_priv->sarea_priv->last_dispatch =
297 READ_BREADCRUMB(dev_priv);
298 }
298 299
299 I915_WRITE(DEIIR, de_iir); 300 if (gt_iir & GT_USER_INTERRUPT) {
300 new_de_iir = I915_READ(DEIIR); 301 u32 seqno = i915_get_gem_seqno(dev);
301 I915_WRITE(GTIIR, gt_iir); 302 dev_priv->mm.irq_gem_seqno = seqno;
302 new_gt_iir = I915_READ(GTIIR); 303 trace_i915_gem_request_complete(dev, seqno);
304 DRM_WAKEUP(&dev_priv->irq_queue);
305 dev_priv->hangcheck_count = 0;
306 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
307 }
303 308
304 if (dev->primary->master) { 309 if (de_iir & DE_GSE)
305 master_priv = dev->primary->master->driver_priv; 310 ironlake_opregion_gse_intr(dev);
306 if (master_priv->sarea_priv)
307 master_priv->sarea_priv->last_dispatch =
308 READ_BREADCRUMB(dev_priv);
309 }
310 311
311 if (gt_iir & GT_USER_INTERRUPT) { 312 if (de_iir & DE_PLANEA_FLIP_DONE) {
312 u32 seqno = i915_get_gem_seqno(dev); 313 intel_prepare_page_flip(dev, 0);
313 dev_priv->mm.irq_gem_seqno = seqno; 314 intel_finish_page_flip(dev, 0);
314 trace_i915_gem_request_complete(dev, seqno); 315 }
315 DRM_WAKEUP(&dev_priv->irq_queue);
316 }
317 316
318 if (de_iir & DE_GSE) 317 if (de_iir & DE_PLANEB_FLIP_DONE) {
319 ironlake_opregion_gse_intr(dev); 318 intel_prepare_page_flip(dev, 1);
319 intel_finish_page_flip(dev, 1);
320 }
320 321
321 /* check event from PCH */ 322 if (de_iir & DE_PIPEA_VBLANK)
322 if ((de_iir & DE_PCH_EVENT) && 323 drm_handle_vblank(dev, 0);
323 (pch_iir & SDE_HOTPLUG_MASK)) {
324 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
325 }
326 324
327 de_iir = new_de_iir; 325 if (de_iir & DE_PIPEB_VBLANK)
328 gt_iir = new_gt_iir; 326 drm_handle_vblank(dev, 1);
329 pch_iir = new_pch_iir; 327
328 /* check event from PCH */
329 if ((de_iir & DE_PCH_EVENT) &&
330 (pch_iir & SDE_HOTPLUG_MASK)) {
331 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
330 } 332 }
331 333
334 /* should clear PCH hotplug event before clear CPU irq */
335 I915_WRITE(SDEIIR, pch_iir);
336 I915_WRITE(GTIIR, gt_iir);
337 I915_WRITE(DEIIR, de_iir);
338
339done:
332 I915_WRITE(DEIER, de_ier); 340 I915_WRITE(DEIER, de_ier);
333 (void)I915_READ(DEIER); 341 (void)I915_READ(DEIER);
334 342
@@ -852,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
852 if (!(pipeconf & PIPEACONF_ENABLE)) 860 if (!(pipeconf & PIPEACONF_ENABLE))
853 return -EINVAL; 861 return -EINVAL;
854 862
855 if (IS_IRONLAKE(dev))
856 return 0;
857
858 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 863 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
859 if (IS_I965G(dev)) 864 if (IS_IRONLAKE(dev))
865 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
866 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
867 else if (IS_I965G(dev))
860 i915_enable_pipestat(dev_priv, pipe, 868 i915_enable_pipestat(dev_priv, pipe,
861 PIPE_START_VBLANK_INTERRUPT_ENABLE); 869 PIPE_START_VBLANK_INTERRUPT_ENABLE);
862 else 870 else
@@ -874,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
874 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 882 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
875 unsigned long irqflags; 883 unsigned long irqflags;
876 884
877 if (IS_IRONLAKE(dev))
878 return;
879
880 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 885 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
881 i915_disable_pipestat(dev_priv, pipe, 886 if (IS_IRONLAKE(dev))
882 PIPE_VBLANK_INTERRUPT_ENABLE | 887 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
883 PIPE_START_VBLANK_INTERRUPT_ENABLE); 888 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
889 else
890 i915_disable_pipestat(dev_priv, pipe,
891 PIPE_VBLANK_INTERRUPT_ENABLE |
892 PIPE_START_VBLANK_INTERRUPT_ENABLE);
884 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 893 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
885} 894}
886 895
@@ -1023,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1023{ 1032{
1024 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1033 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1025 /* enable kind of interrupts always enabled */ 1034 /* enable kind of interrupts always enabled */
1026 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; 1035 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1036 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1027 u32 render_mask = GT_USER_INTERRUPT; 1037 u32 render_mask = GT_USER_INTERRUPT;
1028 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1038 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1029 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1039 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1030 1040
1031 dev_priv->irq_mask_reg = ~display_mask; 1041 dev_priv->irq_mask_reg = ~display_mask;
1032 dev_priv->de_irq_enable_reg = display_mask; 1042 dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
1033 1043
1034 /* should always can generate irq */ 1044 /* should always can generate irq */
1035 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1045 I915_WRITE(DEIIR, I915_READ(DEIIR));
@@ -1084,6 +1094,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1084 (void) I915_READ(IER); 1094 (void) I915_READ(IER);
1085} 1095}
1086 1096
1097/*
1098 * Must be called after intel_modeset_init or hotplug interrupts won't be
1099 * enabled correctly.
1100 */
1087int i915_driver_irq_postinstall(struct drm_device *dev) 1101int i915_driver_irq_postinstall(struct drm_device *dev)
1088{ 1102{
1089 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1103 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1106,19 +1120,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1106 if (I915_HAS_HOTPLUG(dev)) { 1120 if (I915_HAS_HOTPLUG(dev)) {
1107 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1121 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1108 1122
1109 /* Leave other bits alone */ 1123 /* Note HDMI and DP share bits */
1110 hotplug_en |= HOTPLUG_EN_MASK; 1124 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1125 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1126 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1127 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1128 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1129 hotplug_en |= HDMID_HOTPLUG_INT_EN;
1130 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1131 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1132 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1133 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1134 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
1135 hotplug_en |= CRT_HOTPLUG_INT_EN;
1136 /* Ignore TV since it's buggy */
1137
1111 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 1138 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1112 1139
1113 dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
1114 TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
1115 SDVOB_HOTPLUG_INT_STATUS;
1116 if (IS_G4X(dev)) {
1117 dev_priv->hotplug_supported_mask |=
1118 HDMIB_HOTPLUG_INT_STATUS |
1119 HDMIC_HOTPLUG_INT_STATUS |
1120 HDMID_HOTPLUG_INT_STATUS;
1121 }
1122 /* Enable in IER... */ 1140 /* Enable in IER... */
1123 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 1141 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1124 /* and unmask in IMR */ 1142 /* and unmask in IMR */
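
With HOTPLUG_EN_MASK gone (see the i915_reg.h hunk below), the enable bits are now derived from dev_priv->hotplug_supported_mask, which each output's init code is expected to populate before irq postinstall runs; this is also why the comment added above i915_driver_irq_postinstall() warns that it must be called after intel_modeset_init(). A hedged sketch of the producer side (the exact init functions are not shown in this diff):

	/* e.g. in the CRT output setup, once the connector exists: */
	dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;

	/* and in a digital-port setup that detected HDMI on port B: */
	dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
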
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 974b3cf70618..ab1bd2d3d3b6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -338,6 +338,7 @@
338#define FBC_CTL_PERIODIC (1<<30) 338#define FBC_CTL_PERIODIC (1<<30)
339#define FBC_CTL_INTERVAL_SHIFT (16) 339#define FBC_CTL_INTERVAL_SHIFT (16)
340#define FBC_CTL_UNCOMPRESSIBLE (1<<14) 340#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
341#define FBC_C3_IDLE (1<<13)
341#define FBC_CTL_STRIDE_SHIFT (5) 342#define FBC_CTL_STRIDE_SHIFT (5)
342#define FBC_CTL_FENCENO (1<<0) 343#define FBC_CTL_FENCENO (1<<0)
343#define FBC_COMMAND 0x0320c 344#define FBC_COMMAND 0x0320c
@@ -879,13 +880,6 @@
879#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 880#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
880#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ 881#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
881#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f 882#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
882#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \
883 HDMIC_HOTPLUG_INT_EN | \
884 HDMID_HOTPLUG_INT_EN | \
885 SDVOB_HOTPLUG_INT_EN | \
886 SDVOC_HOTPLUG_INT_EN | \
887 CRT_HOTPLUG_INT_EN)
888
889 883
890#define PORT_HOTPLUG_STAT 0x61114 884#define PORT_HOTPLUG_STAT 0x61114
891#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) 885#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -982,6 +976,8 @@
982#define LVDS_PORT_EN (1 << 31) 976#define LVDS_PORT_EN (1 << 31)
983/* Selects pipe B for LVDS data. Must be set on pre-965. */ 977/* Selects pipe B for LVDS data. Must be set on pre-965. */
984#define LVDS_PIPEB_SELECT (1 << 30) 978#define LVDS_PIPEB_SELECT (1 << 30)
979/* LVDS dithering flag on 965/g4x platform */
980#define LVDS_ENABLE_DITHER (1 << 25)
985/* Enable border for unscaled (or aspect-scaled) display */ 981/* Enable border for unscaled (or aspect-scaled) display */
986#define LVDS_BORDER_ENABLE (1 << 15) 982#define LVDS_BORDER_ENABLE (1 << 15)
987/* 983/*
@@ -1751,6 +1747,8 @@
1751 1747
1752/* Display & cursor control */ 1748/* Display & cursor control */
1753 1749
1750/* dithering flag on Ironlake */
1751#define PIPE_ENABLE_DITHER (1 << 4)
1754/* Pipe A */ 1752/* Pipe A */
1755#define PIPEADSL 0x70000 1753#define PIPEADSL 0x70000
1756#define PIPEACONF 0x70008 1754#define PIPEACONF 0x70008
@@ -1818,7 +1816,7 @@
1818#define DSPFW_PLANEB_SHIFT 8 1816#define DSPFW_PLANEB_SHIFT 8
1819#define DSPFW2 0x70038 1817#define DSPFW2 0x70038
1820#define DSPFW_CURSORA_MASK 0x00003f00 1818#define DSPFW_CURSORA_MASK 0x00003f00
1821#define DSPFW_CURSORA_SHIFT 16 1819#define DSPFW_CURSORA_SHIFT 8
1822#define DSPFW3 0x7003c 1820#define DSPFW3 0x7003c
1823#define DSPFW_HPLL_SR_EN (1<<31) 1821#define DSPFW_HPLL_SR_EN (1<<31)
1824#define DSPFW_CURSOR_SR_SHIFT 24 1822#define DSPFW_CURSOR_SR_SHIFT 24
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index d5ebb00a9d49..a3b90c9561dc 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -732,12 +732,6 @@ int i915_save_state(struct drm_device *dev)
 
 	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
 
-	/* Render Standby */
-	if (I915_HAS_RC6(dev)) {
-		dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
-		dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
-	}
-
 	/* Hardware status page */
 	dev_priv->saveHWS = I915_READ(HWS_PGA);
 
@@ -793,12 +787,6 @@ int i915_restore_state(struct drm_device *dev)
 
 	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
 
-	/* Render Standby */
-	if (I915_HAS_RC6(dev)) {
-		I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
-		I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
-	}
-
 	/* Hardware status page */
 	I915_WRITE(HWS_PGA, dev_priv->saveHWS);
 
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f27567747580..15fbc1b5a83e 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -33,6 +33,8 @@
 #define SLAVE_ADDR1	0x70
 #define SLAVE_ADDR2	0x72
 
+static int panel_type;
+
 static void *
 find_section(struct bdb_header *bdb, int section_id)
 {
@@ -128,6 +130,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 	dev_priv->lvds_dither = lvds_options->pixel_dither;
 	if (lvds_options->panel_type == 0xff)
 		return;
+	panel_type = lvds_options->panel_type;
 
 	lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
 	if (!lvds_lfp_data)
@@ -197,7 +200,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 			memset(temp_mode, 0, sizeof(*temp_mode));
 		}
 		kfree(temp_mode);
-		if (temp_downclock < panel_fixed_mode->clock) {
+		if (temp_downclock < panel_fixed_mode->clock &&
+		    i915_lvds_downclock) {
 			dev_priv->lvds_downclock_avail = 1;
 			dev_priv->lvds_downclock = temp_downclock;
 			DRM_DEBUG_KMS("LVDS downclock is found in VBT. ",
@@ -405,6 +409,34 @@ parse_driver_features(struct drm_i915_private *dev_priv,
 }
 
 static void
+parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+	struct bdb_edp *edp;
+
+	edp = find_section(bdb, BDB_EDP);
+	if (!edp) {
+		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
+			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported,\
+				       assume 18bpp panel color depth.\n");
+			dev_priv->edp_bpp = 18;
+		}
+		return;
+	}
+
+	switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+	case EDP_18BPP:
+		dev_priv->edp_bpp = 18;
+		break;
+	case EDP_24BPP:
+		dev_priv->edp_bpp = 24;
+		break;
+	case EDP_30BPP:
+		dev_priv->edp_bpp = 30;
+		break;
+	}
+}
+
+static void
 parse_device_mapping(struct drm_i915_private *dev_priv,
 		     struct bdb_header *bdb)
 {
@@ -521,6 +553,7 @@ intel_init_bios(struct drm_device *dev)
 	parse_sdvo_device_mapping(dev_priv, bdb);
 	parse_device_mapping(dev_priv, bdb);
 	parse_driver_features(dev_priv, bdb);
+	parse_edp(dev_priv, bdb);
 
 	pci_unmap_rom(pdev, bios);
 
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 425ac9d7f724..4c18514f6f80 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -98,6 +98,7 @@ struct vbios_data {
 #define BDB_SDVO_LVDS_PNP_IDS	 24
 #define BDB_SDVO_LVDS_POWER_SEQ	 25
 #define BDB_TV_OPTIONS		 26
+#define BDB_EDP			 27
 #define BDB_LVDS_OPTIONS	 40
 #define BDB_LVDS_LFP_DATA_PTRS	 41
 #define BDB_LVDS_LFP_DATA	 42
@@ -426,6 +427,45 @@ struct bdb_driver_features {
 	u8 custom_vbt_version;
 } __attribute__((packed));
 
+#define EDP_18BPP	0
+#define EDP_24BPP	1
+#define EDP_30BPP	2
+#define EDP_RATE_1_62	0
+#define EDP_RATE_2_7	1
+#define EDP_LANE_1	0
+#define EDP_LANE_2	1
+#define EDP_LANE_4	3
+#define EDP_PREEMPHASIS_NONE	0
+#define EDP_PREEMPHASIS_3_5dB	1
+#define EDP_PREEMPHASIS_6dB	2
+#define EDP_PREEMPHASIS_9_5dB	3
+#define EDP_VSWING_0_4V		0
+#define EDP_VSWING_0_6V		1
+#define EDP_VSWING_0_8V		2
+#define EDP_VSWING_1_2V		3
+
+struct edp_power_seq {
+	u16 t3;
+	u16 t7;
+	u16 t9;
+	u16 t10;
+	u16 t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+	u8 rate:4;
+	u8 lanes:4;
+	u8 preemphasis:4;
+	u8 vswing:4;
+} __attribute__ ((packed));
+
+struct bdb_edp {
+	struct edp_power_seq power_seqs[16];
+	u32 color_depth;
+	u32 sdrrs_msa_timing_delay;
+	struct edp_link_params link_params[16];
+} __attribute__ ((packed));
+
 bool intel_init_bios(struct drm_device *dev);
 
 /*
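To make the new VBT layout concrete, here is a hedged sketch of pulling one panel's settings out of bdb_edp; it mirrors parse_edp() in intel_bios.c above, and the helper name and debug print are illustrative only, not part of the patch:

	static void example_decode_edp(const struct bdb_edp *edp, int panel_type)
	{
		const struct edp_link_params *link = &edp->link_params[panel_type];
		/* two bits per panel: EDP_18BPP=0, EDP_24BPP=1, EDP_30BPP=2 */
		int bpp = 18 + 6 * ((edp->color_depth >> (panel_type * 2)) & 3);

		printk(KERN_DEBUG "eDP panel %d: %d bpp, %s link, %d lane(s)\n",
		       panel_type, bpp,
		       link->rate == EDP_RATE_2_7 ? "2.7G" : "1.62G",
		       link->lanes == EDP_LANE_4 ? 4 : link->lanes + 1);
	}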
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9f3d3e563414..79dd4026586f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 	adpa = I915_READ(PCH_ADPA);
 
 	adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+	/* disable HPD first */
+	I915_WRITE(PCH_ADPA, adpa);
+	(void)I915_READ(PCH_ADPA);
 
 	adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
 		 ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -548,4 +551,6 @@ void intel_crt_init(struct drm_device *dev)
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
 	drm_sysfs_connector_add(connector);
+
+	dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 52cd9b006da2..b27202d23ebc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -70,8 +70,6 @@ struct intel_limit {
 	intel_p2_t	    p2;
 	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
 			  int, int, intel_clock_t *);
-	bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
-				  int, int, intel_clock_t *);
 };
 
 #define I8XX_DOT_MIN		  25000
@@ -242,38 +240,93 @@ struct intel_limit {
 #define IRONLAKE_DOT_MAX	 350000
 #define IRONLAKE_VCO_MIN	1760000
 #define IRONLAKE_VCO_MAX	3510000
-#define IRONLAKE_N_MIN		1
-#define IRONLAKE_N_MAX		5
-#define IRONLAKE_M_MIN		79
-#define IRONLAKE_M_MAX		118
 #define IRONLAKE_M1_MIN		12
-#define IRONLAKE_M1_MAX		23
+#define IRONLAKE_M1_MAX		22
 #define IRONLAKE_M2_MIN		5
 #define IRONLAKE_M2_MAX		9
-#define IRONLAKE_P_SDVO_DAC_MIN	5
-#define IRONLAKE_P_SDVO_DAC_MAX	80
-#define IRONLAKE_P_LVDS_MIN	28
-#define IRONLAKE_P_LVDS_MAX	112
-#define IRONLAKE_P1_MIN		1
-#define IRONLAKE_P1_MAX		8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW	14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST	7  /* double channel */
 #define IRONLAKE_P2_DOT_LIMIT	225000 /* 225Mhz */
 
+/* We have parameter ranges for different type of outputs. */
+
+/* DAC & HDMI Refclk 120Mhz */
+#define IRONLAKE_DAC_N_MIN	1
+#define IRONLAKE_DAC_N_MAX	5
+#define IRONLAKE_DAC_M_MIN	79
+#define IRONLAKE_DAC_M_MAX	127
+#define IRONLAKE_DAC_P_MIN	5
+#define IRONLAKE_DAC_P_MAX	80
+#define IRONLAKE_DAC_P1_MIN	1
+#define IRONLAKE_DAC_P1_MAX	8
+#define IRONLAKE_DAC_P2_SLOW	10
+#define IRONLAKE_DAC_P2_FAST	5
+
+/* LVDS single-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_S_N_MIN	1
+#define IRONLAKE_LVDS_S_N_MAX	3
+#define IRONLAKE_LVDS_S_M_MIN	79
+#define IRONLAKE_LVDS_S_M_MAX	118
+#define IRONLAKE_LVDS_S_P_MIN	28
+#define IRONLAKE_LVDS_S_P_MAX	112
+#define IRONLAKE_LVDS_S_P1_MIN	2
+#define IRONLAKE_LVDS_S_P1_MAX	8
+#define IRONLAKE_LVDS_S_P2_SLOW	14
+#define IRONLAKE_LVDS_S_P2_FAST	14
+
+/* LVDS dual-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_D_N_MIN	1
+#define IRONLAKE_LVDS_D_N_MAX	3
+#define IRONLAKE_LVDS_D_M_MIN	79
+#define IRONLAKE_LVDS_D_M_MAX	127
+#define IRONLAKE_LVDS_D_P_MIN	14
+#define IRONLAKE_LVDS_D_P_MAX	56
+#define IRONLAKE_LVDS_D_P1_MIN	2
+#define IRONLAKE_LVDS_D_P1_MAX	8
+#define IRONLAKE_LVDS_D_P2_SLOW	7
+#define IRONLAKE_LVDS_D_P2_FAST	7
+
+/* LVDS single-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_S_SSC_N_MIN	1
+#define IRONLAKE_LVDS_S_SSC_N_MAX	2
+#define IRONLAKE_LVDS_S_SSC_M_MIN	79
+#define IRONLAKE_LVDS_S_SSC_M_MAX	126
+#define IRONLAKE_LVDS_S_SSC_P_MIN	28
+#define IRONLAKE_LVDS_S_SSC_P_MAX	112
+#define IRONLAKE_LVDS_S_SSC_P1_MIN	2
+#define IRONLAKE_LVDS_S_SSC_P1_MAX	8
+#define IRONLAKE_LVDS_S_SSC_P2_SLOW	14
+#define IRONLAKE_LVDS_S_SSC_P2_FAST	14
+
+/* LVDS dual-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_D_SSC_N_MIN	1
+#define IRONLAKE_LVDS_D_SSC_N_MAX	3
+#define IRONLAKE_LVDS_D_SSC_M_MIN	79
+#define IRONLAKE_LVDS_D_SSC_M_MAX	126
+#define IRONLAKE_LVDS_D_SSC_P_MIN	14
+#define IRONLAKE_LVDS_D_SSC_P_MAX	42
+#define IRONLAKE_LVDS_D_SSC_P1_MIN	2
+#define IRONLAKE_LVDS_D_SSC_P1_MAX	6
+#define IRONLAKE_LVDS_D_SSC_P2_SLOW	7
+#define IRONLAKE_LVDS_D_SSC_P2_FAST	7
+
+/* DisplayPort */
+#define IRONLAKE_DP_N_MIN		1
+#define IRONLAKE_DP_N_MAX		2
+#define IRONLAKE_DP_M_MIN		81
+#define IRONLAKE_DP_M_MAX		90
+#define IRONLAKE_DP_P_MIN		10
+#define IRONLAKE_DP_P_MAX		20
+#define IRONLAKE_DP_P2_FAST		10
+#define IRONLAKE_DP_P2_SLOW		10
+#define IRONLAKE_DP_P2_LIMIT		0
+#define IRONLAKE_DP_P1_MIN		1
+#define IRONLAKE_DP_P1_MAX		2
+
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 		    int target, int refclk, intel_clock_t *best_clock);
 static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-			    int target, int refclk, intel_clock_t *best_clock);
-static bool
 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 			int target, int refclk, intel_clock_t *best_clock);
-static bool
-intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-			     int target, int refclk, intel_clock_t *best_clock);
 
 static bool
 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
@@ -294,7 +347,6 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
 	.p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
 		 .p2_slow = I8XX_P2_SLOW,	.p2_fast = I8XX_P2_FAST },
 	.find_pll = intel_find_best_PLL,
-	.find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i8xx_lvds = {
@@ -309,7 +361,6 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
 	.p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
 		 .p2_slow = I8XX_P2_LVDS_SLOW,	.p2_fast = I8XX_P2_LVDS_FAST },
 	.find_pll = intel_find_best_PLL,
-	.find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i9xx_sdvo = {
@@ -324,7 +375,6 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
 	.p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
 		 .p2_slow = I9XX_P2_SDVO_DAC_SLOW,	.p2_fast = I9XX_P2_SDVO_DAC_FAST },
 	.find_pll = intel_find_best_PLL,
-	.find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i9xx_lvds = {
@@ -342,7 +392,6 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
 	.p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
 		 .p2_slow = I9XX_P2_LVDS_SLOW,	.p2_fast = I9XX_P2_LVDS_FAST },
 	.find_pll = intel_find_best_PLL,
-	.find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
     /* below parameter and function is for G4X Chipset Family*/
@@ -360,7 +409,6 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
 		 .p2_fast = G4X_P2_SDVO_FAST
 	},
 	.find_pll = intel_g4x_find_best_PLL,
-	.find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_hdmi = {
@@ -377,7 +425,6 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
 		 .p2_fast = G4X_P2_HDMI_DAC_FAST
 	},
 	.find_pll = intel_g4x_find_best_PLL,
-	.find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
@@ -402,7 +449,6 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
 		 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
 	},
 	.find_pll = intel_g4x_find_best_PLL,
-	.find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
@@ -427,7 +473,6 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
 		 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
 	},
 	.find_pll = intel_g4x_find_best_PLL,
-	.find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_display_port = {
@@ -465,7 +510,6 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
 	.p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
 		 .p2_slow = I9XX_P2_SDVO_DAC_SLOW,	.p2_fast = I9XX_P2_SDVO_DAC_FAST },
 	.find_pll = intel_find_best_PLL,
-	.find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_pineview_lvds = {
@@ -481,46 +525,135 @@ static const intel_limit_t intel_limits_pineview_lvds = {
 	.p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
 		 .p2_slow = I9XX_P2_LVDS_SLOW,	.p2_fast = I9XX_P2_LVDS_SLOW },
 	.find_pll = intel_find_best_PLL,
-	.find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_sdvo = {
+static const intel_limit_t intel_limits_ironlake_dac = {
+	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
+	.n   = { .min = IRONLAKE_DAC_N_MIN,        .max = IRONLAKE_DAC_N_MAX },
+	.m   = { .min = IRONLAKE_DAC_M_MIN,        .max = IRONLAKE_DAC_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_DAC_P_MIN,        .max = IRONLAKE_DAC_P_MAX },
+	.p1  = { .min = IRONLAKE_DAC_P1_MIN,       .max = IRONLAKE_DAC_P1_MAX },
+	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		 .p2_slow = IRONLAKE_DAC_P2_SLOW,
+		 .p2_fast = IRONLAKE_DAC_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
+	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
+	.n   = { .min = IRONLAKE_LVDS_S_N_MIN,     .max = IRONLAKE_LVDS_S_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_S_M_MIN,     .max = IRONLAKE_LVDS_S_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_LVDS_S_P_MIN,     .max = IRONLAKE_LVDS_S_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_S_P1_MIN,    .max = IRONLAKE_LVDS_S_P1_MAX },
+	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		 .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
 	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
-	.n   = { .min = IRONLAKE_N_MIN,            .max = IRONLAKE_N_MAX },
-	.m   = { .min = IRONLAKE_M_MIN,            .max = IRONLAKE_M_MAX },
+	.n   = { .min = IRONLAKE_LVDS_D_N_MIN,     .max = IRONLAKE_LVDS_D_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_D_M_MIN,     .max = IRONLAKE_LVDS_D_M_MAX },
 	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
 	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
-	.p   = { .min = IRONLAKE_P_SDVO_DAC_MIN,   .max = IRONLAKE_P_SDVO_DAC_MAX },
-	.p1  = { .min = IRONLAKE_P1_MIN,           .max = IRONLAKE_P1_MAX },
+	.p   = { .min = IRONLAKE_LVDS_D_P_MIN,     .max = IRONLAKE_LVDS_D_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_D_P1_MIN,    .max = IRONLAKE_LVDS_D_P1_MAX },
 	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
-		 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
-	.find_pll = intel_ironlake_find_best_PLL,
+		 .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_lvds = {
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
 	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
-	.n   = { .min = IRONLAKE_N_MIN,            .max = IRONLAKE_N_MAX },
-	.m   = { .min = IRONLAKE_M_MIN,            .max = IRONLAKE_M_MAX },
+	.n   = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
 	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
 	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
-	.p   = { .min = IRONLAKE_P_LVDS_MIN,       .max = IRONLAKE_P_LVDS_MAX },
-	.p1  = { .min = IRONLAKE_P1_MIN,           .max = IRONLAKE_P1_MAX },
+	.p   = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
 	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		 .p2_slow = IRONLAKE_P2_LVDS_SLOW,
-		 .p2_fast = IRONLAKE_P2_LVDS_FAST },
-	.find_pll = intel_ironlake_find_best_PLL,
+		 .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
+	.n   = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
+	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		 .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_display_port = {
+	.dot = { .min = IRONLAKE_DOT_MIN,
+		 .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN,
+		 .max = IRONLAKE_VCO_MAX},
+	.n   = { .min = IRONLAKE_DP_N_MIN,
+		 .max = IRONLAKE_DP_N_MAX },
+	.m   = { .min = IRONLAKE_DP_M_MIN,
+		 .max = IRONLAKE_DP_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN,
+		 .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN,
+		 .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_DP_P_MIN,
+		 .max = IRONLAKE_DP_P_MAX },
+	.p1  = { .min = IRONLAKE_DP_P1_MIN,
+		 .max = IRONLAKE_DP_P1_MAX},
+	.p2  = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
+		 .p2_slow = IRONLAKE_DP_P2_SLOW,
+		 .p2_fast = IRONLAKE_DP_P2_FAST },
+	.find_pll = intel_find_pll_ironlake_dp,
 };
 
 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	const intel_limit_t *limit;
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-		limit = &intel_limits_ironlake_lvds;
+	int refclk = 120;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
+			refclk = 100;
+
+		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
+		    LVDS_CLKB_POWER_UP) {
+			/* LVDS dual channel */
+			if (refclk == 100)
+				limit = &intel_limits_ironlake_dual_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_dual_lvds;
+		} else {
+			if (refclk == 100)
+				limit = &intel_limits_ironlake_single_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_single_lvds;
+		}
+	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+		   HAS_eDP)
+		limit = &intel_limits_ironlake_display_port;
 	else
-		limit = &intel_limits_ironlake_sdvo;
+		limit = &intel_limits_ironlake_dac;
 
 	return limit;
 }
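For readers skimming the new helper, a condensed view of the selection it encodes (a restatement of intel_ironlake_limit() above, nothing more):

	/*
	 * output                    refclk    limit table
	 * LVDS single channel       120 MHz   intel_limits_ironlake_single_lvds
	 * LVDS single, 100MHz SSC   100 MHz   intel_limits_ironlake_single_lvds_100m
	 * LVDS dual channel         120 MHz   intel_limits_ironlake_dual_lvds
	 * LVDS dual, 100MHz SSC     100 MHz   intel_limits_ironlake_dual_lvds_100m
	 * DisplayPort / eDP          -        intel_limits_ironlake_display_port
	 * DAC / everything else      -        intel_limits_ironlake_dac
	 */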
@@ -737,46 +870,6 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 	return (err != target);
 }
 
-
-static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-			    int target, int refclk, intel_clock_t *best_clock)
-
-{
-	struct drm_device *dev = crtc->dev;
-	intel_clock_t clock;
-	int err = target;
-	bool found = false;
-
-	memcpy(&clock, best_clock, sizeof(intel_clock_t));
-
-	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
-		for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
-			/* m1 is always 0 in Pineview */
-			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
-				break;
-			for (clock.n = limit->n.min; clock.n <= limit->n.max;
-			     clock.n++) {
-				int this_err;
-
-				intel_clock(dev, refclk, &clock);
-
-				if (!intel_PLL_is_valid(crtc, &clock))
-					continue;
-
-				this_err = abs(clock.dot - target);
-				if (this_err < err) {
-					*best_clock = clock;
-					err = this_err;
-					found = true;
-				}
-			}
-		}
-	}
-
-	return found;
-}
-
 static bool
 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 			int target, int refclk, intel_clock_t *best_clock)
@@ -791,7 +884,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 	found = false;
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+		int lvds_reg;
+
+		if (IS_IRONLAKE(dev))
+			lvds_reg = PCH_LVDS;
+		else
+			lvds_reg = LVDS;
+		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
 		    LVDS_CLKB_POWER_UP)
 			clock.p2 = limit->p2.p2_fast;
 		else
@@ -839,6 +938,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	intel_clock_t clock;
+
+	/* return directly when it is eDP */
+	if (HAS_eDP)
+		return true;
+
 	if (target < 200000) {
 		clock.n = 1;
 		clock.p1 = 2;
@@ -857,68 +961,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 	return true;
 }
 
-static bool
-intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-			     int target, int refclk, intel_clock_t *best_clock)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	intel_clock_t clock;
-	int err_most = 47;
-	int err_min = 10000;
-
-	/* eDP has only 2 clock choice, no n/m/p setting */
-	if (HAS_eDP)
-		return true;
-
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
-		return intel_find_pll_ironlake_dp(limit, crtc, target,
-						  refclk, best_clock);
-
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
-		    LVDS_CLKB_POWER_UP)
-			clock.p2 = limit->p2.p2_fast;
-		else
-			clock.p2 = limit->p2.p2_slow;
-	} else {
-		if (target < limit->p2.dot_limit)
-			clock.p2 = limit->p2.p2_slow;
-		else
-			clock.p2 = limit->p2.p2_fast;
-	}
-
-	memset(best_clock, 0, sizeof(*best_clock));
-	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
-		/* based on hardware requriment prefer smaller n to precision */
-		for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
-			/* based on hardware requirment prefere larger m1,m2 */
-			for (clock.m1 = limit->m1.max;
-			     clock.m1 >= limit->m1.min; clock.m1--) {
-				for (clock.m2 = limit->m2.max;
-				     clock.m2 >= limit->m2.min; clock.m2--) {
-					int this_err;
-
-					intel_clock(dev, refclk, &clock);
-					if (!intel_PLL_is_valid(crtc, &clock))
-						continue;
-					this_err = abs((10000 - (target*10000/clock.dot)));
-					if (this_err < err_most) {
-						*best_clock = clock;
-						/* found on first matching */
-						goto out;
-					} else if (this_err < err_min) {
-						*best_clock = clock;
-						err_min = this_err;
-					}
-				}
-			}
-		}
-	}
-out:
-	return true;
-}
-
 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
 static bool
 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -989,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
 	/* enable it... */
 	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+	if (IS_I945GM(dev))
+		fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
 	if (obj_priv->tiling_mode != I915_TILING_NONE)
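A hedged note tying this to the register change earlier in the patch: FBC_C3_IDLE is the newly defined bit 13 of the FBC control register, and per the in-line comment 945GM needs it so the compressor behaves across self-refresh. Illustratively, the control word on 945GM becomes:

	/* Illustrative composition only (FBC_C3_IDLE is (1<<13) per i915_reg.h above) */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC | FBC_C3_IDLE;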
@@ -1282,7 +1326,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		return ret;
 	}
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+	ret = i915_gem_object_set_to_display_plane(obj);
 	if (ret != 0) {
 		i915_gem_object_unpin(obj);
 		mutex_unlock(&dev->struct_mutex);
@@ -1493,6 +1537,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 	int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
 	u32 temp;
 	int tries = 5, j, n;
+	u32 pipe_bpc;
+
+	temp = I915_READ(pipeconf_reg);
+	pipe_bpc = temp & PIPE_BPC_MASK;
 
 	/* XXX: When our outputs are all unaware of DPMS modes other than off
 	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -1524,6 +1572,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 
 		/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
 		temp = I915_READ(fdi_rx_reg);
+		/*
+		 * make the BPC in FDI Rx be consistent with that in
+		 * pipeconf reg.
+		 */
+		temp &= ~(0x7 << 16);
+		temp |= (pipe_bpc << 11);
 		I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
 				FDI_SEL_PCDCLK |
 				FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
@@ -1666,6 +1720,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 
 		/* enable PCH transcoder */
 		temp = I915_READ(transconf_reg);
+		/*
+		 * make the BPC in transcoder be consistent with
+		 * that in pipeconf reg.
+		 */
+		temp &= ~PIPE_BPC_MASK;
+		temp |= pipe_bpc;
 		I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
 		I915_READ(transconf_reg);
 
@@ -1697,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_OFF:
 		DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
 
+		drm_vblank_off(dev, pipe);
 		/* Disable display plane */
 		temp = I915_READ(dspcntr_reg);
 		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -1745,6 +1806,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 		I915_READ(fdi_tx_reg);
 
 		temp = I915_READ(fdi_rx_reg);
+		/* BPC in FDI rx is consistent with that in pipeconf */
+		temp &= ~(0x07 << 16);
+		temp |= (pipe_bpc << 11);
 		I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
 		I915_READ(fdi_rx_reg);
 
@@ -1789,7 +1853,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 			}
 		}
 	}
-
+	temp = I915_READ(transconf_reg);
+	/* BPC in transcoder is consistent with that in pipeconf */
+	temp &= ~PIPE_BPC_MASK;
+	temp |= pipe_bpc;
+	I915_WRITE(transconf_reg, temp);
+	I915_READ(transconf_reg);
 	udelay(100);
 
 	/* disable PCH DPLL */
@@ -2448,7 +2517,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
  * A value of 5us seems to be a good balance; safe for very low end
  * platforms but not overly aggressive on lower latency configs.
  */
-const static int latency_ns = 5000;
+static const int latency_ns = 5000;
 
 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 {
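The `const static` to `static const` swaps in this hunk and the three sr_latency_ns hunks below are pure C declaration hygiene: both orderings are legal, but the C standard calls a storage-class specifier placed anywhere but first obsolescent, and gcc can warn about it (e.g. via -Wold-style-declaration). A minimal illustration, not from the patch:

	const static int latency_ns = 5000;	/* legal, but obsolescent ordering */
	static const int latency_ns = 5000;	/* canonical: storage class first */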
@@ -2559,7 +2628,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
 	/* Calc sr entries for one plane configs */
 	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
 		/* self-refresh has much higher latency */
-		const static int sr_latency_ns = 12000;
+		static const int sr_latency_ns = 12000;
 
 		sr_clock = planea_clock ? planea_clock : planeb_clock;
 		line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2570,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
 		sr_entries = roundup(sr_entries / cacheline_size, 1);
 		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+	} else {
+		/* Turn off self refresh if both pipes are enabled */
+		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+			   & ~FW_BLC_SELF_EN);
 	}
 
 	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2598,7 +2671,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
 	/* Calc sr entries for one plane configs */
 	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
 		/* self-refresh has much higher latency */
-		const static int sr_latency_ns = 12000;
+		static const int sr_latency_ns = 12000;
 
 		sr_clock = planea_clock ? planea_clock : planeb_clock;
 		line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2613,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
 			srwm = 1;
 		srwm &= 0x3f;
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+	} else {
+		/* Turn off self refresh if both pipes are enabled */
+		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+			   & ~FW_BLC_SELF_EN);
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2667,7 +2744,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 	if (HAS_FW_BLC(dev) && sr_hdisplay &&
 	    (!planea_clock || !planeb_clock)) {
 		/* self-refresh has much higher latency */
-		const static int sr_latency_ns = 6000;
+		static const int sr_latency_ns = 6000;
 
 		sr_clock = planea_clock ? planea_clock : planeb_clock;
 		line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2681,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 		if (srwm < 0)
 			srwm = 1;
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+	} else {
+		/* Turn off self refresh if both pipes are enabled */
+		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+			   & ~FW_BLC_SELF_EN);
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
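The three `else` branches added above share one pattern: self-refresh is only safe with a single active pipe, so when both pipes run the enable bit must be cleared rather than left stale. A hedged distillation (the helper name is illustrative, not in the patch):

	/* Illustrative helper: toggle memory self-refresh for watermark updates */
	static void example_set_self_refresh(struct drm_device *dev, bool on)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;
		u32 fw_blc_self = I915_READ(FW_BLC_SELF);

		if (on)
			fw_blc_self |= FW_BLC_SELF_EN;
		else
			fw_blc_self &= ~FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, fw_blc_self);
	}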
@@ -2906,10 +2987,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		return -EINVAL;
 	}
 
-	if (is_lvds && limit->find_reduced_pll &&
-	    dev_priv->lvds_downclock_avail) {
-		memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
-		has_reduced_clock = limit->find_reduced_pll(limit, crtc,
+	if (is_lvds && dev_priv->lvds_downclock_avail) {
+		has_reduced_clock = limit->find_pll(limit, crtc,
 						    dev_priv->lvds_downclock,
 						    refclk,
 						    &reduced_clock);
@@ -2969,6 +3048,33 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
 	/* determine panel color depth */
 	temp = I915_READ(pipeconf_reg);
+	temp &= ~PIPE_BPC_MASK;
+	if (is_lvds) {
+		int lvds_reg = I915_READ(PCH_LVDS);
+		/* the BPC will be 6 if it is 18-bit LVDS panel */
+		if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+			temp |= PIPE_8BPC;
+		else
+			temp |= PIPE_6BPC;
+	} else if (is_edp) {
+		switch (dev_priv->edp_bpp/3) {
+		case 8:
+			temp |= PIPE_8BPC;
+			break;
+		case 10:
+			temp |= PIPE_10BPC;
+			break;
+		case 6:
+			temp |= PIPE_6BPC;
+			break;
+		case 12:
+			temp |= PIPE_12BPC;
+			break;
+		}
+	} else
+		temp |= PIPE_8BPC;
+	I915_WRITE(pipeconf_reg, temp);
+	I915_READ(pipeconf_reg);
 
 	switch (temp & PIPE_BPC_MASK) {
 	case PIPE_8BPC:
@@ -3195,7 +3301,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		 * appropriately here, but we need to look more thoroughly into how
 		 * panels behave in the two modes.
 		 */
-
+		/* set the dithering flag */
+		if (IS_I965G(dev)) {
+			if (dev_priv->lvds_dither) {
+				if (IS_IRONLAKE(dev))
+					pipeconf |= PIPE_ENABLE_DITHER;
+				else
+					lvds |= LVDS_ENABLE_DITHER;
+			} else {
+				if (IS_IRONLAKE(dev))
+					pipeconf &= ~PIPE_ENABLE_DITHER;
+				else
+					lvds &= ~LVDS_ENABLE_DITHER;
+			}
+		}
 		I915_WRITE(lvds_reg, lvds);
 		I915_READ(lvds_reg);
 	}
@@ -3385,7 +3504,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
 	/* we only need to pin inside GTT if cursor is non-phy */
 	mutex_lock(&dev->struct_mutex);
-	if (!dev_priv->cursor_needs_physical) {
+	if (!dev_priv->info->cursor_needs_physical) {
 		ret = i915_gem_object_pin(bo, PAGE_SIZE);
 		if (ret) {
 			DRM_ERROR("failed to pin cursor bo\n");
@@ -3420,7 +3539,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	I915_WRITE(base, addr);
 
 	if (intel_crtc->cursor_bo) {
-		if (dev_priv->cursor_needs_physical) {
+		if (dev_priv->info->cursor_needs_physical) {
 			if (intel_crtc->cursor_bo != bo)
 				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
 		} else
@@ -3779,125 +3898,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
 	queue_work(dev_priv->wq, &dev_priv->idle_work);
 }
 
-void intel_increase_renderclock(struct drm_device *dev, bool schedule)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (IS_IRONLAKE(dev))
-		return;
-
-	if (!dev_priv->render_reclock_avail) {
-		DRM_DEBUG_DRIVER("not reclocking render clock\n");
-		return;
-	}
-
-	/* Restore render clock frequency to original value */
-	if (IS_G4X(dev) || IS_I9XX(dev))
-		pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
-	else if (IS_I85X(dev))
-		pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
-	DRM_DEBUG_DRIVER("increasing render clock frequency\n");
-
-	/* Schedule downclock */
-	if (schedule)
-		mod_timer(&dev_priv->idle_timer, jiffies +
-			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-}
-
-void intel_decrease_renderclock(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (IS_IRONLAKE(dev))
-		return;
-
-	if (!dev_priv->render_reclock_avail) {
-		DRM_DEBUG_DRIVER("not reclocking render clock\n");
-		return;
-	}
-
-	if (IS_G4X(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
-		gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	} else if (IS_I965G(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
-		gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
-		gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	} else if (IS_I915G(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
-		gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	} else if (IS_I85X(dev)) {
-		u16 hpllcc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
-
-		/* Up to maximum... */
-		hpllcc &= ~GC_CLOCK_CONTROL_MASK;
-		hpllcc |= GC_CLOCK_133_200;
-
-		pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
-	}
-	DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
-}
-
-/* Note that no increase function is needed for this - increase_renderclock()
- * will also rewrite these bits
- */
-void intel_decrease_displayclock(struct drm_device *dev)
-{
-	if (IS_IRONLAKE(dev))
-		return;
-
-	if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
-	    IS_I915GM(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~0xf0;
-		gcfgc |= 0x80;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	}
-}
-
 #define CRTC_IDLE_TIMEOUT	1000 /* ms */
 
 static void intel_crtc_idle_timer(unsigned long arg)
@@ -4011,12 +4011,6 @@ static void intel_idle_update(struct work_struct *work)
 
 	mutex_lock(&dev->struct_mutex);
 
-	/* GPU isn't processing, downclock it. */
-	if (!dev_priv->busy) {
-		intel_decrease_renderclock(dev);
-		intel_decrease_displayclock(dev);
-	}
-
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		/* Skip inactive CRTCs */
 		if (!crtc->fb)
@@ -4050,13 +4044,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	if (!dev_priv->busy) {
+	if (!dev_priv->busy)
 		dev_priv->busy = true;
-		intel_increase_renderclock(dev, true);
-	} else {
+	else
 		mod_timer(&dev_priv->idle_timer, jiffies +
 			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-	}
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		if (!crtc->fb)
@@ -4089,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
 struct intel_unpin_work {
 	struct work_struct work;
 	struct drm_device *dev;
-	struct drm_gem_object *obj;
+	struct drm_gem_object *old_fb_obj;
+	struct drm_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
 	int pending;
 };
@@ -4100,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 		container_of(__work, struct intel_unpin_work, work);
 
 	mutex_lock(&work->dev->struct_mutex);
-	i915_gem_object_unpin(work->obj);
-	drm_gem_object_unreference(work->obj);
+	i915_gem_object_unpin(work->old_fb_obj);
+	drm_gem_object_unreference(work->pending_flip_obj);
+	drm_gem_object_unreference(work->old_fb_obj);
 	mutex_unlock(&work->dev->struct_mutex);
 	kfree(work);
 }
@@ -4124,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
 	if (work == NULL || !work->pending) {
+		if (work && !work->pending) {
+			obj_priv = work->pending_flip_obj->driver_private;
+			DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
+					 obj_priv,
+					 atomic_read(&obj_priv->pending_flip));
+		}
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		return;
 	}
@@ -4144,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	obj_priv = work->obj->driver_private;
-	if (atomic_dec_and_test(&obj_priv->pending_flip))
+	obj_priv = work->pending_flip_obj->driver_private;
+
+	/* Initial scanout buffer will have a 0 pending flip count */
+	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
+	    atomic_dec_and_test(&obj_priv->pending_flip))
 		DRM_WAKEUP(&dev_priv->pending_flip_queue);
 	schedule_work(&work->work);
 }
@@ -4158,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
-	if (intel_crtc->unpin_work)
+	if (intel_crtc->unpin_work) {
 		intel_crtc->unpin_work->pending = 1;
+	} else {
+		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
+	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
@@ -4175,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
 	unsigned long flags;
-	int ret;
+	int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
+	int ret, pipesrc;
 	RING_LOCALS;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4187,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	work->event = event;
 	work->dev = crtc->dev;
 	intel_fb = to_intel_framebuffer(crtc->fb);
-	work->obj = intel_fb->obj;
+	work->old_fb_obj = intel_fb->obj;
 	INIT_WORK(&work->work, intel_unpin_work_fn);
 
 	/* We borrow the event spin lock for protecting unpin_work */
 	spin_lock_irqsave(&dev->event_lock, flags);
 	if (intel_crtc->unpin_work) {
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		kfree(work);
 		mutex_unlock(&dev->struct_mutex);
@@ -4206,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	ret = intel_pin_and_fence_fb_obj(dev, obj);
 	if (ret != 0) {
+		DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+				 obj->driver_private);
 		kfree(work);
+		intel_crtc->unpin_work = NULL;
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
 
-	/* Reference the old fb object for the scheduled work. */
-	drm_gem_object_reference(work->obj);
+	/* Reference the objects for the scheduled work. */
+	drm_gem_object_reference(work->old_fb_obj);
+	drm_gem_object_reference(obj);
 
 	crtc->fb = fb;
 	i915_gem_object_flush_write_domain(obj);
 	drm_vblank_get(dev, intel_crtc->pipe);
 	obj_priv = obj->driver_private;
 	atomic_inc(&obj_priv->pending_flip);
+	work->pending_flip_obj = obj;
 
 	BEGIN_LP_RING(4);
 	OUT_RING(MI_DISPLAY_FLIP |
@@ -4226,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	OUT_RING(fb->pitch);
 	if (IS_I965G(dev)) {
 		OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
-		OUT_RING((fb->width << 16) | fb->height);
+		pipesrc = I915_READ(pipesrc_reg);
+		OUT_RING(pipesrc & 0x0fff0fff);
 	} else {
 		OUT_RING(obj_priv->gtt_offset);
 		OUT_RING(MI_NOOP);
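On 965 the flip command now programs the pipe source size rather than the framebuffer size. A hedged decode of the mask, assuming the commonly documented PIPEASRC/PIPEBSRC layout of (width - 1) in bits 27:16 and (height - 1) in bits 11:0 (treat the bit positions as an assumption, not something this patch states):

	/* Illustrative: 0x0fff0fff keeps exactly the two 12-bit size fields */
	u32 pipesrc = I915_READ(pipesrc_reg);
	int src_w = ((pipesrc >> 16) & 0xfff) + 1;
	int src_h = (pipesrc & 0xfff) + 1;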
@@ -4400,29 +4414,43 @@ static void intel_setup_outputs(struct drm_device *dev)
 		bool found = false;
 
 		if (I915_READ(SDVOB) & SDVO_DETECTED) {
+			DRM_DEBUG_KMS("probing SDVOB\n");
 			found = intel_sdvo_init(dev, SDVOB);
-			if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
 				intel_hdmi_init(dev, SDVOB);
+			}
 
-			if (!found && SUPPORTS_INTEGRATED_DP(dev))
+			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
+				DRM_DEBUG_KMS("probing DP_B\n");
 				intel_dp_init(dev, DP_B);
+			}
 		}
 
 		/* Before G4X SDVOC doesn't have its own detect register */
 
-		if (I915_READ(SDVOB) & SDVO_DETECTED)
+		if (I915_READ(SDVOB) & SDVO_DETECTED) {
+			DRM_DEBUG_KMS("probing SDVOC\n");
 			found = intel_sdvo_init(dev, SDVOC);
+		}
 
 		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
 
-			if (SUPPORTS_INTEGRATED_HDMI(dev))
+			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
 				intel_hdmi_init(dev, SDVOC);
-			if (SUPPORTS_INTEGRATED_DP(dev))
+			}
+			if (SUPPORTS_INTEGRATED_DP(dev)) {
+				DRM_DEBUG_KMS("probing DP_C\n");
 				intel_dp_init(dev, DP_C);
+			}
 		}
 
-		if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
+		if (SUPPORTS_INTEGRATED_DP(dev) &&
+		    (I915_READ(DP_D) & DP_DETECTED)) {
+			DRM_DEBUG_KMS("probing DP_D\n");
 			intel_dp_init(dev, DP_D);
+		}
 	} else if (IS_I8XX(dev))
 		intel_dvo_init(dev);
 
@@ -4527,6 +4555,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
 	.fb_changed = intelfb_probe,
 };
 
+static struct drm_gem_object *
+intel_alloc_power_context(struct drm_device *dev)
+{
+	struct drm_gem_object *pwrctx;
+	int ret;
+
+	pwrctx = drm_gem_object_alloc(dev, 4096);
+	if (!pwrctx) {
+		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+		return NULL;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_object_pin(pwrctx, 4096);
+	if (ret) {
+		DRM_ERROR("failed to pin power context: %d\n", ret);
+		goto err_unref;
+	}
+
+	ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+	if (ret) {
+		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+		goto err_unpin;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return pwrctx;
+
+err_unpin:
+	i915_gem_object_unpin(pwrctx);
+err_unref:
+	drm_gem_object_unreference(pwrctx);
+	mutex_unlock(&dev->struct_mutex);
+	return NULL;
+}
+
 void intel_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4579,42 +4643,27 @@ void intel_init_clock_gating(struct drm_device *dev)
4579 * GPU can automatically power down the render unit if given a page 4643 * GPU can automatically power down the render unit if given a page
4580 * to save state. 4644 * to save state.
4581 */ 4645 */
4582 if (I915_HAS_RC6(dev)) { 4646 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
4583 struct drm_gem_object *pwrctx; 4647 struct drm_i915_gem_object *obj_priv = NULL;
4584 struct drm_i915_gem_object *obj_priv;
4585 int ret;
4586 4648
4587 if (dev_priv->pwrctx) { 4649 if (dev_priv->pwrctx) {
4588 obj_priv = dev_priv->pwrctx->driver_private; 4650 obj_priv = dev_priv->pwrctx->driver_private;
4589 } else { 4651 } else {
4590 pwrctx = drm_gem_object_alloc(dev, 4096); 4652 struct drm_gem_object *pwrctx;
4591 if (!pwrctx) {
4592 DRM_DEBUG("failed to alloc power context, "
4593 "RC6 disabled\n");
4594 goto out;
4595 }
4596 4653
4597 ret = i915_gem_object_pin(pwrctx, 4096); 4654 pwrctx = intel_alloc_power_context(dev);
4598 if (ret) { 4655 if (pwrctx) {
4599 DRM_ERROR("failed to pin power context: %d\n", 4656 dev_priv->pwrctx = pwrctx;
4600 ret); 4657 obj_priv = pwrctx->driver_private;
4601 drm_gem_object_unreference(pwrctx);
4602 goto out;
4603 } 4658 }
4604
4605 i915_gem_object_set_to_gtt_domain(pwrctx, 1);
4606
4607 dev_priv->pwrctx = pwrctx;
4608 obj_priv = pwrctx->driver_private;
4609 } 4659 }
4610 4660
4611 I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); 4661 if (obj_priv) {
4612 I915_WRITE(MCHBAR_RENDER_STANDBY, 4662 I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
4613 I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); 4663 I915_WRITE(MCHBAR_RENDER_STANDBY,
4664 I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
4665 }
4614 } 4666 }
4615
4616out:
4617 return;
4618} 4667}
4619 4668
4620/* Set up chip specific display functions */ 4669/* Set up chip specific display functions */
@@ -4770,7 +4819,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
4770 del_timer_sync(&intel_crtc->idle_timer); 4819 del_timer_sync(&intel_crtc->idle_timer);
4771 } 4820 }
4772 4821
4773 intel_increase_renderclock(dev, false);
4774 del_timer_sync(&dev_priv->idle_timer); 4822 del_timer_sync(&dev_priv->idle_timer);
4775 4823
4776 if (dev_priv->display.disable_fbc) 4824 if (dev_priv->display.disable_fbc)
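The RC6 rework above factors power-context creation into intel_alloc_power_context(), so each failure unwinds exactly the steps that succeeded, and intel_init_clock_gating() writes PWRCTXA only when an object is actually available. A minimal standalone sketch of that goto-unwind shape, assuming nothing beyond libc; gem_alloc()/gem_pin()/gem_set_domain() and friends are hypothetical stubs, not the real i915 calls:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the GEM calls used by the patch above. */
struct obj { int pinned; };

static struct obj *gem_alloc(size_t sz) { (void)sz; return calloc(1, sizeof(struct obj)); }
static int gem_pin(struct obj *o)        { o->pinned = 1; return 0; }
static int gem_set_domain(struct obj *o) { (void)o; return 0; }
static void gem_unpin(struct obj *o)     { o->pinned = 0; }
static void gem_unref(struct obj *o)     { free(o); }

/* Same unwind shape as intel_alloc_power_context(): each label undoes
 * exactly the steps that succeeded before the failure. */
static struct obj *alloc_power_context(void)
{
	struct obj *o = gem_alloc(4096);
	int ret;

	if (!o)
		return NULL;

	ret = gem_pin(o);
	if (ret)
		goto err_unref;

	ret = gem_set_domain(o);
	if (ret)
		goto err_unpin;

	return o;

err_unpin:
	gem_unpin(o);
err_unref:
	gem_unref(o);
	return NULL;
}

int main(void)
{
	struct obj *o = alloc_power_context();

	if (!o) {
		puts("RC6 disabled");
		return 1;
	}
	puts("power context ready");
	gem_unpin(o);
	gem_unref(o);
	return 0;
}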
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4e7aa8b7b938..439506cefc14 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -125,9 +125,15 @@ intel_dp_link_clock(uint8_t link_bw)
125 125
126/* I think this is a fiction */ 126/* I think this is a fiction */
127static int 127static int
128intel_dp_link_required(int pixel_clock) 128intel_dp_link_required(struct drm_device *dev,
129 struct intel_output *intel_output, int pixel_clock)
129{ 130{
130 return pixel_clock * 3; 131 struct drm_i915_private *dev_priv = dev->dev_private;
132
133 if (IS_eDP(intel_output))
134 return (pixel_clock * dev_priv->edp_bpp) / 8;
135 else
136 return pixel_clock * 3;
131} 137}
132 138
133static int 139static int
@@ -138,7 +144,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
138 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); 144 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
139 int max_lanes = intel_dp_max_lane_count(intel_output); 145 int max_lanes = intel_dp_max_lane_count(intel_output);
140 146
141 if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes) 147 if (intel_dp_link_required(connector->dev, intel_output, mode->clock)
148 > max_link_clock * max_lanes)
142 return MODE_CLOCK_HIGH; 149 return MODE_CLOCK_HIGH;
143 150
144 if (mode->clock < 10000) 151 if (mode->clock < 10000)
@@ -492,7 +499,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
492 for (clock = 0; clock <= max_clock; clock++) { 499 for (clock = 0; clock <= max_clock; clock++) {
493 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; 500 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
494 501
495 if (intel_dp_link_required(mode->clock) <= link_avail) { 502 if (intel_dp_link_required(encoder->dev, intel_output, mode->clock)
503 <= link_avail) {
496 dp_priv->link_bw = bws[clock]; 504 dp_priv->link_bw = bws[clock];
497 dp_priv->lane_count = lane_count; 505 dp_priv->lane_count = lane_count;
498 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); 506 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
@@ -1289,53 +1297,7 @@ intel_dp_hot_plug(struct intel_output *intel_output)
1289 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) 1297 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
1290 intel_dp_check_link_status(intel_output); 1298 intel_dp_check_link_status(intel_output);
1291} 1299}
1292/* 1300
1293 * Enumerate the child dev array parsed from VBT to check whether
1294 * the given DP is present.
1295 * If it is present, return 1.
1296 * If it is not present, return false.
1297 * If no child dev is parsed from VBT, it is assumed that the given
1298 * DP is present.
1299 */
1300static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
1301{
1302 struct drm_i915_private *dev_priv = dev->dev_private;
1303 struct child_device_config *p_child;
1304 int i, dp_port, ret;
1305
1306 if (!dev_priv->child_dev_num)
1307 return 1;
1308
1309 dp_port = 0;
1310 if (dp_reg == DP_B || dp_reg == PCH_DP_B)
1311 dp_port = PORT_IDPB;
1312 else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
1313 dp_port = PORT_IDPC;
1314 else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
1315 dp_port = PORT_IDPD;
1316
1317 ret = 0;
1318 for (i = 0; i < dev_priv->child_dev_num; i++) {
1319 p_child = dev_priv->child_dev + i;
1320 /*
1321 * If the device type is not DP, continue.
1322 */
1323 if (p_child->device_type != DEVICE_TYPE_DP &&
1324 p_child->device_type != DEVICE_TYPE_eDP)
1325 continue;
1326 /* Find the eDP port */
1327 if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
1328 ret = 1;
1329 break;
1330 }
1331 /* Find the DP port */
1332 if (p_child->dvo_port == dp_port) {
1333 ret = 1;
1334 break;
1335 }
1336 }
1337 return ret;
1338}
1339void 1301void
1340intel_dp_init(struct drm_device *dev, int output_reg) 1302intel_dp_init(struct drm_device *dev, int output_reg)
1341{ 1303{
@@ -1345,10 +1307,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1345 struct intel_dp_priv *dp_priv; 1307 struct intel_dp_priv *dp_priv;
1346 const char *name = NULL; 1308 const char *name = NULL;
1347 1309
1348 if (!dp_is_present_in_vbt(dev, output_reg)) {
1349 DRM_DEBUG_KMS("DP is not present. Ignore it\n");
1350 return;
1351 }
1352 intel_output = kcalloc(sizeof(struct intel_output) + 1310 intel_output = kcalloc(sizeof(struct intel_output) +
1353 sizeof(struct intel_dp_priv), 1, GFP_KERNEL); 1311 sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
1354 if (!intel_output) 1312 if (!intel_output)
@@ -1373,11 +1331,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1373 else if (output_reg == DP_D || output_reg == PCH_DP_D) 1331 else if (output_reg == DP_D || output_reg == PCH_DP_D)
1374 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); 1332 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
1375 1333
1376 if (IS_eDP(intel_output)) { 1334 if (IS_eDP(intel_output))
1377 intel_output->crtc_mask = (1 << 1);
1378 intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); 1335 intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
1379 } else 1336
1380 intel_output->crtc_mask = (1 << 0) | (1 << 1); 1337 intel_output->crtc_mask = (1 << 0) | (1 << 1);
1381 connector->interlace_allowed = true; 1338 connector->interlace_allowed = true;
1382 connector->doublescan_allowed = 0; 1339 connector->doublescan_allowed = 0;
1383 1340
@@ -1402,14 +1359,20 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1402 break; 1359 break;
1403 case DP_B: 1360 case DP_B:
1404 case PCH_DP_B: 1361 case PCH_DP_B:
1362 dev_priv->hotplug_supported_mask |=
1363 HDMIB_HOTPLUG_INT_STATUS;
1405 name = "DPDDC-B"; 1364 name = "DPDDC-B";
1406 break; 1365 break;
1407 case DP_C: 1366 case DP_C:
1408 case PCH_DP_C: 1367 case PCH_DP_C:
1368 dev_priv->hotplug_supported_mask |=
1369 HDMIC_HOTPLUG_INT_STATUS;
1409 name = "DPDDC-C"; 1370 name = "DPDDC-C";
1410 break; 1371 break;
1411 case DP_D: 1372 case DP_D:
1412 case PCH_DP_D: 1373 case PCH_DP_D:
1374 dev_priv->hotplug_supported_mask |=
1375 HDMID_HOTPLUG_INT_STATUS;
1413 name = "DPDDC-D"; 1376 name = "DPDDC-D";
1414 break; 1377 break;
1415 } 1378 }
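With the intel_dp.c change, intel_dp_link_required() keys its bandwidth estimate to the panel's bits per pixel for eDP (dev_priv->edp_bpp, presumably parsed from the VBT elsewhere in this series) instead of hard-coding 3 bytes per pixel. A small standalone check of the arithmetic, mirroring the patched formula:

#include <stdio.h>

/* Mirror of the patched formula: link bandwidth needed for a mode. */
static int link_required(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp) / 8;
}

int main(void)
{
	/* 1366x768@60 is roughly a 72 MHz pixel clock. */
	int clk = 72300;

	/* At 24 bpp this equals the old fixed pixel_clock * 3 estimate;
	 * an 18 bpp eDP panel needs only 3/4 of that. */
	printf("24 bpp: %d\n", link_required(clk, 24));  /* 216900 == clk * 3 */
	printf("18 bpp: %d\n", link_required(clk, 18));  /* 162675 */
	return 0;
}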
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 371d753e362b..aaabbcbe5905 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
148 148
149 mutex_lock(&dev->struct_mutex); 149 mutex_lock(&dev->struct_mutex);
150 150
151 ret = i915_gem_object_pin(fbo, PAGE_SIZE); 151 ret = i915_gem_object_pin(fbo, 64*1024);
152 if (ret) { 152 if (ret) {
153 DRM_ERROR("failed to pin fb: %d\n", ret); 153 DRM_ERROR("failed to pin fb: %d\n", ret);
154 goto out_unref; 154 goto out_unref;
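The fbcon buffer is now pinned with 64 KiB alignment instead of PAGE_SIZE, presumably to satisfy a display-engine surface alignment requirement (the hunk itself does not say which). The only observable effect is on the resulting GTT offset, checked standalone here:

#include <assert.h>
#include <stdint.h>

/* An object pinned with alignment A lands on an A-aligned GTT offset. */
static uint32_t align_up(uint32_t off, uint32_t a)
{
	return (off + a - 1) & ~(a - 1);
}

int main(void)
{
	assert(align_up(0x12345, 4096)      == 0x13000);  /* old: PAGE_SIZE */
	assert(align_up(0x12345, 64 * 1024) == 0x20000);  /* new: 64 KiB */
	return 0;
}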
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f04dbbe7d400..0e268deed761 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -225,52 +225,6 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
225 .destroy = intel_hdmi_enc_destroy, 225 .destroy = intel_hdmi_enc_destroy,
226}; 226};
227 227
228/*
229 * Enumerate the child dev array parsed from VBT to check whether
230 * the given HDMI is present.
231 * If it is present, return 1.
232 * If it is not present, return false.
233 * If no child dev is parsed from VBT, it assumes that the given
234 * HDMI is present.
235 */
236static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
237{
238 struct drm_i915_private *dev_priv = dev->dev_private;
239 struct child_device_config *p_child;
240 int i, hdmi_port, ret;
241
242 if (!dev_priv->child_dev_num)
243 return 1;
244
245 if (hdmi_reg == SDVOB)
246 hdmi_port = DVO_B;
247 else if (hdmi_reg == SDVOC)
248 hdmi_port = DVO_C;
249 else if (hdmi_reg == HDMIB)
250 hdmi_port = DVO_B;
251 else if (hdmi_reg == HDMIC)
252 hdmi_port = DVO_C;
253 else if (hdmi_reg == HDMID)
254 hdmi_port = DVO_D;
255 else
256 return 0;
257
258 ret = 0;
259 for (i = 0; i < dev_priv->child_dev_num; i++) {
260 p_child = dev_priv->child_dev + i;
261 /*
262 * If the device type is not HDMI, continue.
263 */
264 if (p_child->device_type != DEVICE_TYPE_HDMI)
265 continue;
266 /* Find the HDMI port */
267 if (p_child->dvo_port == hdmi_port) {
268 ret = 1;
269 break;
270 }
271 }
272 return ret;
273}
274void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) 228void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
275{ 229{
276 struct drm_i915_private *dev_priv = dev->dev_private; 230 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -278,10 +232,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
278 struct intel_output *intel_output; 232 struct intel_output *intel_output;
279 struct intel_hdmi_priv *hdmi_priv; 233 struct intel_hdmi_priv *hdmi_priv;
280 234
281 if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
282 DRM_DEBUG_KMS("HDMI is not present. Ignored it \n");
283 return;
284 }
285 intel_output = kcalloc(sizeof(struct intel_output) + 235 intel_output = kcalloc(sizeof(struct intel_output) +
286 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); 236 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
287 if (!intel_output) 237 if (!intel_output)
@@ -303,21 +253,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
303 if (sdvox_reg == SDVOB) { 253 if (sdvox_reg == SDVOB) {
304 intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); 254 intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
305 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); 255 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
256 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
306 } else if (sdvox_reg == SDVOC) { 257 } else if (sdvox_reg == SDVOC) {
307 intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); 258 intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
308 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); 259 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
260 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
309 } else if (sdvox_reg == HDMIB) { 261 } else if (sdvox_reg == HDMIB) {
310 intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); 262 intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
311 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, 263 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
312 "HDMIB"); 264 "HDMIB");
265 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
313 } else if (sdvox_reg == HDMIC) { 266 } else if (sdvox_reg == HDMIC) {
314 intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); 267 intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
315 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, 268 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
316 "HDMIC"); 269 "HDMIC");
270 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
317 } else if (sdvox_reg == HDMID) { 271 } else if (sdvox_reg == HDMID) {
318 intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); 272 intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
319 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, 273 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
320 "HDMID"); 274 "HDMID");
275 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
321 } 276 }
322 if (!intel_output->ddc_bus) 277 if (!intel_output->ddc_bus)
323 goto err_connector; 278 goto err_connector;
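Each HDMI port that gets initialized above ORs its status bit into dev_priv->hotplug_supported_mask; the natural reading (inferred from the field name, not shown in this diff) is that interrupt handling can then ignore hotplug events for ports that were never set up. The bookkeeping itself is plain bitmask accumulation; the bit values below are hypothetical stand-ins for the *_HOTPLUG_INT_STATUS constants in i915_reg.h:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in bits, not the real register values. */
#define HDMIB_HP (1u << 0)
#define HDMIC_HP (1u << 1)
#define HDMID_HP (1u << 2)

int main(void)
{
	uint32_t supported = 0;

	/* Each successfully initialized port contributes its bit, exactly
	 * as the intel_hdmi_init() hunks above do. */
	supported |= HDMIB_HP;
	supported |= HDMIC_HP;

	/* An event for a port that was never set up is masked off. */
	uint32_t pending = HDMID_HP;
	printf("act on 0x%x\n", pending & supported);  /* prints 0x0 */
	return 0;
}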
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3118ce274e67..c2e8a45780d5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -602,12 +602,47 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
602/* Some lid devices report incorrect lid status, assume they're connected */ 602/* Some lid devices report incorrect lid status, assume they're connected */
603static const struct dmi_system_id bad_lid_status[] = { 603static const struct dmi_system_id bad_lid_status[] = {
604 { 604 {
605 .ident = "Compaq nx9020",
606 .matches = {
607 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
608 DMI_MATCH(DMI_BOARD_NAME, "3084"),
609 },
610 },
611 {
612 .ident = "Samsung SX20S",
613 .matches = {
614 DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
615 DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
616 },
617 },
618 {
605 .ident = "Aspire One", 619 .ident = "Aspire One",
606 .matches = { 620 .matches = {
607 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 621 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
608 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), 622 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
609 }, 623 },
610 }, 624 },
625 {
626 .ident = "Aspire 1810T",
627 .matches = {
628 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
629 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
630 },
631 },
632 {
633 .ident = "PC-81005",
634 .matches = {
635 DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
636 DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
637 },
638 },
639 {
640 .ident = "Clevo M5x0N",
641 .matches = {
642 DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
643 DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
644 },
645 },
611 { } 646 { }
612}; 647};
613 648
@@ -622,7 +657,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
622{ 657{
623 enum drm_connector_status status = connector_status_connected; 658 enum drm_connector_status status = connector_status_connected;
624 659
625 if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) 660 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
626 status = connector_status_disconnected; 661 status = connector_status_disconnected;
627 662
628 return status; 663 return status;
@@ -679,7 +714,14 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
679 struct drm_i915_private *dev_priv = 714 struct drm_i915_private *dev_priv =
680 container_of(nb, struct drm_i915_private, lid_notifier); 715 container_of(nb, struct drm_i915_private, lid_notifier);
681 struct drm_device *dev = dev_priv->dev; 716 struct drm_device *dev = dev_priv->dev;
717 struct drm_connector *connector = dev_priv->int_lvds_connector;
682 718
719 /*
720 * Check and update the status of the LVDS connector after receiving
721 * the lid notification event.
722 */
723 if (connector)
724 connector->status = connector->funcs->detect(connector);
683 if (!acpi_lid_open()) { 725 if (!acpi_lid_open()) {
684 dev_priv->modeset_on_lid = 1; 726 dev_priv->modeset_on_lid = 1;
685 return NOTIFY_OK; 727 return NOTIFY_OK;
@@ -854,65 +896,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
854 { } /* terminating entry */ 896 { } /* terminating entry */
855}; 897};
856 898
857#ifdef CONFIG_ACPI
858/*
859 * check_lid_device -- check whether @handle is an ACPI LID device.
860 * @handle: ACPI device handle
861 * @level : depth in the ACPI namespace tree
862 * @context: the number of LID device when we find the device
863 * @rv: a return value to fill if desired (Not use)
864 */
865static acpi_status
866check_lid_device(acpi_handle handle, u32 level, void *context,
867 void **return_value)
868{
869 struct acpi_device *acpi_dev;
870 int *lid_present = context;
871
872 acpi_dev = NULL;
873 /* Get the acpi device for device handle */
874 if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
875 /* If there is no ACPI device for handle, return */
876 return AE_OK;
877 }
878
879 if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7))
880 *lid_present = 1;
881
882 return AE_OK;
883}
884
885/**
886 * check whether there exists the ACPI LID device by enumerating the ACPI
887 * device tree.
888 */
889static int intel_lid_present(void)
890{
891 int lid_present = 0;
892
893 if (acpi_disabled) {
894 /* If ACPI is disabled, there is no ACPI device tree to
895 * check, so assume the LID device would have been present.
896 */
897 return 1;
898 }
899
900 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
901 ACPI_UINT32_MAX,
902 check_lid_device, NULL, &lid_present, NULL);
903
904 return lid_present;
905}
906#else
907static int intel_lid_present(void)
908{
909 /* In the absence of ACPI built in, assume that the LID device would
910 * have been present.
911 */
912 return 1;
913}
914#endif
915
916/** 899/**
917 * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID 900 * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
918 * @dev: drm device 901 * @dev: drm device
@@ -957,7 +940,8 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
957 } 940 }
958 } 941 }
959 mutex_unlock(&dev->mode_config.mutex); 942 mutex_unlock(&dev->mode_config.mutex);
960 if (temp_downclock < panel_fixed_mode->clock) { 943 if (temp_downclock < panel_fixed_mode->clock &&
944 i915_lvds_downclock) {
961 /* We found the downclock for LVDS. */ 945 /* We found the downclock for LVDS. */
962 dev_priv->lvds_downclock_avail = 1; 946 dev_priv->lvds_downclock_avail = 1;
963 dev_priv->lvds_downclock = temp_downclock; 947 dev_priv->lvds_downclock = temp_downclock;
@@ -1031,12 +1015,8 @@ void intel_lvds_init(struct drm_device *dev)
1031 if (dmi_check_system(intel_no_lvds)) 1015 if (dmi_check_system(intel_no_lvds))
1032 return; 1016 return;
1033 1017
1034 /* 1018 if (!lvds_is_present_in_vbt(dev)) {
1035 * Assume LVDS is present if there's an ACPI lid device or if the 1019 DRM_DEBUG_KMS("LVDS is not present in VBT\n");
1036 * device is present in the VBT.
1037 */
1038 if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
1039 DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
1040 return; 1020 return;
1041 } 1021 }
1042 1022
@@ -1180,6 +1160,8 @@ out:
1180 DRM_DEBUG_KMS("lid notifier registration failed\n"); 1160 DRM_DEBUG_KMS("lid notifier registration failed\n");
1181 dev_priv->lid_notifier.notifier_call = NULL; 1161 dev_priv->lid_notifier.notifier_call = NULL;
1182 } 1162 }
1163 /* keep the LVDS connector */
1164 dev_priv->int_lvds_connector = connector;
1183 drm_sysfs_connector_add(connector); 1165 drm_sysfs_connector_add(connector);
1184 return; 1166 return;
1185 1167
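intel_lvds_detect() now consults the DMI quirk table before acpi_lid_open(), so machines on the bad_lid_status list short-circuit past the ACPI query entirely. A standalone illustration of why the && ordering matters; on_quirk_list() and lid_open() are stubs for dmi_check_system() and acpi_lid_open():

#include <stdbool.h>
#include <stdio.h>

static bool on_quirk_list(void) { return true; }  /* stands in for dmi_check_system() */

static bool lid_open(void)
{
	puts("ACPI queried");                     /* stands in for acpi_lid_open() */
	return false;                             /* lid state misreported as closed */
}

int main(void)
{
	/* Quirk check first: && short-circuits, lid_open() is never
	 * evaluated, and the connector stays "connected". */
	bool disconnected = !on_quirk_list() && !lid_open();

	printf("disconnected=%d\n", disconnected);  /* 0; nothing queried */
	return 0;
}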
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 24a3dc99716c..82678d30ab06 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -462,14 +462,63 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
462} 462}
463 463
464/** 464/**
465 * Don't check status code from this as it switches the bus back to the 465 * Try to read the response after issuing the DDC switch command. Note
466 * SDVO chips which defeats the purpose of doing a bus switch in the first 466 * that reading the response and issuing the DDC switch command must be
467 * place. 467 * done in one I2C transaction. Otherwise, when we start another I2C
468 * transaction after issuing the DDC bus switch, the bus switches back
469 * to the internal SDVO register.
468 */ 470 */
469static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, 471static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
470 u8 target) 472 u8 target)
471{ 473{
472 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); 474 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
475 u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
476 struct i2c_msg msgs[] = {
477 {
478 .addr = sdvo_priv->slave_addr >> 1,
479 .flags = 0,
480 .len = 2,
481 .buf = out_buf,
482 },
483 /* the following two are to read the response */
484 {
485 .addr = sdvo_priv->slave_addr >> 1,
486 .flags = 0,
487 .len = 1,
488 .buf = cmd_buf,
489 },
490 {
491 .addr = sdvo_priv->slave_addr >> 1,
492 .flags = I2C_M_RD,
493 .len = 1,
494 .buf = ret_value,
495 },
496 };
497
498 intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
499 &target, 1);
500 /* write the DDC switch command argument */
501 intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
502
503 out_buf[0] = SDVO_I2C_OPCODE;
504 out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
505 cmd_buf[0] = SDVO_I2C_CMD_STATUS;
506 cmd_buf[1] = 0;
507 ret_value[0] = 0;
508 ret_value[1] = 0;
509
510 ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
511 if (ret != 3) {
512 /* failure in I2C transfer */
513 DRM_DEBUG_KMS("I2C transfer returned %d\n", ret);
514 return;
515 }
516 if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
517 DRM_DEBUG_KMS("DDC switch command returns response %d\n",
518 ret_value[0]);
519 return;
520 }
521 return;
473} 522}
474 523
475static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) 524static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
@@ -1579,6 +1628,32 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1579 edid = drm_get_edid(&intel_output->base, 1628 edid = drm_get_edid(&intel_output->base,
1580 intel_output->ddc_bus); 1629 intel_output->ddc_bus);
1581 1630
1631 /* This only applies to SDVO cards with multiple outputs */
1632 if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
1633 uint8_t saved_ddc, temp_ddc;
1634 saved_ddc = sdvo_priv->ddc_bus;
1635 temp_ddc = sdvo_priv->ddc_bus >> 1;
1636 /*
1637 * Don't use the 1 as the argument of DDC bus switch to get
1638 * the EDID. It is used for SDVO SPD ROM.
1639 */
1640 while(temp_ddc > 1) {
1641 sdvo_priv->ddc_bus = temp_ddc;
1642 edid = drm_get_edid(&intel_output->base,
1643 intel_output->ddc_bus);
1644 if (edid) {
1645 /*
1646 * When we can get the EDID, maybe it is the
1647 * correct DDC bus. Update it.
1648 */
1649 sdvo_priv->ddc_bus = temp_ddc;
1650 break;
1651 }
1652 temp_ddc >>= 1;
1653 }
1654 if (edid == NULL)
1655 sdvo_priv->ddc_bus = saved_ddc;
1656 }
1582 /* when there is no edid and no monitor is connected with VGA 1657 /* when there is no edid and no monitor is connected with VGA
1583 * port, try to use the CRT ddc to read the EDID for DVI-connector 1658 * port, try to use the CRT ddc to read the EDID for DVI-connector
1584 */ 1659 */
@@ -2270,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2270 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2345 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2271 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2346 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2272 (1 << INTEL_ANALOG_CLONE_BIT); 2347 (1 << INTEL_ANALOG_CLONE_BIT);
2348 } else if (flags & SDVO_OUTPUT_CVBS0) {
2349
2350 sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
2351 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2352 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2353 sdvo_priv->is_tv = true;
2354 intel_output->needs_tv_clock = true;
2355 intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2273 } else if (flags & SDVO_OUTPUT_LVDS0) { 2356 } else if (flags & SDVO_OUTPUT_LVDS0) {
2274 2357
2275 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 2358 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
@@ -2662,6 +2745,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2662 2745
2663bool intel_sdvo_init(struct drm_device *dev, int output_device) 2746bool intel_sdvo_init(struct drm_device *dev, int output_device)
2664{ 2747{
2748 struct drm_i915_private *dev_priv = dev->dev_private;
2665 struct drm_connector *connector; 2749 struct drm_connector *connector;
2666 struct intel_output *intel_output; 2750 struct intel_output *intel_output;
2667 struct intel_sdvo_priv *sdvo_priv; 2751 struct intel_sdvo_priv *sdvo_priv;
@@ -2708,10 +2792,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2708 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); 2792 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
2709 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2793 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2710 "SDVOB/VGA DDC BUS"); 2794 "SDVOB/VGA DDC BUS");
2795 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
2711 } else { 2796 } else {
2712 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); 2797 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
2713 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2798 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2714 "SDVOC/VGA DDC BUS"); 2799 "SDVOC/VGA DDC BUS");
2800 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2715 } 2801 }
2716 2802
2717 if (intel_output->ddc_bus == NULL) 2803 if (intel_output->ddc_bus == NULL)
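The rewritten DDC bus switch above issues the command and reads its status through a single i2c_transfer() call. Passing all three messages in one call keeps them inside one I2C transaction (repeated START conditions, no intervening STOP), which is what stops the encoder from switching the bus back to its internal SDVO register between command and response. A kernel-context sketch of the shape only, with the SDVO-specific address and buffers left as placeholders (SLAVE_ADDR, out_buf, cmd_buf, ret_val are assumptions, not real definitions):

/* Kernel-context sketch, not standalone. */
struct i2c_msg msgs[] = {
	{ .addr = SLAVE_ADDR, .flags = 0,        .len = 2, .buf = out_buf }, /* opcode + command */
	{ .addr = SLAVE_ADDR, .flags = 0,        .len = 1, .buf = cmd_buf }, /* select status reg */
	{ .addr = SLAVE_ADDR, .flags = I2C_M_RD, .len = 1, .buf = ret_val }, /* read status back */
};

if (i2c_transfer(adapter, msgs, 3) != 3)
	return;  /* the whole transaction failed as a unit */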
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index b1bc1ea182b8..1175429da102 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -30,12 +30,11 @@ config DRM_NOUVEAU_DEBUG
30 via debugfs. 30 via debugfs.
31 31
32menu "I2C encoder or helper chips" 32menu "I2C encoder or helper chips"
33 depends on DRM && I2C 33 depends on DRM && DRM_KMS_HELPER && I2C
34 34
35config DRM_I2C_CH7006 35config DRM_I2C_CH7006
36 tristate "Chrontel ch7006 TV encoder" 36 tristate "Chrontel ch7006 TV encoder"
37 depends on DRM_NOUVEAU 37 default m if DRM_NOUVEAU
38 default m
39 help 38 help
40 Support for Chrontel ch7006 and similar TV encoders, found 39 Support for Chrontel ch7006 and similar TV encoders, found
41 on some nVidia video cards. 40 on some nVidia video cards.
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 1cf488247a16..48227e744753 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
90{ 90{
91 int result; 91 int result;
92 92
93 if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY, 93 if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
94 &result)) 94 &result))
95 return -ENODEV; 95 return -ENODEV;
96 96
97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); 97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
98 98
99 if (result & 0x1) { /* Stamina mode - disable the external GPU */ 99 if (result) { /* Ensure that the external GPU is enabled */
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
101 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
102 NULL);
103 } else { /* Stamina mode - disable the external GPU */
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, 104 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
101 NULL); 105 NULL);
102 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, 106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
103 NULL); 107 NULL);
104 } else { /* Ensure that the external GPU is enabled */
105 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
107 NULL);
108 } 108 }
109 109
110 return 0; 110 return 0;
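The nouveau_acpi.c hunk queries NOUVEAU_DSM_POWER/NOUVEAU_DSM_POWER_STATE and inverts the branch: a non-zero result now keeps the discrete GPU powered, zero selects stamina mode. Its control flow, reduced to a standalone sketch with stub bodies standing in for the _DSM LED/POWER calls:

#include <stdio.h>

static void dsm_speed(void)   { puts("keep discrete GPU powered"); }
static void dsm_stamina(void) { puts("stamina: discrete GPU off"); }

static void hybrid_setup(int power_state)
{
	if (power_state)  /* non-zero now means "speed" */
		dsm_speed();
	else              /* stamina mode */
		dsm_stamina();
}

int main(void)
{
	hybrid_setup(1);
	hybrid_setup(0);
	return 0;
}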
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index ba143972769f..0e9cd1d49130 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -310,63 +310,22 @@ valid_reg(struct nvbios *bios, uint32_t reg)
310 struct drm_device *dev = bios->dev; 310 struct drm_device *dev = bios->dev;
311 311
312 /* C51 has misaligned regs on purpose. Marvellous */ 312 /* C51 has misaligned regs on purpose. Marvellous */
313 if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) { 313 if (reg & 0x2 ||
314 NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n", 314 (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
315 reg); 315 NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
316 return 0; 316
317 } 317 /* warn on C51 regs that haven't been verified accessible in tracing */
318 /*
319 * Warn on C51 regs that have not been verified accessible in
320 * mmiotracing
321 */
322 if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 && 318 if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
323 reg != 0x130d && reg != 0x1311 && reg != 0x60081d) 319 reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
324 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n", 320 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
325 reg); 321 reg);
326 322
327 /* Trust the init scripts on G80 */ 323 if (reg >= (8*1024*1024)) {
328 if (dev_priv->card_type >= NV_50) 324 NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg);
329 return 1; 325 return 0;
330
331 #define WITHIN(x, y, z) ((x >= y) && (x < y + z))
332 if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE))
333 return 1;
334 if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE))
335 return 1;
336 if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE))
337 return 1;
338 if (dev_priv->VBIOS.pub.chip_version >= 0x30 &&
339 (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600))
340 return 1;
341 if (dev_priv->VBIOS.pub.chip_version >= 0x40 &&
342 WITHIN(reg, 0xc000, 0x48))
343 return 1;
344 if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204)
345 return 1;
346 if (dev_priv->VBIOS.pub.chip_version >= 0x40) {
347 if (reg == 0x00011014 || reg == 0x00020328)
348 return 1;
349 if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */
350 return 1;
351 } 326 }
352 if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE))
353 return 1;
354 if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE))
355 return 1;
356 if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2))
357 return 1;
358 if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2))
359 return 1;
360 if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0)
361 return 1;
362 if (dev_priv->VBIOS.pub.chip_version == 0x51 &&
363 WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE))
364 return 1;
365 #undef WITHIN
366
367 NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg);
368 327
369 return 0; 328 return 1;
370} 329}
371 330
372static bool 331static bool
@@ -1906,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1906 1865
1907 struct drm_nouveau_private *dev_priv = bios->dev->dev_private; 1866 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
1908 1867
1909 if (dev_priv->card_type >= NV_50) 1868 if (dev_priv->card_type >= NV_40)
1910 return 1; 1869 return 1;
1911 1870
1912 /* 1871 /*
@@ -3196,16 +3155,25 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
3196 } 3155 }
3197#ifdef __powerpc__ 3156#ifdef __powerpc__
3198 /* Powerbook specific quirks */ 3157 /* Powerbook specific quirks */
3199 if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329)) 3158 if ((dev->pci_device & 0xffff) == 0x0179 ||
3200 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); 3159 (dev->pci_device & 0xffff) == 0x0189 ||
3201 if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) { 3160 (dev->pci_device & 0xffff) == 0x0329) {
3202 if (script == LVDS_PANEL_ON) { 3161 if (script == LVDS_RESET) {
3203 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31)); 3162 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
3204 bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1); 3163
3205 } 3164 } else if (script == LVDS_PANEL_ON) {
3206 if (script == LVDS_PANEL_OFF) { 3165 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
3207 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31)); 3166 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
3208 bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3); 3167 | (1 << 31));
3168 bios_wr32(bios, NV_PCRTC_GPIO_EXT,
3169 bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
3170
3171 } else if (script == LVDS_PANEL_OFF) {
3172 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
3173 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
3174 & ~(1 << 31));
3175 bios_wr32(bios, NV_PCRTC_GPIO_EXT,
3176 bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
3209 } 3177 }
3210 } 3178 }
3211#endif 3179#endif
@@ -3797,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3797 */ 3765 */
3798 3766
3799 struct drm_nouveau_private *dev_priv = dev->dev_private; 3767 struct drm_nouveau_private *dev_priv = dev->dev_private;
3800 struct init_exec iexec = {true, false};
3801 struct nvbios *bios = &dev_priv->VBIOS; 3768 struct nvbios *bios = &dev_priv->VBIOS;
3802 uint8_t *table = &bios->data[bios->display.script_table_ptr]; 3769 uint8_t *table = &bios->data[bios->display.script_table_ptr];
3803 uint8_t *otable = NULL; 3770 uint8_t *otable = NULL;
@@ -3877,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3877 } 3844 }
3878 } 3845 }
3879 3846
3880 bios->display.output = dcbent;
3881
3882 if (pxclk == 0) { 3847 if (pxclk == 0) {
3883 script = ROM16(otable[6]); 3848 script = ROM16(otable[6]);
3884 if (!script) { 3849 if (!script) {
@@ -3887,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3887 } 3852 }
3888 3853
3889 NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); 3854 NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
3890 parse_init_table(bios, script, &iexec); 3855 nouveau_bios_run_init_table(dev, script, dcbent);
3891 } else 3856 } else
3892 if (pxclk == -1) { 3857 if (pxclk == -1) {
3893 script = ROM16(otable[8]); 3858 script = ROM16(otable[8]);
@@ -3897,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3897 } 3862 }
3898 3863
3899 NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); 3864 NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
3900 parse_init_table(bios, script, &iexec); 3865 nouveau_bios_run_init_table(dev, script, dcbent);
3901 } else 3866 } else
3902 if (pxclk == -2) { 3867 if (pxclk == -2) {
3903 if (table[4] >= 12) 3868 if (table[4] >= 12)
@@ -3910,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3910 } 3875 }
3911 3876
3912 NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); 3877 NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
3913 parse_init_table(bios, script, &iexec); 3878 nouveau_bios_run_init_table(dev, script, dcbent);
3914 } else 3879 } else
3915 if (pxclk > 0) { 3880 if (pxclk > 0) {
3916 script = ROM16(otable[table[4] + i*6 + 2]); 3881 script = ROM16(otable[table[4] + i*6 + 2]);
@@ -3922,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3922 } 3887 }
3923 3888
3924 NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); 3889 NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
3925 parse_init_table(bios, script, &iexec); 3890 nouveau_bios_run_init_table(dev, script, dcbent);
3926 } else 3891 } else
3927 if (pxclk < 0) { 3892 if (pxclk < 0) {
3928 script = ROM16(otable[table[4] + i*6 + 4]); 3893 script = ROM16(otable[table[4] + i*6 + 4]);
@@ -3934,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3934 } 3899 }
3935 3900
3936 NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); 3901 NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
3937 parse_init_table(bios, script, &iexec); 3902 nouveau_bios_run_init_table(dev, script, dcbent);
3938 } 3903 }
3939 3904
3940 return 0; 3905 return 0;
@@ -5434,52 +5399,49 @@ static bool
5434parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, 5399parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
5435 uint32_t conn, uint32_t conf, struct dcb_entry *entry) 5400 uint32_t conn, uint32_t conf, struct dcb_entry *entry)
5436{ 5401{
5437 if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 && 5402 switch (conn & 0x0000000f) {
5438 conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 && 5403 case 0:
5439 conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 && 5404 entry->type = OUTPUT_ANALOG;
5440 conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 && 5405 break;
5441 conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 && 5406 case 1:
5442 conn != 0xf2205004 && conn != 0xf2209004) { 5407 entry->type = OUTPUT_TV;
5443 NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n"); 5408 break;
5444 5409 case 2:
5445 /* cause output setting to fail for !TV, so message is seen */ 5410 case 3:
5446 if ((conn & 0xf) != 0x1)
5447 dcb->entries = 0;
5448
5449 return false;
5450 }
5451 /* most of the below is a "best guess" atm */
5452 entry->type = conn & 0xf;
5453 if (entry->type == 2)
5454 /* another way of specifying straps based lvds... */
5455 entry->type = OUTPUT_LVDS; 5411 entry->type = OUTPUT_LVDS;
5456 if (entry->type == 4) { /* digital */ 5412 break;
5457 if (conn & 0x10) 5413 case 4:
5458 entry->type = OUTPUT_LVDS; 5414 switch ((conn & 0x000000f0) >> 4) {
5459 else 5415 case 0:
5460 entry->type = OUTPUT_TMDS; 5416 entry->type = OUTPUT_TMDS;
5417 break;
5418 case 1:
5419 entry->type = OUTPUT_LVDS;
5420 break;
5421 default:
5422 NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
5423 (conn & 0x000000f0) >> 4);
5424 return false;
5425 }
5426 break;
5427 default:
5428 NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
5429 return false;
5461 } 5430 }
5462 /* what's in bits 5-13? could be some encoder maker thing, in tv case */ 5431
5463 entry->i2c_index = (conn >> 14) & 0xf; 5432 entry->i2c_index = (conn & 0x0003c000) >> 14;
5464 /* raw heads field is in range 0-1, so move to 1-2 */ 5433 entry->heads = ((conn & 0x001c0000) >> 18) + 1;
5465 entry->heads = ((conn >> 18) & 0x7) + 1; 5434 entry->or = entry->heads; /* same as heads, hopefully safe enough */
5466 entry->location = (conn >> 21) & 0xf; 5435 entry->location = (conn & 0x01e00000) >> 21;
5467 /* unused: entry->bus = (conn >> 25) & 0x7; */ 5436 entry->bus = (conn & 0x0e000000) >> 25;
5468 /* set or to be same as heads -- hopefully safe enough */
5469 entry->or = entry->heads;
5470 entry->duallink_possible = false; 5437 entry->duallink_possible = false;
5471 5438
5472 switch (entry->type) { 5439 switch (entry->type) {
5473 case OUTPUT_ANALOG: 5440 case OUTPUT_ANALOG:
5474 entry->crtconf.maxfreq = (conf & 0xffff) * 10; 5441 entry->crtconf.maxfreq = (conf & 0xffff) * 10;
5475 break; 5442 break;
5476 case OUTPUT_LVDS: 5443 case OUTPUT_TV:
5477 /* 5444 entry->tvconf.has_component_output = false;
5478 * This is probably buried in conn's unknown bits.
5479 * This will upset EDID-ful models, if they exist
5480 */
5481 entry->lvdsconf.use_straps_for_mode = true;
5482 entry->lvdsconf.use_power_scripts = true;
5483 break; 5445 break;
5484 case OUTPUT_TMDS: 5446 case OUTPUT_TMDS:
5485 /* 5447 /*
@@ -5488,8 +5450,12 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
5488 */ 5450 */
5489 fabricate_vga_output(dcb, entry->i2c_index, entry->heads); 5451 fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
5490 break; 5452 break;
5491 case OUTPUT_TV: 5453 case OUTPUT_LVDS:
5492 entry->tvconf.has_component_output = false; 5454 if ((conn & 0x00003f00) != 0x10)
5455 entry->lvdsconf.use_straps_for_mode = true;
5456 entry->lvdsconf.use_power_scripts = true;
5457 break;
5458 default:
5493 break; 5459 break;
5494 } 5460 }
5495 5461
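The DCB 1.5 parser above replaces the whitelist of known conn words with a field-by-field decode: type in bits 3:0 (digital outputs carry a subtype in bits 7:4), i2c index in bits 17:14, heads in 20:18, location in 24:21, bus in 27:25. Decoding one of the formerly whitelisted words standalone shows the layout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t conn = 0xf2204301;  /* one of the old whitelist entries */

	printf("type     %u\n", (unsigned)(conn & 0x0000000f));         /* 1 -> TV */
	printf("subtype  %u\n", (unsigned)((conn & 0x000000f0) >> 4));  /* digital only */
	printf("i2c      %u\n", (unsigned)((conn & 0x0003c000) >> 14)); /* 1 */
	printf("heads    %u\n", (unsigned)((conn & 0x001c0000) >> 18) + 1); /* 1 */
	printf("location %u\n", (unsigned)((conn & 0x01e00000) >> 21)); /* 1 */
	printf("bus      %u\n", (unsigned)((conn & 0x0e000000) >> 25)); /* 1 */
	return 0;
}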
@@ -5564,11 +5530,13 @@ void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
5564 dcb->entries = newentries; 5530 dcb->entries = newentries;
5565} 5531}
5566 5532
5567static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) 5533static int
5534parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5568{ 5535{
5536 struct drm_nouveau_private *dev_priv = dev->dev_private;
5569 struct bios_parsed_dcb *bdcb = &bios->bdcb; 5537 struct bios_parsed_dcb *bdcb = &bios->bdcb;
5570 struct parsed_dcb *dcb; 5538 struct parsed_dcb *dcb;
5571 uint16_t dcbptr, i2ctabptr = 0; 5539 uint16_t dcbptr = 0, i2ctabptr = 0;
5572 uint8_t *dcbtable; 5540 uint8_t *dcbtable;
5573 uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES; 5541 uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
5574 bool configblock = true; 5542 bool configblock = true;
@@ -5579,16 +5547,18 @@ static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool two
5579 dcb->entries = 0; 5547 dcb->entries = 0;
5580 5548
5581 /* get the offset from 0x36 */ 5549 /* get the offset from 0x36 */
5582 dcbptr = ROM16(bios->data[0x36]); 5550 if (dev_priv->card_type > NV_04) {
5551 dcbptr = ROM16(bios->data[0x36]);
5552 if (dcbptr == 0x0000)
5553 NV_WARN(dev, "No output data (DCB) found in BIOS\n");
5554 }
5583 5555
5556 /* this situation likely means a really old card, pre DCB */
5584 if (dcbptr == 0x0) { 5557 if (dcbptr == 0x0) {
5585 NV_WARN(dev, "No output data (DCB) found in BIOS, " 5558 NV_INFO(dev, "Assuming a CRT output exists\n");
5586 "assuming a CRT output exists\n");
5587 /* this situation likely means a really old card, pre DCB */
5588 fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); 5559 fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
5589 5560
5590 if (nv04_tv_identify(dev, 5561 if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
5591 bios->legacy.i2c_indices.tv) >= 0)
5592 fabricate_tv_output(dcb, twoHeads); 5562 fabricate_tv_output(dcb, twoHeads);
5593 5563
5594 return 0; 5564 return 0;
@@ -5892,9 +5862,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
5892 struct nvbios *bios = &dev_priv->VBIOS; 5862 struct nvbios *bios = &dev_priv->VBIOS;
5893 struct init_exec iexec = { true, false }; 5863 struct init_exec iexec = { true, false };
5894 5864
5865 mutex_lock(&bios->lock);
5895 bios->display.output = dcbent; 5866 bios->display.output = dcbent;
5896 parse_init_table(bios, table, &iexec); 5867 parse_init_table(bios, table, &iexec);
5897 bios->display.output = NULL; 5868 bios->display.output = NULL;
5869 mutex_unlock(&bios->lock);
5898} 5870}
5899 5871
5900static bool NVInitVBIOS(struct drm_device *dev) 5872static bool NVInitVBIOS(struct drm_device *dev)
@@ -5903,6 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
5903 struct nvbios *bios = &dev_priv->VBIOS; 5875 struct nvbios *bios = &dev_priv->VBIOS;
5904 5876
5905 memset(bios, 0, sizeof(struct nvbios)); 5877 memset(bios, 0, sizeof(struct nvbios));
5878 mutex_init(&bios->lock);
5906 bios->dev = dev; 5879 bios->dev = dev;
5907 5880
5908 if (!NVShadowVBIOS(dev, bios->data)) 5881 if (!NVShadowVBIOS(dev, bios->data))
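nouveau_bios_run_init_table() now takes bios->lock around parse_init_table(), since the parser stashes transient state in the shared nvbios (bios->display.output, visible in this very hunk). A standalone pthread analogue of the same set-state/run/clear-state critical section; the struct and names are illustrative, not the driver's:

#include <pthread.h>
#include <stddef.h>

struct parser {
	pthread_mutex_t lock;
	const void *output;  /* scratch state shared by helpers */
};

static void run_table(struct parser *p, const void *ent)
{
	/* Same shape as nouveau_bios_run_init_table(): the scratch
	 * pointer must never be visible half-set to another caller. */
	pthread_mutex_lock(&p->lock);
	p->output = ent;
	/* ... parse_init_table() equivalent runs here ... */
	p->output = NULL;
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct parser p = { PTHREAD_MUTEX_INITIALIZER, NULL };

	run_table(&p, "dcb-entry");
	return 0;
}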
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 058e98c76d89..fd94bd6dc264 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,6 +205,8 @@ struct nvbios {
205 struct drm_device *dev; 205 struct drm_device *dev;
206 struct nouveau_bios_info pub; 206 struct nouveau_bios_info pub;
207 207
208 struct mutex lock;
209
208 uint8_t data[NV_PROM_SIZE]; 210 uint8_t data[NV_PROM_SIZE];
209 unsigned int length; 211 unsigned int length;
210 bool execute; 212 bool execute;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 0cad6d834eb2..028719fddf76 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -33,10 +33,13 @@
33#include "nouveau_drv.h" 33#include "nouveau_drv.h"
34#include "nouveau_dma.h" 34#include "nouveau_dma.h"
35 35
36#include <linux/log2.h>
37
36static void 38static void
37nouveau_bo_del_ttm(struct ttm_buffer_object *bo) 39nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
38{ 40{
39 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 41 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
42 struct drm_device *dev = dev_priv->dev;
40 struct nouveau_bo *nvbo = nouveau_bo(bo); 43 struct nouveau_bo *nvbo = nouveau_bo(bo);
41 44
42 ttm_bo_kunmap(&nvbo->kmap); 45 ttm_bo_kunmap(&nvbo->kmap);
@@ -44,12 +47,87 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
44 if (unlikely(nvbo->gem)) 47 if (unlikely(nvbo->gem))
45 DRM_ERROR("bo %p still attached to GEM object\n", bo); 48 DRM_ERROR("bo %p still attached to GEM object\n", bo);
46 49
50 if (nvbo->tile)
51 nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
52
47 spin_lock(&dev_priv->ttm.bo_list_lock); 53 spin_lock(&dev_priv->ttm.bo_list_lock);
48 list_del(&nvbo->head); 54 list_del(&nvbo->head);
49 spin_unlock(&dev_priv->ttm.bo_list_lock); 55 spin_unlock(&dev_priv->ttm.bo_list_lock);
50 kfree(nvbo); 56 kfree(nvbo);
51} 57}
52 58
59static void
60nouveau_bo_fixup_align(struct drm_device *dev,
61 uint32_t tile_mode, uint32_t tile_flags,
62 int *align, int *size)
63{
64 struct drm_nouveau_private *dev_priv = dev->dev_private;
65
66 /*
67 * Some of the tile_flags have a periodic structure of N*4096 bytes,
68 * align to that as well as to the page size. Align the size to the
69 * appropriate boundaries. This does imply that sizes are rounded up
70 * by 3-7 pages, so be aware of this and do not waste memory by allocating
71 * many small buffers.
72 */
73 if (dev_priv->card_type == NV_50) {
74 uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
75 int i;
76
77 switch (tile_flags) {
78 case 0x1800:
79 case 0x2800:
80 case 0x4800:
81 case 0x7a00:
82 if (is_power_of_2(block_size)) {
83 for (i = 1; i < 10; i++) {
84 *align = 12 * i * block_size;
85 if (!(*align % 65536))
86 break;
87 }
88 } else {
89 for (i = 1; i < 10; i++) {
90 *align = 8 * i * block_size;
91 if (!(*align % 65536))
92 break;
93 }
94 }
95 *size = roundup(*size, *align);
96 break;
97 default:
98 break;
99 }
100
101 } else {
102 if (tile_mode) {
103 if (dev_priv->chipset >= 0x40) {
104 *align = 65536;
105 *size = roundup(*size, 64 * tile_mode);
106
107 } else if (dev_priv->chipset >= 0x30) {
108 *align = 32768;
109 *size = roundup(*size, 64 * tile_mode);
110
111 } else if (dev_priv->chipset >= 0x20) {
112 *align = 16384;
113 *size = roundup(*size, 64 * tile_mode);
114
115 } else if (dev_priv->chipset >= 0x10) {
116 *align = 16384;
117 *size = roundup(*size, 32 * tile_mode);
118 }
119 }
120 }
121
122 /* ALIGN works only on powers of two. */
123 *size = roundup(*size, PAGE_SIZE);
124
125 if (dev_priv->card_type == NV_50) {
126 *size = roundup(*size, 65536);
127 *align = max(65536, *align);
128 }
129}
130
53int 131int
54nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, 132nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
55 int size, int align, uint32_t flags, uint32_t tile_mode, 133 int size, int align, uint32_t flags, uint32_t tile_mode,
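nouveau_bo_fixup_align() above centralizes the size and alignment rounding that used to be open-coded in nouveau_bo_new(). For pre-NV50 tiled buffers the rule is a fixed power-of-two alignment per chipset generation plus rounding the size to a multiple of the tile pitch (64 * tile_mode on >= 0x20, 32 * tile_mode on 0x10). Checking the arithmetic standalone:

#include <assert.h>

static int round_up_to(int v, int m) { return ((v + m - 1) / m) * m; }

int main(void)
{
	int size = 100000, tile_mode = 4;

	/* chipset >= 0x20: size rounds to 64 * tile_mode = 256 bytes. */
	assert(round_up_to(size, 64 * tile_mode) == 100096);

	/* NV50 class: everything additionally rounds to 64 KiB. */
	assert(round_up_to(size, 65536) == 131072);
	return 0;
}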
@@ -58,7 +136,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
58{ 136{
59 struct drm_nouveau_private *dev_priv = dev->dev_private; 137 struct drm_nouveau_private *dev_priv = dev->dev_private;
60 struct nouveau_bo *nvbo; 138 struct nouveau_bo *nvbo;
61 int ret, n = 0; 139 int ret = 0;
62 140
63 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); 141 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
64 if (!nvbo) 142 if (!nvbo)
@@ -70,59 +148,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
70 nvbo->tile_mode = tile_mode; 148 nvbo->tile_mode = tile_mode;
71 nvbo->tile_flags = tile_flags; 149 nvbo->tile_flags = tile_flags;
72 150
73 /* 151 nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
74 * Some of the tile_flags have a periodic structure of N*4096 bytes,
75 * align to to that as well as the page size. Overallocate memory to
76 * avoid corruption of other buffer objects.
77 */
78 switch (tile_flags) {
79 case 0x1800:
80 case 0x2800:
81 case 0x4800:
82 case 0x7a00:
83 if (dev_priv->chipset >= 0xA0) {
84 /* This is based on high end cards with 448 bits
85 * memory bus, could be different elsewhere.*/
86 size += 6 * 28672;
87 /* 8 * 28672 is the actual alignment requirement,
88 * but we must also align to page size. */
89 align = 2 * 8 * 28672;
90 } else if (dev_priv->chipset >= 0x90) {
91 size += 3 * 16384;
92 align = 12 * 16834;
93 } else {
94 size += 3 * 8192;
95 /* 12 * 8192 is the actual alignment requirement,
96 * but we must also align to page size. */
97 align = 2 * 12 * 8192;
98 }
99 break;
100 default:
101 break;
102 }
103
104 align >>= PAGE_SHIFT; 152 align >>= PAGE_SHIFT;
105 153
106 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
107 if (dev_priv->card_type == NV_50) {
108 size = (size + 65535) & ~65535;
109 if (align < (65536 / PAGE_SIZE))
110 align = (65536 / PAGE_SIZE);
111 }
112
113 if (flags & TTM_PL_FLAG_VRAM)
114 nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
115 if (flags & TTM_PL_FLAG_TT)
116 nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
117 nvbo->placement.fpfn = 0; 154 nvbo->placement.fpfn = 0;
118 nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0; 155 nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
119 nvbo->placement.placement = nvbo->placements; 156 nouveau_bo_placement_set(nvbo, flags);
120 nvbo->placement.busy_placement = nvbo->placements;
121 nvbo->placement.num_placement = n;
122 nvbo->placement.num_busy_placement = n;
123 157
124 nvbo->channel = chan; 158 nvbo->channel = chan;
125 nouveau_bo_placement_set(nvbo, flags);
126 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, 159 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
127 ttm_bo_type_device, &nvbo->placement, align, 0, 160 ttm_bo_type_device, &nvbo->placement, align, 0,
128 false, NULL, size, nouveau_bo_del_ttm); 161 false, NULL, size, nouveau_bo_del_ttm);
@@ -421,6 +454,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
421/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access 454/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
422 * TTM_PL_{VRAM,TT} directly. 455 * TTM_PL_{VRAM,TT} directly.
423 */ 456 */
457
424static int 458static int
425nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, 459nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
426 struct nouveau_bo *nvbo, bool evict, bool no_wait, 460 struct nouveau_bo *nvbo, bool evict, bool no_wait,
@@ -435,6 +469,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
435 469
436 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, 470 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
437 evict, no_wait, new_mem); 471 evict, no_wait, new_mem);
472 if (nvbo->channel && nvbo->channel != chan)
473 ret = nouveau_fence_wait(fence, NULL, false, false);
438 nouveau_fence_unref((void *)&fence); 474 nouveau_fence_unref((void *)&fence);
439 return ret; 475 return ret;
440} 476}
@@ -455,11 +491,12 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
455} 491}
456 492
457static int 493static int
458nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait, 494nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
459 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 495 int no_wait, struct ttm_mem_reg *new_mem)
460{ 496{
461 struct nouveau_bo *nvbo = nouveau_bo(bo); 497 struct nouveau_bo *nvbo = nouveau_bo(bo);
462 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 498 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
499 struct ttm_mem_reg *old_mem = &bo->mem;
463 struct nouveau_channel *chan; 500 struct nouveau_channel *chan;
464 uint64_t src_offset, dst_offset; 501 uint64_t src_offset, dst_offset;
465 uint32_t page_count; 502 uint32_t page_count;
@@ -547,7 +584,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
547 584
548 placement.fpfn = placement.lpfn = 0; 585 placement.fpfn = placement.lpfn = 0;
549 placement.num_placement = placement.num_busy_placement = 1; 586 placement.num_placement = placement.num_busy_placement = 1;
550 placement.placement = &placement_memtype; 587 placement.placement = placement.busy_placement = &placement_memtype;
551 588
552 tmp_mem = *new_mem; 589 tmp_mem = *new_mem;
553 tmp_mem.mm_node = NULL; 590 tmp_mem.mm_node = NULL;
@@ -559,7 +596,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
559 if (ret) 596 if (ret)
560 goto out; 597 goto out;
561 598
562 ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem); 599 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
563 if (ret) 600 if (ret)
564 goto out; 601 goto out;
565 602
@@ -585,7 +622,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
585 622
586 placement.fpfn = placement.lpfn = 0; 623 placement.fpfn = placement.lpfn = 0;
587 placement.num_placement = placement.num_busy_placement = 1; 624 placement.num_placement = placement.num_busy_placement = 1;
588 placement.placement = &placement_memtype; 625 placement.placement = placement.busy_placement = &placement_memtype;
589 626
590 tmp_mem = *new_mem; 627 tmp_mem = *new_mem;
591 tmp_mem.mm_node = NULL; 628 tmp_mem.mm_node = NULL;
@@ -597,7 +634,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
597 if (ret) 634 if (ret)
598 goto out; 635 goto out;
599 636
600 ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem); 637 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
601 if (ret) 638 if (ret)
602 goto out; 639 goto out;
603 640
@@ -612,52 +649,106 @@ out:
612} 649}
613 650
614static int 651static int
615nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, 652nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
616 bool no_wait, struct ttm_mem_reg *new_mem) 653 struct nouveau_tile_reg **new_tile)
617{ 654{
618 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 655 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
619 struct nouveau_bo *nvbo = nouveau_bo(bo);
620 struct drm_device *dev = dev_priv->dev; 656 struct drm_device *dev = dev_priv->dev;
621 struct ttm_mem_reg *old_mem = &bo->mem; 657 struct nouveau_bo *nvbo = nouveau_bo(bo);
658 uint64_t offset;
622 int ret; 659 int ret;
623 660
624 if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM && 661 if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
625 !nvbo->no_vm) { 662 /* Nothing to do. */
626 uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT; 663 *new_tile = NULL;
664 return 0;
665 }
666
667 offset = new_mem->mm_node->start << PAGE_SHIFT;
627 668
669 if (dev_priv->card_type == NV_50) {
628 ret = nv50_mem_vm_bind_linear(dev, 670 ret = nv50_mem_vm_bind_linear(dev,
629 offset + dev_priv->vm_vram_base, 671 offset + dev_priv->vm_vram_base,
630 new_mem->size, nvbo->tile_flags, 672 new_mem->size, nvbo->tile_flags,
631 offset); 673 offset);
632 if (ret) 674 if (ret)
633 return ret; 675 return ret;
676
677 } else if (dev_priv->card_type >= NV_10) {
678 *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
679 nvbo->tile_mode);
680 }
681
682 return 0;
683}
684
685static void
686nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
687 struct nouveau_tile_reg *new_tile,
688 struct nouveau_tile_reg **old_tile)
689{
690 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
691 struct drm_device *dev = dev_priv->dev;
692
693 if (dev_priv->card_type >= NV_10 &&
694 dev_priv->card_type < NV_50) {
695 if (*old_tile)
696 nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
697
698 *old_tile = new_tile;
634 } 699 }
700}
701
702static int
703nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
704 bool no_wait, struct ttm_mem_reg *new_mem)
705{
706 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
707 struct nouveau_bo *nvbo = nouveau_bo(bo);
708 struct ttm_mem_reg *old_mem = &bo->mem;
709 struct nouveau_tile_reg *new_tile = NULL;
710 int ret = 0;
711
712 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
713 if (ret)
714 return ret;
635 715
716 /* Software copy if the card isn't up and running yet. */
636 if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE || 717 if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
637 !dev_priv->channel) 718 !dev_priv->channel) {
638 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); 719 ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
720 goto out;
721 }
639 722
723 /* Fake bo copy. */
640 if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { 724 if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
641 BUG_ON(bo->mem.mm_node != NULL); 725 BUG_ON(bo->mem.mm_node != NULL);
642 bo->mem = *new_mem; 726 bo->mem = *new_mem;
643 new_mem->mm_node = NULL; 727 new_mem->mm_node = NULL;
644 return 0; 728 goto out;
645 } 729 }
646 730
647 if (new_mem->mem_type == TTM_PL_SYSTEM) { 731 /* Hardware assisted copy. */
648 if (old_mem->mem_type == TTM_PL_SYSTEM) 732 if (new_mem->mem_type == TTM_PL_SYSTEM)
649 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); 733 ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
650 if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem)) 734 else if (old_mem->mem_type == TTM_PL_SYSTEM)
651 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); 735 ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
652 } else if (old_mem->mem_type == TTM_PL_SYSTEM) { 736 else
653 if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem)) 737 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
654 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
655 } else {
656 if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
657 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
658 }
659 738
660 return 0; 739 if (!ret)
740 goto out;
741
742 /* Fallback to software copy. */
743 ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
744
745out:
746 if (ret)
747 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
748 else
749 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
750
751 return ret;
661} 752}
662 753
663static int 754static int
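
The restructured nouveau_bo_move() above brackets every copy with tiling-region state: nouveau_bo_vm_bind() acquires the new region up front, one of the three hardware paths (flipd, flips, m2mf) is tried, a single ttm_bo_move_memcpy() fallback catches any failure, and nouveau_bo_vm_cleanup() then either retires the old tile or throws the new one away. A minimal standalone model of that commit-or-rollback bracketing; vm_bind, vm_cleanup and cur_tile are illustrative stand-ins, not the driver's helpers:

#include <stdio.h>

struct tile { int id; };

static struct tile *cur_tile;          /* stand-in for nvbo->tile */

static struct tile *vm_bind(void)      /* acquire a new tiling region */
{
	static struct tile t = { 42 };
	return &t;
}

/* expire whatever *slot holds, then install the replacement */
static void vm_cleanup(struct tile *replacement, struct tile **slot)
{
	if (*slot)
		printf("expire tile %d\n", (*slot)->id);
	*slot = replacement;
}

static int hw_copy(int hw_ok) { return hw_ok ? 0 : -1; }
static int sw_copy(void)      { return 0; }   /* memcpy fallback */

static int bo_move(int hw_ok)
{
	struct tile *new_tile = vm_bind();       /* bind before the copy */
	int ret = hw_copy(hw_ok);

	if (ret)
		ret = sw_copy();                 /* single fallback point */

	if (ret)
		vm_cleanup(NULL, &new_tile);     /* failure: drop new tile */
	else
		vm_cleanup(new_tile, &cur_tile); /* success: retire old one */
	return ret;
}

int main(void)
{
	bo_move(0);   /* hardware path fails, memcpy saves it */
	bo_move(1);   /* hardware path succeeds */
	return 0;
}

Keeping one fallback and one cleanup point is what lets the tile state stay consistent no matter which copy path actually ran.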
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 9aaa972f8822..2281f99da7fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -158,6 +158,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
158 return ret; 158 return ret;
159 } 159 }
160 160
161 nouveau_dma_pre_init(chan);
162
161 /* Locate channel's user control regs */ 163 /* Locate channel's user control regs */
162 if (dev_priv->card_type < NV_40) 164 if (dev_priv->card_type < NV_40)
163 user = NV03_USER(channel); 165 user = NV03_USER(channel);
@@ -235,47 +237,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
235 return 0; 237 return 0;
236} 238}
237 239
238int
239nouveau_channel_idle(struct nouveau_channel *chan)
240{
241 struct drm_device *dev = chan->dev;
242 struct drm_nouveau_private *dev_priv = dev->dev_private;
243 struct nouveau_engine *engine = &dev_priv->engine;
244 uint32_t caches;
245 int idle;
246
247 if (!chan) {
248 NV_ERROR(dev, "no channel...\n");
249 return 1;
250 }
251
252 caches = nv_rd32(dev, NV03_PFIFO_CACHES);
253 nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1);
254
255 if (engine->fifo.channel_id(dev) != chan->id) {
256 struct nouveau_gpuobj *ramfc =
257 chan->ramfc ? chan->ramfc->gpuobj : NULL;
258
259 if (!ramfc) {
260 NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id);
261 return 1;
262 }
263
264 engine->instmem.prepare_access(dev, false);
265 if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1))
266 idle = 0;
267 else
268 idle = 1;
269 engine->instmem.finish_access(dev);
270 } else {
271 idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) ==
272 nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
273 }
274
275 nv_wr32(dev, NV03_PFIFO_CACHES, caches);
276 return idle;
277}
278
279/* stops a fifo */ 240/* stops a fifo */
280void 241void
281nouveau_channel_free(struct nouveau_channel *chan) 242nouveau_channel_free(struct nouveau_channel *chan)
@@ -317,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
317 /* Ensure the channel is no longer active on the GPU */ 278 /* Ensure the channel is no longer active on the GPU */
318 pfifo->reassign(dev, false); 279 pfifo->reassign(dev, false);
319 280
320 if (pgraph->channel(dev) == chan) { 281 pgraph->fifo_access(dev, false);
321 pgraph->fifo_access(dev, false); 282 if (pgraph->channel(dev) == chan)
322 pgraph->unload_context(dev); 283 pgraph->unload_context(dev);
323 pgraph->fifo_access(dev, true);
324 }
325 pgraph->destroy_context(chan); 284 pgraph->destroy_context(chan);
285 pgraph->fifo_access(dev, true);
326 286
327 if (pfifo->channel_id(dev) == chan->id) { 287 if (pfifo->channel_id(dev) == chan->id) {
328 pfifo->disable(dev); 288 pfifo->disable(dev);
@@ -414,7 +374,9 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
414 init->subchan[0].grclass = 0x0039; 374 init->subchan[0].grclass = 0x0039;
415 else 375 else
416 init->subchan[0].grclass = 0x5039; 376 init->subchan[0].grclass = 0x5039;
417 init->nr_subchan = 1; 377 init->subchan[1].handle = NvSw;
378 init->subchan[1].grclass = NV_SW;
379 init->nr_subchan = 2;
418 380
419 /* Named memory object area */ 381 /* Named memory object area */
420 ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, 382 ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
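
Besides priming the ring bookkeeping with nouveau_dma_pre_init() before the channel is first touched and advertising the new NvSw object as a second subchannel to userspace, the teardown path now holds PGRAPH fifo access off across both the context unload and destroy_context(), rather than re-enabling it in between. A sketch of that widened quiesce window, with plain variables standing in for the engine state:

#include <stdbool.h>
#include <stdio.h>

static bool dispatch_enabled = true;   /* models PGRAPH fifo access */
static int  current_ctx = 1;           /* channel whose context is loaded */

static void fifo_access(bool on)
{
	dispatch_enabled = on;
}

static void channel_free(int chan)
{
	fifo_access(false);                /* quiesce before touching ctx */
	if (current_ctx == chan) {
		printf("unload ctx %d\n", chan);
		current_ctx = -1;
	}
	printf("destroy ctx %d\n", chan);  /* still quiesced here */
	fifo_access(true);                 /* only now let work resume */
}

int main(void)
{
	channel_free(1);
	return dispatch_enabled ? 0 : 1;
}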
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 5a10deb8bdbd..d2f63353ea97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -24,9 +24,12 @@
24 * 24 *
25 */ 25 */
26 26
27#include <acpi/button.h>
28
27#include "drmP.h" 29#include "drmP.h"
28#include "drm_edid.h" 30#include "drm_edid.h"
29#include "drm_crtc_helper.h" 31#include "drm_crtc_helper.h"
32
30#include "nouveau_reg.h" 33#include "nouveau_reg.h"
31#include "nouveau_drv.h" 34#include "nouveau_drv.h"
32#include "nouveau_encoder.h" 35#include "nouveau_encoder.h"
@@ -83,14 +86,17 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
83static void 86static void
84nouveau_connector_destroy(struct drm_connector *drm_connector) 87nouveau_connector_destroy(struct drm_connector *drm_connector)
85{ 88{
86 struct nouveau_connector *connector = nouveau_connector(drm_connector); 89 struct nouveau_connector *nv_connector =
87 struct drm_device *dev = connector->base.dev; 90 nouveau_connector(drm_connector);
91 struct drm_device *dev;
88 92
89 NV_DEBUG_KMS(dev, "\n"); 93 if (!nv_connector)
90
91 if (!connector)
92 return; 94 return;
93 95
96 dev = nv_connector->base.dev;
97 NV_DEBUG_KMS(dev, "\n");
98
99 kfree(nv_connector->edid);
94 drm_sysfs_connector_remove(drm_connector); 100 drm_sysfs_connector_remove(drm_connector);
95 drm_connector_cleanup(drm_connector); 101 drm_connector_cleanup(drm_connector);
96 kfree(drm_connector); 102 kfree(drm_connector);
@@ -233,10 +239,21 @@ nouveau_connector_detect(struct drm_connector *connector)
233 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) 239 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
234 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); 240 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
235 if (nv_encoder && nv_connector->native_mode) { 241 if (nv_encoder && nv_connector->native_mode) {
242#ifdef CONFIG_ACPI
243 if (!nouveau_ignorelid && !acpi_lid_open())
244 return connector_status_disconnected;
245#endif
236 nouveau_connector_set_encoder(connector, nv_encoder); 246 nouveau_connector_set_encoder(connector, nv_encoder);
237 return connector_status_connected; 247 return connector_status_connected;
238 } 248 }
239 249
250 /* Cleanup the previous EDID block. */
251 if (nv_connector->edid) {
252 drm_mode_connector_update_edid_property(connector, NULL);
253 kfree(nv_connector->edid);
254 nv_connector->edid = NULL;
255 }
256
240 i2c = nouveau_connector_ddc_detect(connector, &nv_encoder); 257 i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
241 if (i2c) { 258 if (i2c) {
242 nouveau_connector_ddc_prepare(connector, &flags); 259 nouveau_connector_ddc_prepare(connector, &flags);
@@ -247,7 +264,7 @@ nouveau_connector_detect(struct drm_connector *connector)
247 if (!nv_connector->edid) { 264 if (!nv_connector->edid) {
248 NV_ERROR(dev, "DDC responded, but no EDID for %s\n", 265 NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
249 drm_get_connector_name(connector)); 266 drm_get_connector_name(connector));
250 return connector_status_disconnected; 267 goto detect_analog;
251 } 268 }
252 269
253 if (nv_encoder->dcb->type == OUTPUT_DP && 270 if (nv_encoder->dcb->type == OUTPUT_DP &&
@@ -281,6 +298,7 @@ nouveau_connector_detect(struct drm_connector *connector)
281 return connector_status_connected; 298 return connector_status_connected;
282 } 299 }
283 300
301detect_analog:
284 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); 302 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
285 if (!nv_encoder) 303 if (!nv_encoder)
286 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); 304 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
@@ -687,8 +705,12 @@ nouveau_connector_create_lvds(struct drm_device *dev,
687 */ 705 */
688 if (!nv_connector->edid && !nv_connector->native_mode && 706 if (!nv_connector->edid && !nv_connector->native_mode &&
689 !dev_priv->VBIOS.pub.fp_no_ddc) { 707 !dev_priv->VBIOS.pub.fp_no_ddc) {
690 nv_connector->edid = 708 struct edid *edid =
691 (struct edid *)nouveau_bios_embedded_edid(dev); 709 (struct edid *)nouveau_bios_embedded_edid(dev);
710 if (edid) {
711 nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
712 *(nv_connector->edid) = *edid;
713 }
692 } 714 }
693 715
694 if (!nv_connector->edid) 716 if (!nv_connector->edid)
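
The connector rework makes nv_connector->edid unconditionally heap-owned: the BIOS-embedded EDID is copied into a kmalloc'd block instead of being cached as a pointer into the VBIOS image, any stale block is freed (and the EDID property cleared) at the top of detect, and destroy can kfree() without caring where the data came from. A userspace model of that ownership rule, with malloc/free standing in for kmalloc/kfree:

#include <stdlib.h>
#include <string.h>

#define EDID_LENGTH 128

struct connector { unsigned char *edid; };

static void cache_edid(struct connector *c, const unsigned char *borrowed)
{
	free(c->edid);                 /* drop any stale cached block */
	c->edid = NULL;
	if (!borrowed)
		return;
	c->edid = malloc(EDID_LENGTH); /* own a private copy */
	if (c->edid)
		memcpy(c->edid, borrowed, EDID_LENGTH);
}

static void destroy(struct connector *c)
{
	free(c->edid);                 /* always safe: we own the block */
	c->edid = NULL;
}

int main(void)
{
	unsigned char bios_block[EDID_LENGTH] = { 0x00, 0xff };
	struct connector c = { 0 };

	cache_edid(&c, bios_block);    /* BIOS-embedded EDID: copy it */
	cache_edid(&c, NULL);          /* a later detect finds nothing */
	destroy(&c);
	return 0;
}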
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 703553687b20..50d9e67745af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -29,12 +29,22 @@
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_dma.h" 30#include "nouveau_dma.h"
31 31
32void
33nouveau_dma_pre_init(struct nouveau_channel *chan)
34{
35 chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
36 chan->dma.put = 0;
37 chan->dma.cur = chan->dma.put;
38 chan->dma.free = chan->dma.max - chan->dma.cur;
39}
40
32int 41int
33nouveau_dma_init(struct nouveau_channel *chan) 42nouveau_dma_init(struct nouveau_channel *chan)
34{ 43{
35 struct drm_device *dev = chan->dev; 44 struct drm_device *dev = chan->dev;
36 struct drm_nouveau_private *dev_priv = dev->dev_private; 45 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_gpuobj *m2mf = NULL; 46 struct nouveau_gpuobj *m2mf = NULL;
47 struct nouveau_gpuobj *nvsw = NULL;
38 int ret, i; 48 int ret, i;
39 49
40 /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ 50 /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
@@ -47,6 +57,15 @@ nouveau_dma_init(struct nouveau_channel *chan)
47 if (ret) 57 if (ret)
48 return ret; 58 return ret;
49 59
60 /* Create an NV_SW object for various sync purposes */
61 ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
62 if (ret)
63 return ret;
64
65 ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
66 if (ret)
67 return ret;
68
50 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ 69 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
51 ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); 70 ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
52 if (ret) 71 if (ret)
@@ -64,12 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
64 return ret; 83 return ret;
65 } 84 }
66 85
67 /* Initialise DMA vars */
68 chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
69 chan->dma.put = 0;
70 chan->dma.cur = chan->dma.put;
71 chan->dma.free = chan->dma.max - chan->dma.cur;
72
73 /* Insert NOPS for NOUVEAU_DMA_SKIPS */ 86 /* Insert NOPS for NOUVEAU_DMA_SKIPS */
74 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); 87 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
75 if (ret) 88 if (ret)
@@ -87,6 +100,13 @@ nouveau_dma_init(struct nouveau_channel *chan)
87 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1); 100 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
88 OUT_RING(chan, NvNotify0); 101 OUT_RING(chan, NvNotify0);
89 102
103 /* Initialise NV_SW */
104 ret = RING_SPACE(chan, 2);
105 if (ret)
106 return ret;
107 BEGIN_RING(chan, NvSubSw, 0, 1);
108 OUT_RING(chan, NvSw);
109
90 /* Sit back and pray the channel works.. */ 110 /* Sit back and pray the channel works.. */
91 FIRE_RING(chan); 111 FIRE_RING(chan);
92 112
@@ -106,47 +126,52 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
106 chan->dma.cur += nr_dwords; 126 chan->dma.cur += nr_dwords;
107} 127}
108 128
109static inline bool 129/* Fetch and adjust GPU GET pointer
110READ_GET(struct nouveau_channel *chan, uint32_t *get) 130 *
131 * Returns:
132 * value >= 0, the adjusted GET pointer
133 * -EINVAL if GET pointer currently outside main push buffer
134 * -EBUSY if timeout exceeded
135 */
136static inline int
137READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
111{ 138{
112 uint32_t val; 139 uint32_t val;
113 140
114 val = nvchan_rd32(chan, chan->user_get); 141 val = nvchan_rd32(chan, chan->user_get);
115 if (val < chan->pushbuf_base || 142
116 val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) { 143 /* reset counter as long as GET is still advancing, this is
117 /* meaningless to dma_wait() except to know whether the 144 * to avoid misdetecting a GPU lockup if the GPU happens to
118 * GPU has stalled or not 145 * just be processing an operation that takes a long time
119 */ 146 */
120 *get = val; 147 if (val != *prev_get) {
121 return false; 148 *prev_get = val;
149 *timeout = 0;
150 }
151
152 if ((++*timeout & 0xff) == 0) {
153 DRM_UDELAY(1);
154 if (*timeout > 100000)
155 return -EBUSY;
122 } 156 }
123 157
124 *get = (val - chan->pushbuf_base) >> 2; 158 if (val < chan->pushbuf_base ||
125 return true; 159 val > chan->pushbuf_base + (chan->dma.max << 2))
160 return -EINVAL;
161
162 return (val - chan->pushbuf_base) >> 2;
126} 163}
127 164
128int 165int
129nouveau_dma_wait(struct nouveau_channel *chan, int size) 166nouveau_dma_wait(struct nouveau_channel *chan, int size)
130{ 167{
131 uint32_t get, prev_get = 0, cnt = 0; 168 uint32_t prev_get = 0, cnt = 0;
132 bool get_valid; 169 int get;
133 170
134 while (chan->dma.free < size) { 171 while (chan->dma.free < size) {
135 /* reset counter as long as GET is still advancing, this is 172 get = READ_GET(chan, &prev_get, &cnt);
136 * to avoid misdetecting a GPU lockup if the GPU happens to 173 if (unlikely(get == -EBUSY))
137 * just be processing an operation that takes a long time 174 return -EBUSY;
138 */
139 get_valid = READ_GET(chan, &get);
140 if (get != prev_get) {
141 prev_get = get;
142 cnt = 0;
143 }
144
145 if ((++cnt & 0xff) == 0) {
146 DRM_UDELAY(1);
147 if (cnt > 100000)
148 return -EBUSY;
149 }
150 175
151 /* loop until we have a usable GET pointer. the value 176 /* loop until we have a usable GET pointer. the value
152 * we read from the GPU may be outside the main ring if 177 * we read from the GPU may be outside the main ring if
@@ -157,7 +182,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
157 * from the SKIPS area, so the code below doesn't have to deal 182 * from the SKIPS area, so the code below doesn't have to deal
158 * with some fun corner cases. 183 * with some fun corner cases.
159 */ 184 */
160 if (!get_valid || get < NOUVEAU_DMA_SKIPS) 185 if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
161 continue; 186 continue;
162 187
163 if (get <= chan->dma.cur) { 188 if (get <= chan->dma.cur) {
@@ -183,6 +208,19 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
183 * after processing the currently pending commands. 208 * after processing the currently pending commands.
184 */ 209 */
185 OUT_RING(chan, chan->pushbuf_base | 0x20000000); 210 OUT_RING(chan, chan->pushbuf_base | 0x20000000);
211
212 /* wait for GET to depart from the skips area.
213 * prevents writing GET==PUT and causing a race
214 * condition that causes us to think the GPU is
215 * idle when it's not.
216 */
217 do {
218 get = READ_GET(chan, &prev_get, &cnt);
219 if (unlikely(get == -EBUSY))
220 return -EBUSY;
221 if (unlikely(get == -EINVAL))
222 continue;
223 } while (get <= NOUVEAU_DMA_SKIPS);
186 WRITE_PUT(NOUVEAU_DMA_SKIPS); 224 WRITE_PUT(NOUVEAU_DMA_SKIPS);
187 225
188 /* we're now submitting commands at the start of 226 /* we're now submitting commands at the start of
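
READ_GET() is reworked to return an int: a non-negative word offset into the ring, -EINVAL while GET points outside the main push buffer, or -EBUSY once GET has stopped advancing for too long, with the stall bookkeeping folded into the helper so both wait loops above can share it. A self-contained userspace model of that contract; hw_read_get() and the ring geometry are simulated:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define RING_BASE  0x1000u   /* stand-in for chan->pushbuf_base */
#define RING_WORDS 1024u     /* stand-in for chan->dma.max */

static uint32_t fake_get = 0x1004;            /* simulated GET register */
static uint32_t hw_read_get(void) { return fake_get; }

static int read_get(uint32_t *prev_get, uint32_t *timeout)
{
	uint32_t val = hw_read_get();

	/* reset the stall counter while GET is still advancing */
	if (val != *prev_get) {
		*prev_get = val;
		*timeout = 0;
	}

	if ((++*timeout & 0xff) == 0) {
		usleep(1);
		if (*timeout > 100000)
			return -EBUSY;        /* GPU looks locked up */
	}

	if (val < RING_BASE || val > RING_BASE + (RING_WORDS << 2))
		return -EINVAL;               /* GET outside the ring */

	return (val - RING_BASE) >> 2;        /* word offset into ring */
}

int main(void)
{
	uint32_t prev = 0, cnt = 0;

	printf("get = %d\n", read_get(&prev, &cnt));
	return 0;
}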
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 04e85d8f757e..dabfd655f93e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -46,10 +46,11 @@
46/* Hardcoded object assignments to subchannels (subchannel id). */ 46/* Hardcoded object assignments to subchannels (subchannel id). */
47enum { 47enum {
48 NvSubM2MF = 0, 48 NvSubM2MF = 0,
49 NvSub2D = 1, 49 NvSubSw = 1,
50 NvSubCtxSurf2D = 1, 50 NvSub2D = 2,
51 NvSubGdiRect = 2, 51 NvSubCtxSurf2D = 2,
52 NvSubImageBlit = 3 52 NvSubGdiRect = 3,
53 NvSubImageBlit = 4
53}; 54};
54 55
55/* Object handles. */ 56/* Object handles. */
@@ -67,6 +68,7 @@ enum {
67 NvClipRect = 0x8000000b, 68 NvClipRect = 0x8000000b,
68 NvGdiRect = 0x8000000c, 69 NvGdiRect = 0x8000000c,
69 NvImageBlit = 0x8000000d, 70 NvImageBlit = 0x8000000d,
71 NvSw = 0x8000000e,
70 72
71 /* G80+ display objects */ 73 /* G80+ display objects */
72 NvEvoVRAM = 0x01000000, 74 NvEvoVRAM = 0x01000000,
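
With NvSubSw inserted as subchannel 1, the later fbcon subchannels each shift up by one and the handle table gains NvSw. The binding contract itself is unchanged: method 0 on a subchannel binds an object handle, and later methods on that subchannel address the bound object, which is what lets nouveau_fence_emit() write sequence numbers through NvSubSw. A toy trace of that sequence, with push() standing in for BEGIN_RING/OUT_RING:

#include <stdint.h>
#include <stdio.h>

enum { SUB_M2MF = 0, SUB_SW = 1 };   /* mirrors the new numbering */
#define HANDLE_SW 0x8000000e         /* the NvSw handle above */

static void push(int subc, uint32_t mthd, uint32_t data)
{
	printf("subc %d mthd 0x%04x data 0x%08x\n", subc, mthd, data);
}

int main(void)
{
	push(SUB_SW, 0x0000, HANDLE_SW); /* bind NvSw to subchannel 1 */
	push(SUB_SW, 0x0150, 42);        /* e.g. emit a fence sequence */
	return 0;
}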
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 9e2926c48579..f954ad93e81f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -490,7 +490,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
490 if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) { 490 if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
491 NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n", 491 NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
492 nv_rd32(dev, NV50_AUXCH_CTRL(index))); 492 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
493 return -EBUSY; 493 ret = -EBUSY;
494 goto out;
494 } 495 }
495 496
496 udelay(400); 497 udelay(400);
@@ -502,6 +503,11 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
502 } 503 }
503 504
504 if (cmd & 1) { 505 if (cmd & 1) {
506 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
507 ret = -EREMOTEIO;
508 goto out;
509 }
510
505 for (i = 0; i < 4; i++) { 511 for (i = 0; i < 4; i++) {
506 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); 512 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
507 NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]); 513 NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
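
The AUX changes send every failure through a common exit path (so the channel state is always unwound) and treat a read reply whose returned byte count differs from the request as -EREMOTEIO rather than silently copying stale data. A condensed model of the short-read check; stat_count and want stand in for the NV50_AUXCH_STAT_COUNT field and the requested length:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int aux_read(uint32_t stat_count, uint32_t want,
		    const uint32_t *hw, uint32_t *out, int words)
{
	int ret = 0, i;

	if (stat_count != want) {
		ret = -EREMOTEIO;   /* sink returned a short read */
		goto out;
	}
	for (i = 0; i < words; i++)
		out[i] = hw[i];
out:
	/* common unlock/cleanup path */
	return ret;
}

int main(void)
{
	uint32_t hw[1] = { 0xdeadbeef }, out[1];

	printf("%d\n", aux_read(0, 1, hw, out, 1));  /* -EREMOTEIO */
	printf("%d\n", aux_read(1, 1, hw, out, 1));  /* 0 */
	return 0;
}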
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 06eb993e0883..da3b93b84502 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -56,7 +56,7 @@ int nouveau_vram_pushbuf;
56module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); 56module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
57 57
58MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); 58MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
59int nouveau_vram_notify; 59int nouveau_vram_notify = 1;
60module_param_named(vram_notify, nouveau_vram_notify, int, 0400); 60module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
61 61
62MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); 62MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -71,6 +71,18 @@ MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
71int nouveau_uscript_tmds = -1; 71int nouveau_uscript_tmds = -1;
72module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400); 72module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
73 73
74MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
75int nouveau_ignorelid = 0;
76module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
77
 78MODULE_PARM_DESC(noaccel, "Disable all acceleration");
79int nouveau_noaccel = 0;
80module_param_named(noaccel, nouveau_noaccel, int, 0400);
81
 82MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
83int nouveau_nofbaccel = 0;
84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
85
74MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" 86MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
75 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" 87 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
76 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" 88 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 5f8cbb79c499..5445cefdd03e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -59,11 +59,19 @@ struct nouveau_grctx;
59#define MAX_NUM_DCB_ENTRIES 16 59#define MAX_NUM_DCB_ENTRIES 16
60 60
61#define NOUVEAU_MAX_CHANNEL_NR 128 61#define NOUVEAU_MAX_CHANNEL_NR 128
62#define NOUVEAU_MAX_TILE_NR 15
62 63
63#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL) 64#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
64#define NV50_VM_BLOCK (512*1024*1024ULL) 65#define NV50_VM_BLOCK (512*1024*1024ULL)
65#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK) 66#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
66 67
68struct nouveau_tile_reg {
69 struct nouveau_fence *fence;
70 uint32_t addr;
71 uint32_t size;
72 bool used;
73};
74
67struct nouveau_bo { 75struct nouveau_bo {
68 struct ttm_buffer_object bo; 76 struct ttm_buffer_object bo;
69 struct ttm_placement placement; 77 struct ttm_placement placement;
@@ -83,6 +91,7 @@ struct nouveau_bo {
83 91
84 uint32_t tile_mode; 92 uint32_t tile_mode;
85 uint32_t tile_flags; 93 uint32_t tile_flags;
94 struct nouveau_tile_reg *tile;
86 95
87 struct drm_gem_object *gem; 96 struct drm_gem_object *gem;
88 struct drm_file *cpu_filp; 97 struct drm_file *cpu_filp;
@@ -277,8 +286,13 @@ struct nouveau_timer_engine {
277}; 286};
278 287
279struct nouveau_fb_engine { 288struct nouveau_fb_engine {
289 int num_tiles;
290
280 int (*init)(struct drm_device *dev); 291 int (*init)(struct drm_device *dev);
281 void (*takedown)(struct drm_device *dev); 292 void (*takedown)(struct drm_device *dev);
293
294 void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
295 uint32_t size, uint32_t pitch);
282}; 296};
283 297
284struct nouveau_fifo_engine { 298struct nouveau_fifo_engine {
@@ -292,6 +306,8 @@ struct nouveau_fifo_engine {
292 void (*disable)(struct drm_device *); 306 void (*disable)(struct drm_device *);
293 void (*enable)(struct drm_device *); 307 void (*enable)(struct drm_device *);
294 bool (*reassign)(struct drm_device *, bool enable); 308 bool (*reassign)(struct drm_device *, bool enable);
309 bool (*cache_flush)(struct drm_device *dev);
310 bool (*cache_pull)(struct drm_device *dev, bool enable);
295 311
296 int (*channel_id)(struct drm_device *); 312 int (*channel_id)(struct drm_device *);
297 313
@@ -330,6 +346,9 @@ struct nouveau_pgraph_engine {
330 void (*destroy_context)(struct nouveau_channel *); 346 void (*destroy_context)(struct nouveau_channel *);
331 int (*load_context)(struct nouveau_channel *); 347 int (*load_context)(struct nouveau_channel *);
332 int (*unload_context)(struct drm_device *); 348 int (*unload_context)(struct drm_device *);
349
350 void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
351 uint32_t size, uint32_t pitch);
333}; 352};
334 353
335struct nouveau_engine { 354struct nouveau_engine {
@@ -490,6 +509,8 @@ struct drm_nouveau_private {
490 void __iomem *ramin; 509 void __iomem *ramin;
491 uint32_t ramin_size; 510 uint32_t ramin_size;
492 511
512 struct nouveau_bo *vga_ram;
513
493 struct workqueue_struct *wq; 514 struct workqueue_struct *wq;
494 struct work_struct irq_work; 515 struct work_struct irq_work;
495 516
@@ -548,6 +569,12 @@ struct drm_nouveau_private {
548 unsigned long sg_handle; 569 unsigned long sg_handle;
549 } gart_info; 570 } gart_info;
550 571
572 /* nv10-nv40 tiling regions */
573 struct {
574 struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
575 spinlock_t lock;
576 } tile;
577
551 /* G8x/G9x virtual address space */ 578 /* G8x/G9x virtual address space */
552 uint64_t vm_gart_base; 579 uint64_t vm_gart_base;
553 uint64_t vm_gart_size; 580 uint64_t vm_gart_size;
@@ -650,6 +677,9 @@ extern char *nouveau_tv_norm;
650extern int nouveau_reg_debug; 677extern int nouveau_reg_debug;
651extern char *nouveau_vbios; 678extern char *nouveau_vbios;
652extern int nouveau_ctxfw; 679extern int nouveau_ctxfw;
680extern int nouveau_ignorelid;
681extern int nouveau_nofbaccel;
682extern int nouveau_noaccel;
653 683
654/* nouveau_state.c */ 684/* nouveau_state.c */
655extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); 685extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
@@ -685,6 +715,13 @@ extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
685extern int nouveau_mem_init(struct drm_device *); 715extern int nouveau_mem_init(struct drm_device *);
686extern int nouveau_mem_init_agp(struct drm_device *); 716extern int nouveau_mem_init_agp(struct drm_device *);
687extern void nouveau_mem_close(struct drm_device *); 717extern void nouveau_mem_close(struct drm_device *);
718extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
719 uint32_t addr,
720 uint32_t size,
721 uint32_t pitch);
722extern void nv10_mem_expire_tiling(struct drm_device *dev,
723 struct nouveau_tile_reg *tile,
724 struct nouveau_fence *fence);
688extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt, 725extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
689 uint32_t size, uint32_t flags, 726 uint32_t size, uint32_t flags,
690 uint64_t phys); 727 uint64_t phys);
@@ -713,7 +750,6 @@ extern int nouveau_channel_alloc(struct drm_device *dev,
713 struct drm_file *file_priv, 750 struct drm_file *file_priv,
714 uint32_t fb_ctxdma, uint32_t tt_ctxdma); 751 uint32_t fb_ctxdma, uint32_t tt_ctxdma);
715extern void nouveau_channel_free(struct nouveau_channel *); 752extern void nouveau_channel_free(struct nouveau_channel *);
716extern int nouveau_channel_idle(struct nouveau_channel *chan);
717 753
718/* nouveau_object.c */ 754/* nouveau_object.c */
719extern int nouveau_gpuobj_early_init(struct drm_device *); 755extern int nouveau_gpuobj_early_init(struct drm_device *);
@@ -756,6 +792,8 @@ extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
756 uint32_t *o_ret); 792 uint32_t *o_ret);
757extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, 793extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
758 struct nouveau_gpuobj **); 794 struct nouveau_gpuobj **);
795extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
796 struct nouveau_gpuobj **);
759extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, 797extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
760 struct drm_file *); 798 struct drm_file *);
761extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, 799extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
@@ -804,6 +842,7 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
804#endif 842#endif
805 843
806/* nouveau_dma.c */ 844/* nouveau_dma.c */
845extern void nouveau_dma_pre_init(struct nouveau_channel *);
807extern int nouveau_dma_init(struct nouveau_channel *); 846extern int nouveau_dma_init(struct nouveau_channel *);
808extern int nouveau_dma_wait(struct nouveau_channel *, int size); 847extern int nouveau_dma_wait(struct nouveau_channel *, int size);
809 848
@@ -879,16 +918,22 @@ extern void nv04_fb_takedown(struct drm_device *);
879/* nv10_fb.c */ 918/* nv10_fb.c */
880extern int nv10_fb_init(struct drm_device *); 919extern int nv10_fb_init(struct drm_device *);
881extern void nv10_fb_takedown(struct drm_device *); 920extern void nv10_fb_takedown(struct drm_device *);
921extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
922 uint32_t, uint32_t);
882 923
883/* nv40_fb.c */ 924/* nv40_fb.c */
884extern int nv40_fb_init(struct drm_device *); 925extern int nv40_fb_init(struct drm_device *);
885extern void nv40_fb_takedown(struct drm_device *); 926extern void nv40_fb_takedown(struct drm_device *);
927extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
928 uint32_t, uint32_t);
886 929
887/* nv04_fifo.c */ 930/* nv04_fifo.c */
888extern int nv04_fifo_init(struct drm_device *); 931extern int nv04_fifo_init(struct drm_device *);
889extern void nv04_fifo_disable(struct drm_device *); 932extern void nv04_fifo_disable(struct drm_device *);
890extern void nv04_fifo_enable(struct drm_device *); 933extern void nv04_fifo_enable(struct drm_device *);
891extern bool nv04_fifo_reassign(struct drm_device *, bool); 934extern bool nv04_fifo_reassign(struct drm_device *, bool);
935extern bool nv04_fifo_cache_flush(struct drm_device *);
936extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
892extern int nv04_fifo_channel_id(struct drm_device *); 937extern int nv04_fifo_channel_id(struct drm_device *);
893extern int nv04_fifo_create_context(struct nouveau_channel *); 938extern int nv04_fifo_create_context(struct nouveau_channel *);
894extern void nv04_fifo_destroy_context(struct nouveau_channel *); 939extern void nv04_fifo_destroy_context(struct nouveau_channel *);
@@ -941,6 +986,8 @@ extern void nv10_graph_destroy_context(struct nouveau_channel *);
941extern int nv10_graph_load_context(struct nouveau_channel *); 986extern int nv10_graph_load_context(struct nouveau_channel *);
942extern int nv10_graph_unload_context(struct drm_device *); 987extern int nv10_graph_unload_context(struct drm_device *);
943extern void nv10_graph_context_switch(struct drm_device *); 988extern void nv10_graph_context_switch(struct drm_device *);
989extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
990 uint32_t, uint32_t);
944 991
945/* nv20_graph.c */ 992/* nv20_graph.c */
946extern struct nouveau_pgraph_object_class nv20_graph_grclass[]; 993extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
@@ -952,6 +999,8 @@ extern int nv20_graph_unload_context(struct drm_device *);
952extern int nv20_graph_init(struct drm_device *); 999extern int nv20_graph_init(struct drm_device *);
953extern void nv20_graph_takedown(struct drm_device *); 1000extern void nv20_graph_takedown(struct drm_device *);
954extern int nv30_graph_init(struct drm_device *); 1001extern int nv30_graph_init(struct drm_device *);
1002extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
1003 uint32_t, uint32_t);
955 1004
956/* nv40_graph.c */ 1005/* nv40_graph.c */
957extern struct nouveau_pgraph_object_class nv40_graph_grclass[]; 1006extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
@@ -963,6 +1012,8 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *);
963extern int nv40_graph_load_context(struct nouveau_channel *); 1012extern int nv40_graph_load_context(struct nouveau_channel *);
964extern int nv40_graph_unload_context(struct drm_device *); 1013extern int nv40_graph_unload_context(struct drm_device *);
965extern void nv40_grctx_init(struct nouveau_grctx *); 1014extern void nv40_grctx_init(struct nouveau_grctx *);
1015extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
1016 uint32_t, uint32_t);
966 1017
967/* nv50_graph.c */ 1018/* nv50_graph.c */
968extern struct nouveau_pgraph_object_class nv50_graph_grclass[]; 1019extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
@@ -1030,8 +1081,7 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
1030 1081
1031/* nv04_dac.c */ 1082/* nv04_dac.c */
1032extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry); 1083extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
1033extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder, 1084extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
1034 struct drm_connector *connector);
1035extern int nv04_dac_output_offset(struct drm_encoder *encoder); 1085extern int nv04_dac_output_offset(struct drm_encoder *encoder);
1036extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable); 1086extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
1037 1087
@@ -1049,9 +1099,6 @@ extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
1049 1099
1050/* nv17_tv.c */ 1100/* nv17_tv.c */
1051extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry); 1101extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
1052extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
1053 struct drm_connector *connector,
1054 uint32_t pin_mask);
1055 1102
1056/* nv04_display.c */ 1103/* nv04_display.c */
1057extern int nv04_display_create(struct drm_device *); 1104extern int nv04_display_create(struct drm_device *);
@@ -1290,14 +1337,14 @@ nv_two_reg_pll(struct drm_device *dev)
1290 return false; 1337 return false;
1291} 1338}
1292 1339
1293#define NV50_NVSW 0x0000506e 1340#define NV_SW 0x0000506e
1294#define NV50_NVSW_DMA_SEMAPHORE 0x00000060 1341#define NV_SW_DMA_SEMAPHORE 0x00000060
1295#define NV50_NVSW_SEMAPHORE_OFFSET 0x00000064 1342#define NV_SW_SEMAPHORE_OFFSET 0x00000064
1296#define NV50_NVSW_SEMAPHORE_ACQUIRE 0x00000068 1343#define NV_SW_SEMAPHORE_ACQUIRE 0x00000068
1297#define NV50_NVSW_SEMAPHORE_RELEASE 0x0000006c 1344#define NV_SW_SEMAPHORE_RELEASE 0x0000006c
1298#define NV50_NVSW_DMA_VBLSEM 0x0000018c 1345#define NV_SW_DMA_VBLSEM 0x0000018c
1299#define NV50_NVSW_VBLSEM_OFFSET 0x00000400 1346#define NV_SW_VBLSEM_OFFSET 0x00000400
1300#define NV50_NVSW_VBLSEM_RELEASE_VALUE 0x00000404 1347#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
1301#define NV50_NVSW_VBLSEM_RELEASE 0x00000408 1348#define NV_SW_VBLSEM_RELEASE 0x00000408
1302 1349
1303#endif /* __NOUVEAU_DRV_H__ */ 1350#endif /* __NOUVEAU_DRV_H__ */
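
The header additions give both the FB and PGRAPH engine tables a set_region_tiling hook plus a shared pool of nouveau_tile_reg state, so one logical tiling-region update can fan out to every block that mirrors the configuration. A toy dispatch over such hooks; the structures and names here are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

struct engine_ops {
	void (*set_region_tiling)(int i, uint32_t addr,
				  uint32_t size, uint32_t pitch);
};

static void fb_tile(int i, uint32_t a, uint32_t s, uint32_t p)
{
	printf("PFB    region %d: addr 0x%08x pitch %u\n", i, a, p);
}

static void gr_tile(int i, uint32_t a, uint32_t s, uint32_t p)
{
	printf("PGRAPH region %d: addr 0x%08x pitch %u\n", i, a, p);
}

int main(void)
{
	struct engine_ops fb = { fb_tile }, graph = { gr_tile };

	/* one logical update hits every engine that caches tile state */
	graph.set_region_tiling(0, 0x100000, 0x8000, 256);
	fb.set_region_tiling(0, 0x100000, 0x8000, 256);
	return 0;
}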
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 84af25c238b6..ea879a2efef3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -64,8 +64,7 @@ nouveau_fbcon_sync(struct fb_info *info)
64 return 0; 64 return 0;
65 65
66 if (RING_SPACE(chan, 4)) { 66 if (RING_SPACE(chan, 4)) {
67 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 67 nouveau_fbcon_gpu_lockup(info);
68 info->flags |= FBINFO_HWACCEL_DISABLED;
69 return 0; 68 return 0;
70 } 69 }
71 70
@@ -86,8 +85,7 @@ nouveau_fbcon_sync(struct fb_info *info)
86 } 85 }
87 86
88 if (ret) { 87 if (ret) {
89 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 88 nouveau_fbcon_gpu_lockup(info);
90 info->flags |= FBINFO_HWACCEL_DISABLED;
91 return 0; 89 return 0;
92 } 90 }
93 91
@@ -109,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = {
109 .fb_setcmap = drm_fb_helper_setcmap, 107 .fb_setcmap = drm_fb_helper_setcmap,
110}; 108};
111 109
110static struct fb_ops nv04_fbcon_ops = {
111 .owner = THIS_MODULE,
112 .fb_check_var = drm_fb_helper_check_var,
113 .fb_set_par = drm_fb_helper_set_par,
114 .fb_setcolreg = drm_fb_helper_setcolreg,
115 .fb_fillrect = nv04_fbcon_fillrect,
116 .fb_copyarea = nv04_fbcon_copyarea,
117 .fb_imageblit = nv04_fbcon_imageblit,
118 .fb_sync = nouveau_fbcon_sync,
119 .fb_pan_display = drm_fb_helper_pan_display,
120 .fb_blank = drm_fb_helper_blank,
121 .fb_setcmap = drm_fb_helper_setcmap,
122};
123
124static struct fb_ops nv50_fbcon_ops = {
125 .owner = THIS_MODULE,
126 .fb_check_var = drm_fb_helper_check_var,
127 .fb_set_par = drm_fb_helper_set_par,
128 .fb_setcolreg = drm_fb_helper_setcolreg,
129 .fb_fillrect = nv50_fbcon_fillrect,
130 .fb_copyarea = nv50_fbcon_copyarea,
131 .fb_imageblit = nv50_fbcon_imageblit,
132 .fb_sync = nouveau_fbcon_sync,
133 .fb_pan_display = drm_fb_helper_pan_display,
134 .fb_blank = drm_fb_helper_blank,
135 .fb_setcmap = drm_fb_helper_setcmap,
136};
137
112static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 138static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
113 u16 blue, int regno) 139 u16 blue, int regno)
114{ 140{
@@ -212,11 +238,11 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
212 238
213 mode_cmd.bpp = surface_bpp; 239 mode_cmd.bpp = surface_bpp;
214 mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3); 240 mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
215 mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256); 241 mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
216 mode_cmd.depth = surface_depth; 242 mode_cmd.depth = surface_depth;
217 243
218 size = mode_cmd.pitch * mode_cmd.height; 244 size = mode_cmd.pitch * mode_cmd.height;
219 size = ALIGN(size, PAGE_SIZE); 245 size = roundup(size, PAGE_SIZE);
220 246
221 ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM, 247 ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
222 0, 0x0000, false, true, &nvbo); 248 0, 0x0000, false, true, &nvbo);
@@ -269,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
269 dev_priv->fbdev_info = info; 295 dev_priv->fbdev_info = info;
270 296
271 strcpy(info->fix.id, "nouveaufb"); 297 strcpy(info->fix.id, "nouveaufb");
272 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | 298 if (nouveau_nofbaccel)
273 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; 299 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
300 else
301 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
302 FBINFO_HWACCEL_FILLRECT |
303 FBINFO_HWACCEL_IMAGEBLIT;
274 info->fbops = &nouveau_fbcon_ops; 304 info->fbops = &nouveau_fbcon_ops;
275 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - 305 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
276 dev_priv->vm_vram_base; 306 dev_priv->vm_vram_base;
@@ -318,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
318 par->nouveau_fb = nouveau_fb; 348 par->nouveau_fb = nouveau_fb;
319 par->dev = dev; 349 par->dev = dev;
320 350
321 if (dev_priv->channel) { 351 if (dev_priv->channel && !nouveau_nofbaccel) {
322 switch (dev_priv->card_type) { 352 switch (dev_priv->card_type) {
323 case NV_50: 353 case NV_50:
324 nv50_fbcon_accel_init(info); 354 nv50_fbcon_accel_init(info);
355 info->fbops = &nv50_fbcon_ops;
325 break; 356 break;
326 default: 357 default:
327 nv04_fbcon_accel_init(info); 358 nv04_fbcon_accel_init(info);
359 info->fbops = &nv04_fbcon_ops;
328 break; 360 break;
329 }; 361 };
330 } 362 }
@@ -380,3 +412,12 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
380 412
381 return 0; 413 return 0;
382} 414}
415
416void nouveau_fbcon_gpu_lockup(struct fb_info *info)
417{
418 struct nouveau_fbcon_par *par = info->par;
419 struct drm_device *dev = par->dev;
420
421 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
422 info->flags |= FBINFO_HWACCEL_DISABLED;
423}
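
nouveau_fbcon_gpu_lockup() replaces the duplicated inline "switch to software fbcon" sequence, and the per-generation fb_ops tables mean the accelerated fillrect/copyarea/imageblit entry points are only installed once accel init has succeeded (or not at all when nofbaccel is set). A small model of funnelling every acceleration failure through one helper; the flag value stands in for fbdev's FBINFO_HWACCEL_DISABLED:

#include <stdio.h>

#define HWACCEL_DISABLED 0x1   /* stand-in for FBINFO_HWACCEL_DISABLED */

struct fbinfo { unsigned int flags; };

static void gpu_lockup(struct fbinfo *info)
{
	fprintf(stderr, "GPU lockup - switching to software fbcon\n");
	info->flags |= HWACCEL_DISABLED;
}

static int fb_sync(struct fbinfo *info, int ring_ok)
{
	if (info->flags & HWACCEL_DISABLED)
		return 0;              /* already in software mode */
	if (!ring_ok) {                /* e.g. RING_SPACE() failed */
		gpu_lockup(info);
		return 0;
	}
	return 0;                      /* wait for the GPU as usual */
}

int main(void)
{
	struct fbinfo info = { 0 };

	fb_sync(&info, 0);             /* first failure trips the flag */
	return !(info.flags & HWACCEL_DISABLED);
}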
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 8531140fedbc..f9c34e1a8c11 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,8 +40,15 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
40void nouveau_fbcon_restore(void); 40void nouveau_fbcon_restore(void);
41void nouveau_fbcon_zfill(struct drm_device *dev); 41void nouveau_fbcon_zfill(struct drm_device *dev);
42 42
43void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
44void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
45void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
43int nv04_fbcon_accel_init(struct fb_info *info); 46int nv04_fbcon_accel_init(struct fb_info *info);
47void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
48void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
49void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
44int nv50_fbcon_accel_init(struct fb_info *info); 50int nv50_fbcon_accel_init(struct fb_info *info);
45 51
52void nouveau_fbcon_gpu_lockup(struct fb_info *info);
46#endif /* __NV50_FBCON_H__ */ 53#endif /* __NV50_FBCON_H__ */
47 54
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index dacac9a0842a..faddf53ff9ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -142,7 +142,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
142 list_add_tail(&fence->entry, &chan->fence.pending); 142 list_add_tail(&fence->entry, &chan->fence.pending);
143 spin_unlock_irqrestore(&chan->fence.lock, flags); 143 spin_unlock_irqrestore(&chan->fence.lock, flags);
144 144
145 BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1); 145 BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
146 OUT_RING(chan, fence->sequence); 146 OUT_RING(chan, fence->sequence);
147 FIRE_RING(chan); 147 FIRE_RING(chan);
148 148
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 18fd8ac9fca7..70cc30803e3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -220,7 +220,6 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
220} 220}
221 221
222struct validate_op { 222struct validate_op {
223 struct nouveau_fence *fence;
224 struct list_head vram_list; 223 struct list_head vram_list;
225 struct list_head gart_list; 224 struct list_head gart_list;
226 struct list_head both_list; 225 struct list_head both_list;
@@ -252,17 +251,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
252} 251}
253 252
254static void 253static void
255validate_fini(struct validate_op *op, bool success) 254validate_fini(struct validate_op *op, struct nouveau_fence* fence)
256{ 255{
257 struct nouveau_fence *fence = op->fence; 256 validate_fini_list(&op->vram_list, fence);
258 257 validate_fini_list(&op->gart_list, fence);
259 if (unlikely(!success)) 258 validate_fini_list(&op->both_list, fence);
260 op->fence = NULL;
261
262 validate_fini_list(&op->vram_list, op->fence);
263 validate_fini_list(&op->gart_list, op->fence);
264 validate_fini_list(&op->both_list, op->fence);
265 nouveau_fence_unref((void *)&fence);
266} 259}
267 260
268static int 261static int
@@ -328,6 +321,7 @@ retry:
328 else { 321 else {
329 NV_ERROR(dev, "invalid valid domains: 0x%08x\n", 322 NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
330 b->valid_domains); 323 b->valid_domains);
324 list_add_tail(&nvbo->entry, &op->both_list);
331 validate_fini(op, NULL); 325 validate_fini(op, NULL);
332 return -EINVAL; 326 return -EINVAL;
333 } 327 }
@@ -420,10 +414,6 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
420 INIT_LIST_HEAD(&op->gart_list); 414 INIT_LIST_HEAD(&op->gart_list);
421 INIT_LIST_HEAD(&op->both_list); 415 INIT_LIST_HEAD(&op->both_list);
422 416
423 ret = nouveau_fence_new(chan, &op->fence, false);
424 if (ret)
425 return ret;
426
427 if (nr_buffers == 0) 417 if (nr_buffers == 0)
428 return 0; 418 return 0;
429 419
@@ -477,13 +467,14 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
477static int 467static int
478nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo, 468nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
479 struct drm_nouveau_gem_pushbuf_bo *bo, 469 struct drm_nouveau_gem_pushbuf_bo *bo,
480 int nr_relocs, uint64_t ptr_relocs, 470 unsigned nr_relocs, uint64_t ptr_relocs,
481 int nr_dwords, int first_dword, 471 unsigned nr_dwords, unsigned first_dword,
482 uint32_t *pushbuf, bool is_iomem) 472 uint32_t *pushbuf, bool is_iomem)
483{ 473{
484 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; 474 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
485 struct drm_device *dev = chan->dev; 475 struct drm_device *dev = chan->dev;
486 int ret = 0, i; 476 int ret = 0;
477 unsigned i;
487 478
488 reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc)); 479 reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
489 if (IS_ERR(reloc)) 480 if (IS_ERR(reloc))
@@ -541,6 +532,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
541 struct drm_nouveau_gem_pushbuf_bo *bo = NULL; 532 struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
542 struct nouveau_channel *chan; 533 struct nouveau_channel *chan;
543 struct validate_op op; 534 struct validate_op op;
 535 struct nouveau_fence *fence = NULL;
544 uint32_t *pushbuf = NULL; 536 uint32_t *pushbuf = NULL;
545 int ret = 0, do_reloc = 0, i; 537 int ret = 0, do_reloc = 0, i;
546 538
@@ -597,7 +589,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
597 589
598 OUT_RINGp(chan, pushbuf, req->nr_dwords); 590 OUT_RINGp(chan, pushbuf, req->nr_dwords);
599 591
600 ret = nouveau_fence_emit(op.fence); 592 ret = nouveau_fence_new(chan, &fence, true);
601 if (ret) { 593 if (ret) {
602 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); 594 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
603 WIND_RING(chan); 595 WIND_RING(chan);
@@ -605,7 +597,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
605 } 597 }
606 598
607 if (nouveau_gem_pushbuf_sync(chan)) { 599 if (nouveau_gem_pushbuf_sync(chan)) {
608 ret = nouveau_fence_wait(op.fence, NULL, false, false); 600 ret = nouveau_fence_wait(fence, NULL, false, false);
609 if (ret) { 601 if (ret) {
610 for (i = 0; i < req->nr_dwords; i++) 602 for (i = 0; i < req->nr_dwords; i++)
611 NV_ERROR(dev, "0x%08x\n", pushbuf[i]); 603 NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
@@ -614,7 +606,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
614 } 606 }
615 607
616out: 608out:
617 validate_fini(&op, ret == 0); 609 validate_fini(&op, fence);
 610 nouveau_fence_unref((void **)&fence);
618 mutex_unlock(&dev->struct_mutex); 611 mutex_unlock(&dev->struct_mutex);
619 kfree(pushbuf); 612 kfree(pushbuf);
620 kfree(bo); 613 kfree(bo);
@@ -634,6 +627,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
634 struct drm_gem_object *gem; 627 struct drm_gem_object *gem;
635 struct nouveau_bo *pbbo; 628 struct nouveau_bo *pbbo;
636 struct validate_op op; 629 struct validate_op op;
 630 struct nouveau_fence *fence = NULL;
637 int i, ret = 0, do_reloc = 0; 631 int i, ret = 0, do_reloc = 0;
638 632
639 NOUVEAU_CHECK_INITIALISED_WITH_RETURN; 633 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
@@ -675,6 +669,18 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
675 } 669 }
676 pbbo = nouveau_gem_object(gem); 670 pbbo = nouveau_gem_object(gem);
677 671
672 if ((req->offset & 3) || req->nr_dwords < 2 ||
673 (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
674 (unsigned long)req->nr_dwords >
 675 ((unsigned long)(pbbo->bo.mem.size - req->offset) >> 2)) {
676 NV_ERROR(dev, "pb call misaligned or out of bounds: "
677 "%d + %d * 4 > %ld\n",
678 req->offset, req->nr_dwords, pbbo->bo.mem.size);
679 ret = -EINVAL;
680 drm_gem_object_unreference(gem);
681 goto out;
682 }
683
678 ret = ttm_bo_reserve(&pbbo->bo, false, false, true, 684 ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
679 chan->fence.sequence); 685 chan->fence.sequence);
680 if (ret) { 686 if (ret) {
@@ -772,7 +778,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
772 OUT_RING(chan, 0); 778 OUT_RING(chan, 0);
773 } 779 }
774 780
775 ret = nouveau_fence_emit(op.fence); 781 ret = nouveau_fence_new(chan, &fence, true);
776 if (ret) { 782 if (ret) {
777 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); 783 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
778 WIND_RING(chan); 784 WIND_RING(chan);
@@ -780,7 +786,8 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
780 } 786 }
781 787
782out: 788out:
783 validate_fini(&op, ret == 0); 789 validate_fini(&op, fence);
 790 nouveau_fence_unref((void **)&fence);
784 mutex_unlock(&dev->struct_mutex); 791 mutex_unlock(&dev->struct_mutex);
785 kfree(bo); 792 kfree(bo);
786 793
@@ -918,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
918 } 925 }
919 926
920 if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { 927 if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
928 spin_lock(&nvbo->bo.lock);
921 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); 929 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
930 spin_unlock(&nvbo->bo.lock);
922 } else { 931 } else {
923 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); 932 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
924 if (ret == 0) 933 if (ret == 0)
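
Beyond the fence-lifetime rework, where the fence is now created only after submission and validate_fini() takes it as an argument instead of owning one, pushbuf_call gains a bounds check ordered so the arithmetic cannot wrap: alignment first, then the offset against the buffer size, then the dword count against the space that remains. A standalone version of that check; check_call is a made-up name:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int check_call(unsigned long bo_size, uint32_t offset,
		      uint32_t nr_dwords)
{
	if ((offset & 3) || nr_dwords < 2)
		return -EINVAL;           /* misaligned or too short */
	if ((unsigned long)offset > bo_size)
		return -EINVAL;           /* starts beyond the buffer */
	if ((unsigned long)nr_dwords > ((bo_size - offset) >> 2))
		return -EINVAL;           /* length overruns the end */
	return 0;
}

int main(void)
{
	printf("%d\n", check_call(4096, 4092, 2));  /* -EINVAL */
	printf("%d\n", check_call(4096, 0, 2));     /* 0 */
	return 0;
}

Comparing nr_dwords against the remaining space, rather than computing offset + nr_dwords * 4, is what keeps a huge nr_dwords from overflowing past the size test.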
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index 419f4c2b3b89..c7ebec696747 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev)
97 } 97 }
98 98
99 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); 99 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
100 if (!pgraph->ctxprog) { 100 if (!pgraph->ctxvals) {
101 NV_ERROR(dev, "OOM copying ctxprog\n"); 101 NV_ERROR(dev, "OOM copying ctxvals\n");
102 release_firmware(fw); 102 release_firmware(fw);
103 nouveau_grctx_fini(dev); 103 nouveau_grctx_fini(dev);
104 return -ENOMEM; 104 return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 370c72c968d1..447f9f69d6b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
211 get + 4); 211 get + 4);
212 } 212 }
213 213
214 if (status & NV_PFIFO_INTR_SEMAPHORE) {
215 uint32_t sem;
216
217 status &= ~NV_PFIFO_INTR_SEMAPHORE;
218 nv_wr32(dev, NV03_PFIFO_INTR_0,
219 NV_PFIFO_INTR_SEMAPHORE);
220
221 sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
222 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
223
224 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
225 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
226 }
227
214 if (status) { 228 if (status) {
215 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", 229 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
216 status, chid); 230 status, chid);
@@ -483,6 +497,13 @@ nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
483 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { 497 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
484 if (nouveau_pgraph_intr_swmthd(dev, &trap)) 498 if (nouveau_pgraph_intr_swmthd(dev, &trap))
485 unhandled = 1; 499 unhandled = 1;
500 } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
501 uint32_t v = nv_rd32(dev, 0x402000);
502 nv_wr32(dev, 0x402000, v);
503
504 /* dump the error anyway for now: it's useful for
505 Gallium development */
506 unhandled = 1;
486 } else { 507 } else {
487 unhandled = 1; 508 unhandled = 1;
488 } 509 }
@@ -559,85 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
559static void 580static void
560nv50_pgraph_irq_handler(struct drm_device *dev) 581nv50_pgraph_irq_handler(struct drm_device *dev)
561{ 582{
562 uint32_t status, nsource; 583 uint32_t status;
563 584
564 status = nv_rd32(dev, NV03_PGRAPH_INTR); 585 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
565 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); 586 uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
566 587
567 if (status & 0x00000001) { 588 if (status & 0x00000001) {
568 nouveau_pgraph_intr_notify(dev, nsource); 589 nouveau_pgraph_intr_notify(dev, nsource);
569 status &= ~0x00000001; 590 status &= ~0x00000001;
570 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); 591 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
571 } 592 }
572 593
573 if (status & 0x00000010) { 594 if (status & 0x00000010) {
574 nouveau_pgraph_intr_error(dev, nsource | 595 nouveau_pgraph_intr_error(dev, nsource |
575 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); 596 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
576 597
577 status &= ~0x00000010; 598 status &= ~0x00000010;
578 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); 599 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
579 } 600 }
580 601
581 if (status & 0x00001000) { 602 if (status & 0x00001000) {
582 nv_wr32(dev, 0x400500, 0x00000000); 603 nv_wr32(dev, 0x400500, 0x00000000);
583 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); 604 nv_wr32(dev, NV03_PGRAPH_INTR,
584 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, 605 NV_PGRAPH_INTR_CONTEXT_SWITCH);
585 NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH); 606 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
586 nv_wr32(dev, 0x400500, 0x00010001); 607 NV40_PGRAPH_INTR_EN) &
608 ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
609 nv_wr32(dev, 0x400500, 0x00010001);
587 610
588 nv50_graph_context_switch(dev); 611 nv50_graph_context_switch(dev);
589 612
590 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 613 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
591 } 614 }
592 615
593 if (status & 0x00100000) { 616 if (status & 0x00100000) {
594 nouveau_pgraph_intr_error(dev, nsource | 617 nouveau_pgraph_intr_error(dev, nsource |
595 NV03_PGRAPH_NSOURCE_DATA_ERROR); 618 NV03_PGRAPH_NSOURCE_DATA_ERROR);
596 619
597 status &= ~0x00100000; 620 status &= ~0x00100000;
598 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); 621 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
599 } 622 }
600 623
601 if (status & 0x00200000) { 624 if (status & 0x00200000) {
602 int r; 625 int r;
603 626
604 nouveau_pgraph_intr_error(dev, nsource | 627 nouveau_pgraph_intr_error(dev, nsource |
605 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); 628 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
606 629
607 NV_ERROR(dev, "magic set 1:\n"); 630 NV_ERROR(dev, "magic set 1:\n");
608 for (r = 0x408900; r <= 0x408910; r += 4) 631 for (r = 0x408900; r <= 0x408910; r += 4)
609 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 632 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
610 nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000); 633 nv_rd32(dev, r));
611 for (r = 0x408e08; r <= 0x408e24; r += 4) 634 nv_wr32(dev, 0x408900,
612 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 635 nv_rd32(dev, 0x408904) | 0xc0000000);
613 nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000); 636 for (r = 0x408e08; r <= 0x408e24; r += 4)
614 637 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
615 NV_ERROR(dev, "magic set 2:\n"); 638 nv_rd32(dev, r));
616 for (r = 0x409900; r <= 0x409910; r += 4) 639 nv_wr32(dev, 0x408e08,
617 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 640 nv_rd32(dev, 0x408e08) | 0xc0000000);
618 nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000); 641
619 for (r = 0x409e08; r <= 0x409e24; r += 4) 642 NV_ERROR(dev, "magic set 2:\n");
620 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 643 for (r = 0x409900; r <= 0x409910; r += 4)
621 nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000); 644 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
622 645 nv_rd32(dev, r));
623 status &= ~0x00200000; 646 nv_wr32(dev, 0x409900,
624 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); 647 nv_rd32(dev, 0x409904) | 0xc0000000);
625 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); 648 for (r = 0x409e08; r <= 0x409e24; r += 4)
626 } 649 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
650 nv_rd32(dev, r));
651 nv_wr32(dev, 0x409e08,
652 nv_rd32(dev, 0x409e08) | 0xc0000000);
653
654 status &= ~0x00200000;
655 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
656 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
657 }
627 658
628 if (status) { 659 if (status) {
629 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); 660 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
630 nv_wr32(dev, NV03_PGRAPH_INTR, status); 661 status);
631 } 662 nv_wr32(dev, NV03_PGRAPH_INTR, status);
663 }
632 664
633 { 665 {
634 const int isb = (1 << 16) | (1 << 0); 666 const int isb = (1 << 16) | (1 << 0);
635 667
636 if ((nv_rd32(dev, 0x400500) & isb) != isb) 668 if ((nv_rd32(dev, 0x400500) & isb) != isb)
637 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); 669 nv_wr32(dev, 0x400500,
670 nv_rd32(dev, 0x400500) | isb);
671 }
638 } 672 }
639 673
640 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); 674 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
675 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
641} 676}
642 677
643static void 678static void
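
The rewritten handler above drains PGRAPH interrupts in a loop instead of acting on a single status read, acknowledging each handled source by writing its bit back to NV03_PGRAPH_INTR. A minimal user-space sketch of that drain-and-ack pattern; the register model and bit names here are stand-ins, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Toy interrupt register: on the real chip, writing a bit to
 * NV03_PGRAPH_INTR acknowledges (clears) it; modeled here by
 * masking the bit out. */
static uint32_t intr_reg;

static uint32_t read_intr(void)         { return intr_reg; }
static void     ack_intr(uint32_t bits) { intr_reg &= ~bits; }

#define INTR_NOTIFY 0x00000001
#define INTR_ERROR  0x00000010

static void handle_pgraph_irq(void)
{
	uint32_t status;

	/* Drain until nothing is pending: sources raised while we
	 * service earlier ones are caught by the re-read instead of
	 * being dropped, as they could be with a single read. */
	while ((status = read_intr())) {
		if (status & INTR_NOTIFY) {
			printf("notify interrupt\n");
			status &= ~INTR_NOTIFY;
			ack_intr(INTR_NOTIFY);
		}

		if (status & INTR_ERROR) {
			printf("error interrupt\n");
			status &= ~INTR_ERROR;
			ack_intr(INTR_ERROR);
		}

		if (status) {
			printf("unhandled bits 0x%08x\n", status);
			ack_intr(status); /* ack so we cannot spin forever */
		}
	}
}

int main(void)
{
	intr_reg = INTR_NOTIFY | INTR_ERROR;
	handle_pgraph_irq();
	return 0;
}
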
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 5158a12f7844..8f3a12f614ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -192,6 +192,92 @@ void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
192} 192}
193 193
194/* 194/*
195 * NV10-NV40 tiling helpers
196 */
197
198static void
199nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
200 uint32_t size, uint32_t pitch)
201{
202 struct drm_nouveau_private *dev_priv = dev->dev_private;
203 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
204 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
205 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
206 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
207
208 tile->addr = addr;
209 tile->size = size;
210 tile->used = !!pitch;
211 nouveau_fence_unref((void **)&tile->fence);
212
213 if (!pfifo->cache_flush(dev))
214 return;
215
216 pfifo->reassign(dev, false);
217 pfifo->cache_flush(dev);
218 pfifo->cache_pull(dev, false);
219
220 nouveau_wait_for_idle(dev);
221
222 pgraph->set_region_tiling(dev, i, addr, size, pitch);
223 pfb->set_region_tiling(dev, i, addr, size, pitch);
224
225 pfifo->cache_pull(dev, true);
226 pfifo->reassign(dev, true);
227}
228
229struct nouveau_tile_reg *
230nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
231 uint32_t pitch)
232{
233 struct drm_nouveau_private *dev_priv = dev->dev_private;
234 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
235 struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
236 int i;
237
238 spin_lock(&dev_priv->tile.lock);
239
240 for (i = 0; i < pfb->num_tiles; i++) {
241 if (tile[i].used)
242 /* Tile region in use. */
243 continue;
244
245 if (tile[i].fence &&
246 !nouveau_fence_signalled(tile[i].fence, NULL))
247 /* Pending tile region. */
248 continue;
249
250 if (max(tile[i].addr, addr) <
251 min(tile[i].addr + tile[i].size, addr + size))
252 /* Kill an intersecting tile region. */
253 nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
254
255 if (pitch && !found) {
256 /* Free tile region. */
257 nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
258 found = &tile[i];
259 }
260 }
261
262 spin_unlock(&dev_priv->tile.lock);
263
264 return found;
265}
266
267void
268nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
269 struct nouveau_fence *fence)
270{
271 if (fence) {
272 /* Mark it as pending. */
273 tile->fence = fence;
274 nouveau_fence_ref(fence);
275 }
276
277 tile->used = false;
278}
279
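
The new helpers above manage a small fixed pool of hardware tiling regions: nv10_mem_set_tiling() disables any idle region that overlaps the requested range and hands out the first free, non-pending slot, while nv10_mem_expire_tiling() defers the actual release until the associated fence signals. A compact user-space sketch of that allocation logic, with a plain boolean standing in for the nouveau_fence:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tile_reg {
	uint32_t addr, size;
	bool used;
	bool pending;	/* stands in for the fence the driver tracks */
};

static uint32_t umax(uint32_t a, uint32_t b) { return a > b ? a : b; }
static uint32_t umin(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Hand out the first free, non-pending slot for [addr, addr+size),
 * disabling any idle region that overlaps the requested range. */
static struct tile_reg *
set_tiling(struct tile_reg *tile, int ntiles,
	   uint32_t addr, uint32_t size, uint32_t pitch)
{
	struct tile_reg *found = NULL;
	int i;

	for (i = 0; i < ntiles; i++) {
		if (tile[i].used || tile[i].pending)
			continue;

		if (umax(tile[i].addr, addr) <
		    umin(tile[i].addr + tile[i].size, addr + size)) {
			tile[i].addr = 0;	/* kill the overlap */
			tile[i].size = 0;
		}

		if (pitch && !found) {
			tile[i].addr = addr;
			tile[i].size = size;
			tile[i].used = true;
			found = &tile[i];
		}
	}
	return found;
}

/* Release a region; if GPU work may still reference it, leave it
 * pending (in the driver: until the fence signals). */
static void expire_tiling(struct tile_reg *tile, bool outstanding_work)
{
	tile->pending = outstanding_work;
	tile->used = false;
}

int main(void)
{
	struct tile_reg pool[2] = { 0 };
	struct tile_reg *t = set_tiling(pool, 2, 0x100000, 0x10000, 256);

	printf("got slot %d\n", (int)(t - pool));
	expire_tiling(t, false);
	return 0;
}
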
280/*
195 * NV50 VM helpers 281 * NV50 VM helpers
196 */ 282 */
197int 283int
@@ -297,9 +383,8 @@ void nouveau_mem_close(struct drm_device *dev)
297{ 383{
298 struct drm_nouveau_private *dev_priv = dev->dev_private; 384 struct drm_nouveau_private *dev_priv = dev->dev_private;
299 385
300 if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type) 386 nouveau_bo_unpin(dev_priv->vga_ram);
301 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0); 387 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
302 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
303 388
304 ttm_bo_device_release(&dev_priv->ttm.bdev); 389 ttm_bo_device_release(&dev_priv->ttm.bdev);
305 390
@@ -513,6 +598,7 @@ nouveau_mem_init(struct drm_device *dev)
513 598
514 INIT_LIST_HEAD(&dev_priv->ttm.bo_list); 599 INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
515 spin_lock_init(&dev_priv->ttm.bo_list_lock); 600 spin_lock_init(&dev_priv->ttm.bo_list_lock);
601 spin_lock_init(&dev_priv->tile.lock);
516 602
517 dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); 603 dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
518 604
@@ -535,6 +621,15 @@ nouveau_mem_init(struct drm_device *dev)
535 return ret; 621 return ret;
536 } 622 }
537 623
624 ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
625 0, 0, true, true, &dev_priv->vga_ram);
626 if (ret == 0)
627 ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
628 if (ret) {
629 NV_WARN(dev, "failed to reserve VGA memory\n");
630 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
631 }
632
538 /* GART */ 633 /* GART */
539#if !defined(__powerpc__) && !defined(__ia64__) 634#if !defined(__powerpc__) && !defined(__ia64__)
540 if (drm_device_is_agp(dev) && dev->agp) { 635 if (drm_device_is_agp(dev) && dev->agp) {
@@ -566,6 +661,7 @@ nouveau_mem_init(struct drm_device *dev)
566 dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), 661 dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
567 drm_get_resource_len(dev, 1), 662 drm_get_resource_len(dev, 1),
568 DRM_MTRR_WC); 663 DRM_MTRR_WC);
664
569 return 0; 665 return 0;
570} 666}
571 667
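
Note how the vga_ram reservation added to nouveau_mem_init() treats failure as non-fatal: it warns, drops the reference, and lets initialization continue. A sketch of that tolerate-and-continue pattern, with hypothetical buf_new()/buf_pin() helpers in place of nouveau_bo_new()/nouveau_bo_pin():

#include <stdio.h>
#include <stdlib.h>

struct buf { size_t size; };

/* Hypothetical helpers standing in for the driver's allocators. */
static int buf_new(size_t size, struct buf **out)
{
	*out = malloc(sizeof(**out));
	if (!*out)
		return -1;
	(*out)->size = size;
	return 0;
}

static int buf_pin(struct buf *b) { (void)b; return 0; }

static int reserve_vga_ram(struct buf **vga_ram)
{
	int ret = buf_new(256 * 1024, vga_ram);

	if (ret == 0)
		ret = buf_pin(*vga_ram);
	if (ret) {
		/* Non-fatal: warn, drop the buffer, keep going. */
		fprintf(stderr, "failed to reserve VGA memory\n");
		free(*vga_ram);
		*vga_ram = NULL;
	}
	return 0;	/* initialization continues either way */
}

int main(void)
{
	struct buf *vga_ram = NULL;

	reserve_vga_ram(&vga_ram);
	printf("vga_ram %s\n", vga_ram ? "reserved" : "unavailable");
	free(vga_ram);
	return 0;
}
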
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6c66a34b6345..d99dc087f9b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
34{ 34{
35 struct drm_device *dev = chan->dev; 35 struct drm_device *dev = chan->dev;
36 struct nouveau_bo *ntfy = NULL; 36 struct nouveau_bo *ntfy = NULL;
37 uint32_t flags;
37 int ret; 38 int ret;
38 39
39 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ? 40 if (nouveau_vram_notify)
40 TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT, 41 flags = TTM_PL_FLAG_VRAM;
42 else
43 flags = TTM_PL_FLAG_TT;
44
45 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
41 0, 0x0000, false, true, &ntfy); 46 0, 0x0000, false, true, &ntfy);
42 if (ret) 47 if (ret)
43 return ret; 48 return ret;
44 49
45 ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM); 50 ret = nouveau_bo_pin(ntfy, flags);
46 if (ret) 51 if (ret)
47 goto out_err; 52 goto out_err;
48 53
@@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
128 target = NV_DMA_TARGET_PCI; 133 target = NV_DMA_TARGET_PCI;
129 } else { 134 } else {
130 target = NV_DMA_TARGET_AGP; 135 target = NV_DMA_TARGET_AGP;
136 if (dev_priv->card_type >= NV_50)
137 offset += dev_priv->vm_gart_base;
131 } 138 }
132 } else { 139 } else {
133 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", 140 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
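
The notifier fix above computes the placement flags once and passes the same value to both allocation and pinning; previously a notifier allocated in GART was still pinned to VRAM. A sketch of the corrected flow, with stub helpers standing in for nouveau_gem_new()/nouveau_bo_pin():

#include <stdint.h>

#define PL_FLAG_VRAM (1u << 0)
#define PL_FLAG_TT   (1u << 1)

/* Stub stand-ins for the driver's allocation and pin calls. */
static int obj_new(uint32_t flags, int *obj) { (void)flags; *obj = 1; return 0; }
static int obj_pin(int obj, uint32_t flags)  { (void)obj; (void)flags; return 0; }

static int notifier_init(int vram_notify, int *ntfy)
{
	/* Decide the placement once... */
	uint32_t flags = vram_notify ? PL_FLAG_VRAM : PL_FLAG_TT;
	int ret;

	ret = obj_new(flags, ntfy);
	if (ret)
		return ret;

	/* ...and pin with the same placement the object was created
	 * with; always pinning to VRAM (the old behaviour) would
	 * force a migration for notifiers allocated in GART. */
	return obj_pin(*ntfy, flags);
}

int main(void)
{
	int ntfy;
	return notifier_init(0, &ntfy);
}
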
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 93379bb81bea..e7c100ba63a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -881,15 +881,16 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
881 return 0; 881 return 0;
882} 882}
883 883
884static int 884int
885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, 885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
886 struct nouveau_gpuobj **gpuobj_ret) 886 struct nouveau_gpuobj **gpuobj_ret)
887{ 887{
888 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 888 struct drm_nouveau_private *dev_priv;
889 struct nouveau_gpuobj *gpuobj; 889 struct nouveau_gpuobj *gpuobj;
890 890
891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) 891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
892 return -EINVAL; 892 return -EINVAL;
893 dev_priv = chan->dev->dev_private;
893 894
894 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); 895 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
895 if (!gpuobj) 896 if (!gpuobj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index fa1b0e7165b9..aa9b310e41be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -99,6 +99,7 @@
99 * the card will hang early on in the X init process. 99 * the card will hang early on in the X init process.
100 */ 100 */
101# define NV_PMC_ENABLE_UNK13 (1<<13) 101# define NV_PMC_ENABLE_UNK13 (1<<13)
102#define NV40_PMC_GRAPH_UNITS 0x00001540
102#define NV40_PMC_BACKLIGHT 0x000015f0 103#define NV40_PMC_BACKLIGHT 0x000015f0
103# define NV40_PMC_BACKLIGHT_MASK 0x001f0000 104# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
104#define NV40_PMC_1700 0x00001700 105#define NV40_PMC_1700 0x00001700
@@ -349,19 +350,19 @@
349#define NV04_PGRAPH_BLEND 0x00400824 350#define NV04_PGRAPH_BLEND 0x00400824
350#define NV04_PGRAPH_STORED_FMT 0x00400830 351#define NV04_PGRAPH_STORED_FMT 0x00400830
351#define NV04_PGRAPH_PATT_COLORRAM 0x00400900 352#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
352#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16)) 353#define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16))
353#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16)) 354#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
354#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16)) 355#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
355#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16)) 356#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
356#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) 357#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
357#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) 358#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
358#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) 359#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
359#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16)) 360#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
360#define NV04_PGRAPH_U_RAM 0x00400D00 361#define NV04_PGRAPH_U_RAM 0x00400D00
361#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16)) 362#define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16))
362#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16)) 363#define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16))
363#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16)) 364#define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16))
364#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16)) 365#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
365#define NV04_PGRAPH_V_RAM 0x00400D40 366#define NV04_PGRAPH_V_RAM 0x00400D40
366#define NV04_PGRAPH_W_RAM 0x00400D80 367#define NV04_PGRAPH_W_RAM 0x00400D80
367#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 368#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
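
The renamed NV20/NV47 macros above describe strided register banks: each tiling region i owns four 32-bit registers spaced 16 bytes apart. A small example printing the addresses the macros expand to (parentheses around i added here for macro hygiene; the header itself writes i*16):

#include <stdio.h>

/* Strided register bank: tiling region i owns four 32-bit registers
 * packed 16 bytes apart, exactly what the macros above encode. */
#define NV20_PGRAPH_TILE(i)    (0x00400900 + (i) * 16)
#define NV20_PGRAPH_TLIMIT(i)  (0x00400904 + (i) * 16)
#define NV20_PGRAPH_TSIZE(i)   (0x00400908 + (i) * 16)
#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i) * 16)

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("region %d: tile=0x%08x limit=0x%08x size=0x%08x\n",
		       i, NV20_PGRAPH_TILE(i), NV20_PGRAPH_TLIMIT(i),
		       NV20_PGRAPH_TSIZE(i));
	return 0;
}
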
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 4c7f1e403e80..ed1590577b6c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -54,11 +54,12 @@ static void
54nouveau_sgdma_clear(struct ttm_backend *be) 54nouveau_sgdma_clear(struct ttm_backend *be)
55{ 55{
56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
57 struct drm_device *dev = nvbe->dev; 57 struct drm_device *dev;
58
59 NV_DEBUG(nvbe->dev, "\n");
60 58
61 if (nvbe && nvbe->pages) { 59 if (nvbe && nvbe->pages) {
60 dev = nvbe->dev;
61 NV_DEBUG(dev, "\n");
62
62 if (nvbe->bound) 63 if (nvbe->bound)
63 be->func->unbind(be); 64 be->func->unbind(be);
64 65
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e76ec2d207a9..a4851af5b05e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -76,6 +76,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
76 engine->fifo.disable = nv04_fifo_disable; 76 engine->fifo.disable = nv04_fifo_disable;
77 engine->fifo.enable = nv04_fifo_enable; 77 engine->fifo.enable = nv04_fifo_enable;
78 engine->fifo.reassign = nv04_fifo_reassign; 78 engine->fifo.reassign = nv04_fifo_reassign;
79 engine->fifo.cache_flush = nv04_fifo_cache_flush;
80 engine->fifo.cache_pull = nv04_fifo_cache_pull;
79 engine->fifo.channel_id = nv04_fifo_channel_id; 81 engine->fifo.channel_id = nv04_fifo_channel_id;
80 engine->fifo.create_context = nv04_fifo_create_context; 82 engine->fifo.create_context = nv04_fifo_create_context;
81 engine->fifo.destroy_context = nv04_fifo_destroy_context; 83 engine->fifo.destroy_context = nv04_fifo_destroy_context;
@@ -100,6 +102,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
100 engine->timer.takedown = nv04_timer_takedown; 102 engine->timer.takedown = nv04_timer_takedown;
101 engine->fb.init = nv10_fb_init; 103 engine->fb.init = nv10_fb_init;
102 engine->fb.takedown = nv10_fb_takedown; 104 engine->fb.takedown = nv10_fb_takedown;
105 engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
103 engine->graph.grclass = nv10_graph_grclass; 106 engine->graph.grclass = nv10_graph_grclass;
104 engine->graph.init = nv10_graph_init; 107 engine->graph.init = nv10_graph_init;
105 engine->graph.takedown = nv10_graph_takedown; 108 engine->graph.takedown = nv10_graph_takedown;
@@ -109,12 +112,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
109 engine->graph.fifo_access = nv04_graph_fifo_access; 112 engine->graph.fifo_access = nv04_graph_fifo_access;
110 engine->graph.load_context = nv10_graph_load_context; 113 engine->graph.load_context = nv10_graph_load_context;
111 engine->graph.unload_context = nv10_graph_unload_context; 114 engine->graph.unload_context = nv10_graph_unload_context;
115 engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
112 engine->fifo.channels = 32; 116 engine->fifo.channels = 32;
113 engine->fifo.init = nv10_fifo_init; 117 engine->fifo.init = nv10_fifo_init;
114 engine->fifo.takedown = nouveau_stub_takedown; 118 engine->fifo.takedown = nouveau_stub_takedown;
115 engine->fifo.disable = nv04_fifo_disable; 119 engine->fifo.disable = nv04_fifo_disable;
116 engine->fifo.enable = nv04_fifo_enable; 120 engine->fifo.enable = nv04_fifo_enable;
117 engine->fifo.reassign = nv04_fifo_reassign; 121 engine->fifo.reassign = nv04_fifo_reassign;
122 engine->fifo.cache_flush = nv04_fifo_cache_flush;
123 engine->fifo.cache_pull = nv04_fifo_cache_pull;
118 engine->fifo.channel_id = nv10_fifo_channel_id; 124 engine->fifo.channel_id = nv10_fifo_channel_id;
119 engine->fifo.create_context = nv10_fifo_create_context; 125 engine->fifo.create_context = nv10_fifo_create_context;
120 engine->fifo.destroy_context = nv10_fifo_destroy_context; 126 engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -139,6 +145,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
139 engine->timer.takedown = nv04_timer_takedown; 145 engine->timer.takedown = nv04_timer_takedown;
140 engine->fb.init = nv10_fb_init; 146 engine->fb.init = nv10_fb_init;
141 engine->fb.takedown = nv10_fb_takedown; 147 engine->fb.takedown = nv10_fb_takedown;
148 engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
142 engine->graph.grclass = nv20_graph_grclass; 149 engine->graph.grclass = nv20_graph_grclass;
143 engine->graph.init = nv20_graph_init; 150 engine->graph.init = nv20_graph_init;
144 engine->graph.takedown = nv20_graph_takedown; 151 engine->graph.takedown = nv20_graph_takedown;
@@ -148,12 +155,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
148 engine->graph.fifo_access = nv04_graph_fifo_access; 155 engine->graph.fifo_access = nv04_graph_fifo_access;
149 engine->graph.load_context = nv20_graph_load_context; 156 engine->graph.load_context = nv20_graph_load_context;
150 engine->graph.unload_context = nv20_graph_unload_context; 157 engine->graph.unload_context = nv20_graph_unload_context;
158 engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
151 engine->fifo.channels = 32; 159 engine->fifo.channels = 32;
152 engine->fifo.init = nv10_fifo_init; 160 engine->fifo.init = nv10_fifo_init;
153 engine->fifo.takedown = nouveau_stub_takedown; 161 engine->fifo.takedown = nouveau_stub_takedown;
154 engine->fifo.disable = nv04_fifo_disable; 162 engine->fifo.disable = nv04_fifo_disable;
155 engine->fifo.enable = nv04_fifo_enable; 163 engine->fifo.enable = nv04_fifo_enable;
156 engine->fifo.reassign = nv04_fifo_reassign; 164 engine->fifo.reassign = nv04_fifo_reassign;
165 engine->fifo.cache_flush = nv04_fifo_cache_flush;
166 engine->fifo.cache_pull = nv04_fifo_cache_pull;
157 engine->fifo.channel_id = nv10_fifo_channel_id; 167 engine->fifo.channel_id = nv10_fifo_channel_id;
158 engine->fifo.create_context = nv10_fifo_create_context; 168 engine->fifo.create_context = nv10_fifo_create_context;
159 engine->fifo.destroy_context = nv10_fifo_destroy_context; 169 engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -178,6 +188,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
178 engine->timer.takedown = nv04_timer_takedown; 188 engine->timer.takedown = nv04_timer_takedown;
179 engine->fb.init = nv10_fb_init; 189 engine->fb.init = nv10_fb_init;
180 engine->fb.takedown = nv10_fb_takedown; 190 engine->fb.takedown = nv10_fb_takedown;
191 engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
181 engine->graph.grclass = nv30_graph_grclass; 192 engine->graph.grclass = nv30_graph_grclass;
182 engine->graph.init = nv30_graph_init; 193 engine->graph.init = nv30_graph_init;
183 engine->graph.takedown = nv20_graph_takedown; 194 engine->graph.takedown = nv20_graph_takedown;
@@ -187,12 +198,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
187 engine->graph.destroy_context = nv20_graph_destroy_context; 198 engine->graph.destroy_context = nv20_graph_destroy_context;
188 engine->graph.load_context = nv20_graph_load_context; 199 engine->graph.load_context = nv20_graph_load_context;
189 engine->graph.unload_context = nv20_graph_unload_context; 200 engine->graph.unload_context = nv20_graph_unload_context;
201 engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
190 engine->fifo.channels = 32; 202 engine->fifo.channels = 32;
191 engine->fifo.init = nv10_fifo_init; 203 engine->fifo.init = nv10_fifo_init;
192 engine->fifo.takedown = nouveau_stub_takedown; 204 engine->fifo.takedown = nouveau_stub_takedown;
193 engine->fifo.disable = nv04_fifo_disable; 205 engine->fifo.disable = nv04_fifo_disable;
194 engine->fifo.enable = nv04_fifo_enable; 206 engine->fifo.enable = nv04_fifo_enable;
195 engine->fifo.reassign = nv04_fifo_reassign; 207 engine->fifo.reassign = nv04_fifo_reassign;
208 engine->fifo.cache_flush = nv04_fifo_cache_flush;
209 engine->fifo.cache_pull = nv04_fifo_cache_pull;
196 engine->fifo.channel_id = nv10_fifo_channel_id; 210 engine->fifo.channel_id = nv10_fifo_channel_id;
197 engine->fifo.create_context = nv10_fifo_create_context; 211 engine->fifo.create_context = nv10_fifo_create_context;
198 engine->fifo.destroy_context = nv10_fifo_destroy_context; 212 engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -218,6 +232,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
218 engine->timer.takedown = nv04_timer_takedown; 232 engine->timer.takedown = nv04_timer_takedown;
219 engine->fb.init = nv40_fb_init; 233 engine->fb.init = nv40_fb_init;
220 engine->fb.takedown = nv40_fb_takedown; 234 engine->fb.takedown = nv40_fb_takedown;
235 engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
221 engine->graph.grclass = nv40_graph_grclass; 236 engine->graph.grclass = nv40_graph_grclass;
222 engine->graph.init = nv40_graph_init; 237 engine->graph.init = nv40_graph_init;
223 engine->graph.takedown = nv40_graph_takedown; 238 engine->graph.takedown = nv40_graph_takedown;
@@ -227,12 +242,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
227 engine->graph.destroy_context = nv40_graph_destroy_context; 242 engine->graph.destroy_context = nv40_graph_destroy_context;
228 engine->graph.load_context = nv40_graph_load_context; 243 engine->graph.load_context = nv40_graph_load_context;
229 engine->graph.unload_context = nv40_graph_unload_context; 244 engine->graph.unload_context = nv40_graph_unload_context;
245 engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
230 engine->fifo.channels = 32; 246 engine->fifo.channels = 32;
231 engine->fifo.init = nv40_fifo_init; 247 engine->fifo.init = nv40_fifo_init;
232 engine->fifo.takedown = nouveau_stub_takedown; 248 engine->fifo.takedown = nouveau_stub_takedown;
233 engine->fifo.disable = nv04_fifo_disable; 249 engine->fifo.disable = nv04_fifo_disable;
234 engine->fifo.enable = nv04_fifo_enable; 250 engine->fifo.enable = nv04_fifo_enable;
235 engine->fifo.reassign = nv04_fifo_reassign; 251 engine->fifo.reassign = nv04_fifo_reassign;
252 engine->fifo.cache_flush = nv04_fifo_cache_flush;
253 engine->fifo.cache_pull = nv04_fifo_cache_pull;
236 engine->fifo.channel_id = nv10_fifo_channel_id; 254 engine->fifo.channel_id = nv10_fifo_channel_id;
237 engine->fifo.create_context = nv40_fifo_create_context; 255 engine->fifo.create_context = nv40_fifo_create_context;
238 engine->fifo.destroy_context = nv40_fifo_destroy_context; 256 engine->fifo.destroy_context = nv40_fifo_destroy_context;
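
These hunks all extend the per-chipset engine tables: every GPU family fills the same fifo.cache_flush/cache_pull and fb/graph set_region_tiling slots with its own implementation, and callers dispatch through the pointers without knowing the chip generation. A self-contained sketch of that vtable pattern; the struct layout and names are illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical slice of the per-chipset engine table. */
struct fifo_engine {
	bool (*cache_flush)(void);
	bool (*cache_pull)(bool enable);
};

static bool nv04_cache_flush(void)   { puts("nv04 flush"); return true; }
static bool nv04_cache_pull(bool en) { printf("nv04 pull %d\n", en); return true; }

static void init_engine_ptrs(struct fifo_engine *fifo, int card_type)
{
	switch (card_type) {
	case 0x04:
	case 0x10:
	case 0x20:
	case 0x30:
	case 0x40:
		/* NV04 through NV40 share the same PFIFO cache hooks,
		 * as the hunks above wire up. */
		fifo->cache_flush = nv04_cache_flush;
		fifo->cache_pull  = nv04_cache_pull;
		break;
	}
}

int main(void)
{
	struct fifo_engine fifo;

	init_engine_ptrs(&fifo, 0x10);
	fifo.cache_flush();
	fifo.cache_pull(false);
	return 0;
}
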
@@ -292,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
292static unsigned int 310static unsigned int
293nouveau_vga_set_decode(void *priv, bool state) 311nouveau_vga_set_decode(void *priv, bool state)
294{ 312{
313 struct drm_device *dev = priv;
314 struct drm_nouveau_private *dev_priv = dev->dev_private;
315
316 if (dev_priv->chipset >= 0x40)
317 nv_wr32(dev, 0x88054, state);
318 else
319 nv_wr32(dev, 0x1854, state);
320
295 if (state) 321 if (state)
296 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 322 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
297 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 323 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -409,15 +435,19 @@ nouveau_card_init(struct drm_device *dev)
409 if (ret) 435 if (ret)
410 goto out_timer; 436 goto out_timer;
411 437
412 /* PGRAPH */ 438 if (nouveau_noaccel)
413 ret = engine->graph.init(dev); 439 engine->graph.accel_blocked = true;
414 if (ret) 440 else {
415 goto out_fb; 441 /* PGRAPH */
442 ret = engine->graph.init(dev);
443 if (ret)
444 goto out_fb;
416 445
417 /* PFIFO */ 446 /* PFIFO */
418 ret = engine->fifo.init(dev); 447 ret = engine->fifo.init(dev);
419 if (ret) 448 if (ret)
420 goto out_graph; 449 goto out_graph;
450 }
421 451
422 /* this call irq_preinstall, register irq handler and 452 /* this call irq_preinstall, register irq handler and
423 * call irq_postinstall 453 * call irq_postinstall
@@ -461,9 +491,11 @@ nouveau_card_init(struct drm_device *dev)
461out_irq: 491out_irq:
462 drm_irq_uninstall(dev); 492 drm_irq_uninstall(dev);
463out_fifo: 493out_fifo:
464 engine->fifo.takedown(dev); 494 if (!nouveau_noaccel)
495 engine->fifo.takedown(dev);
465out_graph: 496out_graph:
466 engine->graph.takedown(dev); 497 if (!nouveau_noaccel)
498 engine->graph.takedown(dev);
467out_fb: 499out_fb:
468 engine->fb.takedown(dev); 500 engine->fb.takedown(dev);
469out_timer: 501out_timer:
@@ -500,13 +532,16 @@ static void nouveau_card_takedown(struct drm_device *dev)
500 dev_priv->channel = NULL; 532 dev_priv->channel = NULL;
501 } 533 }
502 534
503 engine->fifo.takedown(dev); 535 if (!nouveau_noaccel) {
504 engine->graph.takedown(dev); 536 engine->fifo.takedown(dev);
537 engine->graph.takedown(dev);
538 }
505 engine->fb.takedown(dev); 539 engine->fb.takedown(dev);
506 engine->timer.takedown(dev); 540 engine->timer.takedown(dev);
507 engine->mc.takedown(dev); 541 engine->mc.takedown(dev);
508 542
509 mutex_lock(&dev->struct_mutex); 543 mutex_lock(&dev->struct_mutex);
544 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
510 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); 545 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
511 mutex_unlock(&dev->struct_mutex); 546 mutex_unlock(&dev->struct_mutex);
512 nouveau_sgdma_takedown(dev); 547 nouveau_sgdma_takedown(dev);
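
The nouveau_noaccel changes above keep init and teardown symmetric: if PGRAPH/PFIFO initialization is skipped, their takedown must be skipped too, or teardown would poke engines that were never brought up. A minimal sketch:

#include <stdbool.h>
#include <stdio.h>

static bool noaccel;	/* stand-in for the nouveau_noaccel module option */

static void graph_init(void)     { puts("graph init"); }
static void graph_takedown(void) { puts("graph takedown"); }
static void fifo_init(void)      { puts("fifo init"); }
static void fifo_takedown(void)  { puts("fifo takedown"); }

static void card_init(void)
{
	if (!noaccel) {
		graph_init();
		fifo_init();
	}
}

static void card_takedown(void)
{
	/* Mirror card_init(): only tear down what was brought up. */
	if (!noaccel) {
		fifo_takedown();
		graph_takedown();
	}
}

int main(void)
{
	noaccel = true;
	card_init();
	card_takedown();
	return 0;
}
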
@@ -624,7 +659,10 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
624 dev_priv->chipset = (reg0 & 0xff00000) >> 20; 659 dev_priv->chipset = (reg0 & 0xff00000) >> 20;
625 /* NV04 or NV05 */ 660 /* NV04 or NV05 */
626 } else if ((reg0 & 0xff00fff0) == 0x20004000) { 661 } else if ((reg0 & 0xff00fff0) == 0x20004000) {
627 dev_priv->chipset = 0x04; 662 if (reg0 & 0x00f00000)
663 dev_priv->chipset = 0x05;
664 else
665 dev_priv->chipset = 0x04;
628 } else 666 } else
629 dev_priv->chipset = 0xff; 667 dev_priv->chipset = 0xff;
630 668
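
The chipset fix distinguishes NV05 from NV04 boards, which share the 0x20004000 signature in boot register 0, by testing bits 23:20. A sketch of the decode; the guard for the NV10-and-later branch is assumed from context, since it sits above the hunk shown:

#include <stdint.h>
#include <stdio.h>

static int chipset_from_reg0(uint32_t reg0)
{
	/* NV04 or NV05: same signature, told apart by bits 23:20. */
	if ((reg0 & 0xff00fff0) == 0x20004000)
		return (reg0 & 0x00f00000) ? 0x05 : 0x04;

	/* NV10 and later (this guard is assumed, not shown in the
	 * hunk): the chipset id lives in bits 27:20. */
	return (reg0 & 0x0ff00000) >> 20;
}

int main(void)
{
	printf("0x%02x\n", chipset_from_reg0(0x20004000)); /* NV04 */
	printf("0x%02x\n", chipset_from_reg0(0x20104000)); /* NV05 */
	return 0;
}
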
@@ -704,8 +742,8 @@ static void nouveau_close(struct drm_device *dev)
704{ 742{
705 struct drm_nouveau_private *dev_priv = dev->dev_private; 743 struct drm_nouveau_private *dev_priv = dev->dev_private;
706 744
707 /* In the case of an error dev_priv may not be be allocated yet */ 745 /* In the case of an error dev_priv may not be allocated yet */
708 if (dev_priv && dev_priv->card_type) 746 if (dev_priv)
709 nouveau_card_takedown(dev); 747 nouveau_card_takedown(dev);
710} 748}
711 749
@@ -795,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
795 case NOUVEAU_GETPARAM_VM_VRAM_BASE: 833 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
796 getparam->value = dev_priv->vm_vram_base; 834 getparam->value = dev_priv->vm_vram_base;
797 break; 835 break;
836 case NOUVEAU_GETPARAM_GRAPH_UNITS:
837 /* NV40 and NV50 versions are quite different, but register
838 * address is the same. User is supposed to know the card
839 * family anyway... */
840 if (dev_priv->chipset >= 0x40) {
841 getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
842 break;
843 }
844 /* FALLTHRU */
798 default: 845 default:
799 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); 846 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
800 return -EINVAL; 847 return -EINVAL;
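
The GRAPH_UNITS case above handles the parameter only on NV40 and later and deliberately falls through to the error path otherwise, marking the fallthrough with a comment. A self-contained sketch of that switch shape; the register value is faked here:

#include <stdio.h>

#define GETPARAM_GRAPH_UNITS 1

static int getparam(int param, int chipset, long *value)
{
	switch (param) {
	case GETPARAM_GRAPH_UNITS:
		if (chipset >= 0x40) {
			*value = 0x12345678;	/* would be a register read */
			break;
		}
		/* FALLTHRU: unsupported chip, treat as unknown param */
	default:
		fprintf(stderr, "unknown parameter %d\n", param);
		return -1;
	}
	return 0;
}

int main(void)
{
	long v;

	printf("%d\n", getparam(GETPARAM_GRAPH_UNITS, 0x30, &v)); /* -1 */
	printf("%d\n", getparam(GETPARAM_GRAPH_UNITS, 0x40, &v)); /*  0 */
	return 0;
}
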
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 187eb84e4da5..c385d50f041b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,45 +28,17 @@
28 28
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30 30
31static struct vm_operations_struct nouveau_ttm_vm_ops;
32static const struct vm_operations_struct *ttm_vm_ops;
33
34static int
35nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36{
37 struct ttm_buffer_object *bo = vma->vm_private_data;
38 int ret;
39
40 if (unlikely(bo == NULL))
41 return VM_FAULT_NOPAGE;
42
43 ret = ttm_vm_ops->fault(vma, vmf);
44 return ret;
45}
46
47int 31int
48nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) 32nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
49{ 33{
50 struct drm_file *file_priv = filp->private_data; 34 struct drm_file *file_priv = filp->private_data;
51 struct drm_nouveau_private *dev_priv = 35 struct drm_nouveau_private *dev_priv =
52 file_priv->minor->dev->dev_private; 36 file_priv->minor->dev->dev_private;
53 int ret;
54 37
55 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 38 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
56 return drm_mmap(filp, vma); 39 return drm_mmap(filp, vma);
57 40
58 ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev); 41 return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
59 if (unlikely(ret != 0))
60 return ret;
61
62 if (unlikely(ttm_vm_ops == NULL)) {
63 ttm_vm_ops = vma->vm_ops;
64 nouveau_ttm_vm_ops = *ttm_vm_ops;
65 nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault;
66 }
67
68 vma->vm_ops = &nouveau_ttm_vm_ops;
69 return 0;
70} 42}
71 43
72static int 44static int
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index d9f32879ba38..d0e038d28948 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -212,16 +212,15 @@ out:
212 return connector_status_disconnected; 212 return connector_status_disconnected;
213} 213}
214 214
215enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder, 215uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
216 struct drm_connector *connector)
217{ 216{
218 struct drm_device *dev = encoder->dev; 217 struct drm_device *dev = encoder->dev;
219 struct drm_nouveau_private *dev_priv = dev->dev_private; 218 struct drm_nouveau_private *dev_priv = dev->dev_private;
220 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 219 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
221 uint32_t testval, regoffset = nv04_dac_output_offset(encoder); 220 uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
222 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, 221 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
223 saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput; 222 saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput;
224 int head, present = 0; 223 int head;
225 224
226#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) 225#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
227 if (dcb->type == OUTPUT_TV) { 226 if (dcb->type == OUTPUT_TV) {
@@ -287,13 +286,7 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
287 temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED); 286 temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
288 msleep(5); 287 msleep(5);
289 288
290 temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); 289 sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
291
292 if (dcb->type == OUTPUT_TV)
293 present = (nv17_tv_detect(encoder, connector, temp)
294 == connector_status_connected);
295 else
296 present = temp & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI;
297 290
298 temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); 291 temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
299 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, 292 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
@@ -310,15 +303,25 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
310 nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1); 303 nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
311 nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0); 304 nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
312 305
313 if (present) { 306 return sample;
314 NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or)); 307}
308
309static enum drm_connector_status
310nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
311{
312 struct drm_device *dev = encoder->dev;
313 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
314 uint32_t sample = nv17_dac_sample_load(encoder);
315
316 if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
317 NV_INFO(dev, "Load detected on output %c\n",
318 '@' + ffs(dcb->or));
315 return connector_status_connected; 319 return connector_status_connected;
320 } else {
321 return connector_status_disconnected;
316 } 322 }
317
318 return connector_status_disconnected;
319} 323}
320 324
321
322static bool nv04_dac_mode_fixup(struct drm_encoder *encoder, 325static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
323 struct drm_display_mode *mode, 326 struct drm_display_mode *mode,
324 struct drm_display_mode *adjusted_mode) 327 struct drm_display_mode *adjusted_mode)
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 09a31071ee58..fd01caabd5c3 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -27,7 +27,7 @@
27#include "nouveau_dma.h" 27#include "nouveau_dma.h"
28#include "nouveau_fbcon.h" 28#include "nouveau_fbcon.h"
29 29
30static void 30void
31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
32{ 32{
33 struct nouveau_fbcon_par *par = info->par; 33 struct nouveau_fbcon_par *par = info->par;
@@ -39,8 +39,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
39 return; 39 return;
40 40
41 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) { 41 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
42 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 42 nouveau_fbcon_gpu_lockup(info);
43 info->flags |= FBINFO_HWACCEL_DISABLED;
44 } 43 }
45 44
46 if (info->flags & FBINFO_HWACCEL_DISABLED) { 45 if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -55,21 +54,19 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
55 FIRE_RING(chan); 54 FIRE_RING(chan);
56} 55}
57 56
58static void 57void
59nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 58nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
60{ 59{
61 struct nouveau_fbcon_par *par = info->par; 60 struct nouveau_fbcon_par *par = info->par;
62 struct drm_device *dev = par->dev; 61 struct drm_device *dev = par->dev;
63 struct drm_nouveau_private *dev_priv = dev->dev_private; 62 struct drm_nouveau_private *dev_priv = dev->dev_private;
64 struct nouveau_channel *chan = dev_priv->channel; 63 struct nouveau_channel *chan = dev_priv->channel;
65 uint32_t color = ((uint32_t *) info->pseudo_palette)[rect->color];
66 64
67 if (info->state != FBINFO_STATE_RUNNING) 65 if (info->state != FBINFO_STATE_RUNNING)
68 return; 66 return;
69 67
70 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) { 68 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
71 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 69 nouveau_fbcon_gpu_lockup(info);
72 info->flags |= FBINFO_HWACCEL_DISABLED;
73 } 70 }
74 71
75 if (info->flags & FBINFO_HWACCEL_DISABLED) { 72 if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -80,14 +77,18 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
80 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); 77 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
81 OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); 78 OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
82 BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1); 79 BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
83 OUT_RING(chan, color); 80 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
81 info->fix.visual == FB_VISUAL_DIRECTCOLOR)
82 OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
83 else
84 OUT_RING(chan, rect->color);
84 BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2); 85 BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
85 OUT_RING(chan, (rect->dx << 16) | rect->dy); 86 OUT_RING(chan, (rect->dx << 16) | rect->dy);
86 OUT_RING(chan, (rect->width << 16) | rect->height); 87 OUT_RING(chan, (rect->width << 16) | rect->height);
87 FIRE_RING(chan); 88 FIRE_RING(chan);
88} 89}
89 90
90static void 91void
91nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
92{ 93{
93 struct nouveau_fbcon_par *par = info->par; 94 struct nouveau_fbcon_par *par = info->par;
@@ -109,8 +110,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
109 } 110 }
110 111
111 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) { 112 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
112 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 113 nouveau_fbcon_gpu_lockup(info);
113 info->flags |= FBINFO_HWACCEL_DISABLED;
114 } 114 }
115 115
116 if (info->flags & FBINFO_HWACCEL_DISABLED) { 116 if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -144,8 +144,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
144 int iter_len = dsize > 128 ? 128 : dsize; 144 int iter_len = dsize > 128 ? 128 : dsize;
145 145
146 if (RING_SPACE(chan, iter_len + 1)) { 146 if (RING_SPACE(chan, iter_len + 1)) {
147 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 147 nouveau_fbcon_gpu_lockup(info);
148 info->flags |= FBINFO_HWACCEL_DISABLED;
149 cfb_imageblit(info, image); 148 cfb_imageblit(info, image);
150 return; 149 return;
151 } 150 }
@@ -184,6 +183,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
184 struct drm_device *dev = par->dev; 183 struct drm_device *dev = par->dev;
185 struct drm_nouveau_private *dev_priv = dev->dev_private; 184 struct drm_nouveau_private *dev_priv = dev->dev_private;
186 struct nouveau_channel *chan = dev_priv->channel; 185 struct nouveau_channel *chan = dev_priv->channel;
186 const int sub = NvSubCtxSurf2D;
187 int surface_fmt, pattern_fmt, rect_fmt; 187 int surface_fmt, pattern_fmt, rect_fmt;
188 int ret; 188 int ret;
189 189
@@ -242,30 +242,29 @@ nv04_fbcon_accel_init(struct fb_info *info)
242 return ret; 242 return ret;
243 243
244 if (RING_SPACE(chan, 49)) { 244 if (RING_SPACE(chan, 49)) {
245 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 245 nouveau_fbcon_gpu_lockup(info);
246 info->flags |= FBINFO_HWACCEL_DISABLED;
247 return 0; 246 return 0;
248 } 247 }
249 248
250 BEGIN_RING(chan, 1, 0x0000, 1); 249 BEGIN_RING(chan, sub, 0x0000, 1);
251 OUT_RING(chan, NvCtxSurf2D); 250 OUT_RING(chan, NvCtxSurf2D);
252 BEGIN_RING(chan, 1, 0x0184, 2); 251 BEGIN_RING(chan, sub, 0x0184, 2);
253 OUT_RING(chan, NvDmaFB); 252 OUT_RING(chan, NvDmaFB);
254 OUT_RING(chan, NvDmaFB); 253 OUT_RING(chan, NvDmaFB);
255 BEGIN_RING(chan, 1, 0x0300, 4); 254 BEGIN_RING(chan, sub, 0x0300, 4);
256 OUT_RING(chan, surface_fmt); 255 OUT_RING(chan, surface_fmt);
257 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); 256 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
258 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); 257 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
259 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); 258 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
260 259
261 BEGIN_RING(chan, 1, 0x0000, 1); 260 BEGIN_RING(chan, sub, 0x0000, 1);
262 OUT_RING(chan, NvRop); 261 OUT_RING(chan, NvRop);
263 BEGIN_RING(chan, 1, 0x0300, 1); 262 BEGIN_RING(chan, sub, 0x0300, 1);
264 OUT_RING(chan, 0x55); 263 OUT_RING(chan, 0x55);
265 264
266 BEGIN_RING(chan, 1, 0x0000, 1); 265 BEGIN_RING(chan, sub, 0x0000, 1);
267 OUT_RING(chan, NvImagePatt); 266 OUT_RING(chan, NvImagePatt);
268 BEGIN_RING(chan, 1, 0x0300, 8); 267 BEGIN_RING(chan, sub, 0x0300, 8);
269 OUT_RING(chan, pattern_fmt); 268 OUT_RING(chan, pattern_fmt);
270#ifdef __BIG_ENDIAN 269#ifdef __BIG_ENDIAN
271 OUT_RING(chan, 2); 270 OUT_RING(chan, 2);
@@ -279,9 +278,9 @@ nv04_fbcon_accel_init(struct fb_info *info)
279 OUT_RING(chan, ~0); 278 OUT_RING(chan, ~0);
280 OUT_RING(chan, ~0); 279 OUT_RING(chan, ~0);
281 280
282 BEGIN_RING(chan, 1, 0x0000, 1); 281 BEGIN_RING(chan, sub, 0x0000, 1);
283 OUT_RING(chan, NvClipRect); 282 OUT_RING(chan, NvClipRect);
284 BEGIN_RING(chan, 1, 0x0300, 2); 283 BEGIN_RING(chan, sub, 0x0300, 2);
285 OUT_RING(chan, 0); 284 OUT_RING(chan, 0);
286 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); 285 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
287 286
@@ -308,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info)
308 307
309 FIRE_RING(chan); 308 FIRE_RING(chan);
310 309
311 info->fbops->fb_fillrect = nv04_fbcon_fillrect;
312 info->fbops->fb_copyarea = nv04_fbcon_copyarea;
313 info->fbops->fb_imageblit = nv04_fbcon_imageblit;
314 return 0; 310 return 0;
315} 311}
316 312
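
All of the fbcon hunks replace the same duplicated two lines with a call to nouveau_fbcon_gpu_lockup(). Reconstructed from exactly those replaced lines, the helper plausibly looks like the following; the real definition lives elsewhere in the series and may differ in detail:

/* Reconstructed from the two lines every call site replaces. */
void
nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;

	NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
	info->flags |= FBINFO_HWACCEL_DISABLED;
}
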
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 0c3cd53c7313..f31347b8c9b0 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -71,6 +71,40 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable)
71 return (reassign == 1); 71 return (reassign == 1);
72} 72}
73 73
74bool
75nv04_fifo_cache_flush(struct drm_device *dev)
76{
77 struct drm_nouveau_private *dev_priv = dev->dev_private;
78 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
79 uint64_t start = ptimer->read(dev);
80
81 do {
82 if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) ==
83 nv_rd32(dev, NV03_PFIFO_CACHE1_PUT))
84 return true;
85
86 } while (ptimer->read(dev) - start < 100000000);
87
88 NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n");
89
90 return false;
91}
92
93bool
94nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
95{
96 uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0);
97
98 if (enable) {
99 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1);
100 } else {
101 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1);
102 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
103 }
104
105 return !!(pull & 1);
106}
107
74int 108int
75nv04_fifo_channel_id(struct drm_device *dev) 109nv04_fifo_channel_id(struct drm_device *dev)
76{ 110{
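
nv04_fifo_cache_flush() above is a classic bounded poll: re-read until the CACHE1 GET pointer catches up with PUT, giving up after 100 ms measured via the PTIMER engine. The same shape in portable user-space C, with CLOCK_MONOTONIC standing in for PTIMER:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Monotonic nanoseconds, standing in for the PTIMER engine read. */
static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static volatile uint32_t cache_get, cache_put;

/* Same shape as nv04_fifo_cache_flush(): spin until GET catches up
 * with PUT, bailing out after 100 ms. */
static bool cache_flush(void)
{
	uint64_t start = now_ns();

	do {
		if (cache_get == cache_put)
			return true;
	} while (now_ns() - start < 100000000ull);

	fprintf(stderr, "Timeout flushing the cache.\n");
	return false;
}

int main(void)
{
	cache_get = cache_put = 0;
	printf("%d\n", cache_flush());	/* 1: already flushed */
	return 0;
}
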
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index d561d773c0f4..e260986ea65a 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -28,6 +28,10 @@
28#include "nouveau_drv.h" 28#include "nouveau_drv.h"
29 29
30static uint32_t nv04_graph_ctx_regs[] = { 30static uint32_t nv04_graph_ctx_regs[] = {
31 0x0040053c,
32 0x00400544,
33 0x00400540,
34 0x00400548,
31 NV04_PGRAPH_CTX_SWITCH1, 35 NV04_PGRAPH_CTX_SWITCH1,
32 NV04_PGRAPH_CTX_SWITCH2, 36 NV04_PGRAPH_CTX_SWITCH2,
33 NV04_PGRAPH_CTX_SWITCH3, 37 NV04_PGRAPH_CTX_SWITCH3,
@@ -102,69 +106,69 @@ static uint32_t nv04_graph_ctx_regs[] = {
102 NV04_PGRAPH_PATT_COLOR0, 106 NV04_PGRAPH_PATT_COLOR0,
103 NV04_PGRAPH_PATT_COLOR1, 107 NV04_PGRAPH_PATT_COLOR1,
104 NV04_PGRAPH_PATT_COLORRAM+0x00, 108 NV04_PGRAPH_PATT_COLORRAM+0x00,
105 NV04_PGRAPH_PATT_COLORRAM+0x01,
106 NV04_PGRAPH_PATT_COLORRAM+0x02,
107 NV04_PGRAPH_PATT_COLORRAM+0x03,
108 NV04_PGRAPH_PATT_COLORRAM+0x04, 109 NV04_PGRAPH_PATT_COLORRAM+0x04,
109 NV04_PGRAPH_PATT_COLORRAM+0x05,
110 NV04_PGRAPH_PATT_COLORRAM+0x06,
111 NV04_PGRAPH_PATT_COLORRAM+0x07,
112 NV04_PGRAPH_PATT_COLORRAM+0x08, 110 NV04_PGRAPH_PATT_COLORRAM+0x08,
113 NV04_PGRAPH_PATT_COLORRAM+0x09, 111 NV04_PGRAPH_PATT_COLORRAM+0x0c,
114 NV04_PGRAPH_PATT_COLORRAM+0x0A,
115 NV04_PGRAPH_PATT_COLORRAM+0x0B,
116 NV04_PGRAPH_PATT_COLORRAM+0x0C,
117 NV04_PGRAPH_PATT_COLORRAM+0x0D,
118 NV04_PGRAPH_PATT_COLORRAM+0x0E,
119 NV04_PGRAPH_PATT_COLORRAM+0x0F,
120 NV04_PGRAPH_PATT_COLORRAM+0x10, 112 NV04_PGRAPH_PATT_COLORRAM+0x10,
121 NV04_PGRAPH_PATT_COLORRAM+0x11,
122 NV04_PGRAPH_PATT_COLORRAM+0x12,
123 NV04_PGRAPH_PATT_COLORRAM+0x13,
124 NV04_PGRAPH_PATT_COLORRAM+0x14, 113 NV04_PGRAPH_PATT_COLORRAM+0x14,
125 NV04_PGRAPH_PATT_COLORRAM+0x15,
126 NV04_PGRAPH_PATT_COLORRAM+0x16,
127 NV04_PGRAPH_PATT_COLORRAM+0x17,
128 NV04_PGRAPH_PATT_COLORRAM+0x18, 114 NV04_PGRAPH_PATT_COLORRAM+0x18,
129 NV04_PGRAPH_PATT_COLORRAM+0x19, 115 NV04_PGRAPH_PATT_COLORRAM+0x1c,
130 NV04_PGRAPH_PATT_COLORRAM+0x1A,
131 NV04_PGRAPH_PATT_COLORRAM+0x1B,
132 NV04_PGRAPH_PATT_COLORRAM+0x1C,
133 NV04_PGRAPH_PATT_COLORRAM+0x1D,
134 NV04_PGRAPH_PATT_COLORRAM+0x1E,
135 NV04_PGRAPH_PATT_COLORRAM+0x1F,
136 NV04_PGRAPH_PATT_COLORRAM+0x20, 116 NV04_PGRAPH_PATT_COLORRAM+0x20,
137 NV04_PGRAPH_PATT_COLORRAM+0x21,
138 NV04_PGRAPH_PATT_COLORRAM+0x22,
139 NV04_PGRAPH_PATT_COLORRAM+0x23,
140 NV04_PGRAPH_PATT_COLORRAM+0x24, 117 NV04_PGRAPH_PATT_COLORRAM+0x24,
141 NV04_PGRAPH_PATT_COLORRAM+0x25,
142 NV04_PGRAPH_PATT_COLORRAM+0x26,
143 NV04_PGRAPH_PATT_COLORRAM+0x27,
144 NV04_PGRAPH_PATT_COLORRAM+0x28, 118 NV04_PGRAPH_PATT_COLORRAM+0x28,
145 NV04_PGRAPH_PATT_COLORRAM+0x29, 119 NV04_PGRAPH_PATT_COLORRAM+0x2c,
146 NV04_PGRAPH_PATT_COLORRAM+0x2A,
147 NV04_PGRAPH_PATT_COLORRAM+0x2B,
148 NV04_PGRAPH_PATT_COLORRAM+0x2C,
149 NV04_PGRAPH_PATT_COLORRAM+0x2D,
150 NV04_PGRAPH_PATT_COLORRAM+0x2E,
151 NV04_PGRAPH_PATT_COLORRAM+0x2F,
152 NV04_PGRAPH_PATT_COLORRAM+0x30, 120 NV04_PGRAPH_PATT_COLORRAM+0x30,
153 NV04_PGRAPH_PATT_COLORRAM+0x31,
154 NV04_PGRAPH_PATT_COLORRAM+0x32,
155 NV04_PGRAPH_PATT_COLORRAM+0x33,
156 NV04_PGRAPH_PATT_COLORRAM+0x34, 121 NV04_PGRAPH_PATT_COLORRAM+0x34,
157 NV04_PGRAPH_PATT_COLORRAM+0x35,
158 NV04_PGRAPH_PATT_COLORRAM+0x36,
159 NV04_PGRAPH_PATT_COLORRAM+0x37,
160 NV04_PGRAPH_PATT_COLORRAM+0x38, 122 NV04_PGRAPH_PATT_COLORRAM+0x38,
161 NV04_PGRAPH_PATT_COLORRAM+0x39, 123 NV04_PGRAPH_PATT_COLORRAM+0x3c,
162 NV04_PGRAPH_PATT_COLORRAM+0x3A, 124 NV04_PGRAPH_PATT_COLORRAM+0x40,
163 NV04_PGRAPH_PATT_COLORRAM+0x3B, 125 NV04_PGRAPH_PATT_COLORRAM+0x44,
164 NV04_PGRAPH_PATT_COLORRAM+0x3C, 126 NV04_PGRAPH_PATT_COLORRAM+0x48,
165 NV04_PGRAPH_PATT_COLORRAM+0x3D, 127 NV04_PGRAPH_PATT_COLORRAM+0x4c,
166 NV04_PGRAPH_PATT_COLORRAM+0x3E, 128 NV04_PGRAPH_PATT_COLORRAM+0x50,
167 NV04_PGRAPH_PATT_COLORRAM+0x3F, 129 NV04_PGRAPH_PATT_COLORRAM+0x54,
130 NV04_PGRAPH_PATT_COLORRAM+0x58,
131 NV04_PGRAPH_PATT_COLORRAM+0x5c,
132 NV04_PGRAPH_PATT_COLORRAM+0x60,
133 NV04_PGRAPH_PATT_COLORRAM+0x64,
134 NV04_PGRAPH_PATT_COLORRAM+0x68,
135 NV04_PGRAPH_PATT_COLORRAM+0x6c,
136 NV04_PGRAPH_PATT_COLORRAM+0x70,
137 NV04_PGRAPH_PATT_COLORRAM+0x74,
138 NV04_PGRAPH_PATT_COLORRAM+0x78,
139 NV04_PGRAPH_PATT_COLORRAM+0x7c,
140 NV04_PGRAPH_PATT_COLORRAM+0x80,
141 NV04_PGRAPH_PATT_COLORRAM+0x84,
142 NV04_PGRAPH_PATT_COLORRAM+0x88,
143 NV04_PGRAPH_PATT_COLORRAM+0x8c,
144 NV04_PGRAPH_PATT_COLORRAM+0x90,
145 NV04_PGRAPH_PATT_COLORRAM+0x94,
146 NV04_PGRAPH_PATT_COLORRAM+0x98,
147 NV04_PGRAPH_PATT_COLORRAM+0x9c,
148 NV04_PGRAPH_PATT_COLORRAM+0xa0,
149 NV04_PGRAPH_PATT_COLORRAM+0xa4,
150 NV04_PGRAPH_PATT_COLORRAM+0xa8,
151 NV04_PGRAPH_PATT_COLORRAM+0xac,
152 NV04_PGRAPH_PATT_COLORRAM+0xb0,
153 NV04_PGRAPH_PATT_COLORRAM+0xb4,
154 NV04_PGRAPH_PATT_COLORRAM+0xb8,
155 NV04_PGRAPH_PATT_COLORRAM+0xbc,
156 NV04_PGRAPH_PATT_COLORRAM+0xc0,
157 NV04_PGRAPH_PATT_COLORRAM+0xc4,
158 NV04_PGRAPH_PATT_COLORRAM+0xc8,
159 NV04_PGRAPH_PATT_COLORRAM+0xcc,
160 NV04_PGRAPH_PATT_COLORRAM+0xd0,
161 NV04_PGRAPH_PATT_COLORRAM+0xd4,
162 NV04_PGRAPH_PATT_COLORRAM+0xd8,
163 NV04_PGRAPH_PATT_COLORRAM+0xdc,
164 NV04_PGRAPH_PATT_COLORRAM+0xe0,
165 NV04_PGRAPH_PATT_COLORRAM+0xe4,
166 NV04_PGRAPH_PATT_COLORRAM+0xe8,
167 NV04_PGRAPH_PATT_COLORRAM+0xec,
168 NV04_PGRAPH_PATT_COLORRAM+0xf0,
169 NV04_PGRAPH_PATT_COLORRAM+0xf4,
170 NV04_PGRAPH_PATT_COLORRAM+0xf8,
171 NV04_PGRAPH_PATT_COLORRAM+0xfc,
168 NV04_PGRAPH_PATTERN, 172 NV04_PGRAPH_PATTERN,
169 0x0040080c, 173 0x0040080c,
170 NV04_PGRAPH_PATTERN_SHAPE, 174 NV04_PGRAPH_PATTERN_SHAPE,
@@ -247,14 +251,6 @@ static uint32_t nv04_graph_ctx_regs[] = {
247 0x004004f8, 251 0x004004f8,
248 0x0040047c, 252 0x0040047c,
249 0x004004fc, 253 0x004004fc,
250 0x0040053c,
251 0x00400544,
252 0x00400540,
253 0x00400548,
254 0x00400560,
255 0x00400568,
256 0x00400564,
257 0x0040056c,
258 0x00400534, 254 0x00400534,
259 0x00400538, 255 0x00400538,
260 0x00400514, 256 0x00400514,
@@ -341,9 +337,8 @@ static uint32_t nv04_graph_ctx_regs[] = {
341 0x00400500, 337 0x00400500,
342 0x00400504, 338 0x00400504,
343 NV04_PGRAPH_VALID1, 339 NV04_PGRAPH_VALID1,
344 NV04_PGRAPH_VALID2 340 NV04_PGRAPH_VALID2,
345 341 NV04_PGRAPH_DEBUG_3
346
347}; 342};
348 343
349struct graph_state { 344struct graph_state {
@@ -388,6 +383,18 @@ nv04_graph_context_switch(struct drm_device *dev)
388 pgraph->fifo_access(dev, true); 383 pgraph->fifo_access(dev, true);
389} 384}
390 385
386static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
387{
388 int i;
389
390 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
391 if (nv04_graph_ctx_regs[i] == reg)
392 return &ctx->nv04[i];
393 }
394
395 return NULL;
396}
397
391int nv04_graph_create_context(struct nouveau_channel *chan) 398int nv04_graph_create_context(struct nouveau_channel *chan)
392{ 399{
393 struct graph_state *pgraph_ctx; 400 struct graph_state *pgraph_ctx;
@@ -398,15 +405,8 @@ int nv04_graph_create_context(struct nouveau_channel *chan)
398 if (pgraph_ctx == NULL) 405 if (pgraph_ctx == NULL)
399 return -ENOMEM; 406 return -ENOMEM;
400 407
401 /* dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; */ 408 *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
402 pgraph_ctx->nv04[0] = 0x0001ffff; 409
403 /* is it really needed ??? */
404#if 0
405 dev_priv->fifos[channel].pgraph_ctx[1] =
406 nv_rd32(dev, NV_PGRAPH_DEBUG_4);
407 dev_priv->fifos[channel].pgraph_ctx[2] =
408 nv_rd32(dev, 0x004006b0);
409#endif
410 return 0; 410 return 0;
411} 411}
412 412
@@ -429,9 +429,13 @@ int nv04_graph_load_context(struct nouveau_channel *chan)
429 nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]); 429 nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
430 430
431 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100); 431 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
432 nv_wr32(dev, NV04_PGRAPH_CTX_USER, chan->id << 24); 432
433 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
434 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
435
433 tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2); 436 tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
434 nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff); 437 nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
438
435 return 0; 439 return 0;
436} 440}
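
The load_context fix switches NV04_PGRAPH_CTX_USER from a blind write to a read-modify-write that preserves the low 24 bits and replaces only the channel id in bits 31:24. The pattern in isolation:

#include <stdint.h>
#include <stdio.h>

static uint32_t ctx_user = 0x00abcdef;	/* low bits hold other state */

/* Read-modify-write: update only the channel id field (bits 31:24),
 * preserving whatever the hardware keeps in the low 24 bits. */
static void set_channel(uint32_t chan_id)
{
	uint32_t tmp = ctx_user & 0x00ffffff;

	ctx_user = tmp | (chan_id << 24);
}

int main(void)
{
	set_channel(5);
	printf("0x%08x\n", ctx_user);	/* 0x05abcdef */
	return 0;
}
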
437 441
@@ -494,7 +498,7 @@ int nv04_graph_init(struct drm_device *dev)
494 nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF); 498 nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
495 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100); 499 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
496 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; 500 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
497 tmp |= dev_priv->engine.fifo.channels << 24; 501 tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
498 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp); 502 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
499 503
500 /* These don't belong here, they're part of a per-channel context */ 504 /* These don't belong here, they're part of a per-channel context */
@@ -533,7 +537,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
533 int mthd, uint32_t data) 537 int mthd, uint32_t data)
534{ 538{
535 struct drm_device *dev = chan->dev; 539 struct drm_device *dev = chan->dev;
536 uint32_t instance = nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff; 540 uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
537 int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; 541 int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
538 uint32_t tmp; 542 uint32_t tmp;
539 543
@@ -547,7 +551,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
547 return 0; 551 return 0;
548} 552}
549 553
550static struct nouveau_pgraph_object_method nv04_graph_mthds_m2mf[] = { 554static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
551 { 0x0150, nv04_graph_mthd_set_ref }, 555 { 0x0150, nv04_graph_mthd_set_ref },
552 {} 556 {}
553}; 557};
@@ -558,7 +562,7 @@ static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
558}; 562};
559 563
560struct nouveau_pgraph_object_class nv04_graph_grclass[] = { 564struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
561 { 0x0039, false, nv04_graph_mthds_m2mf }, 565 { 0x0039, false, NULL },
562 { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */ 566 { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
563 { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */ 567 { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
564 { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */ 568 { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
@@ -574,6 +578,7 @@ struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
574 { 0x0053, false, NULL }, /* surf3d */ 578 { 0x0053, false, NULL }, /* surf3d */
575 { 0x0054, false, NULL }, /* tex_tri */ 579 { 0x0054, false, NULL }, /* tex_tri */
576 { 0x0055, false, NULL }, /* multitex_tri */ 580 { 0x0055, false, NULL }, /* multitex_tri */
581 { 0x506e, true, nv04_graph_mthds_sw },
577 {} 582 {}
578}; 583};
579 584
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index a20c206625a2..a3b9563a6f60 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -30,7 +30,7 @@ nv04_instmem_determine_amount(struct drm_device *dev)
30 * of vram. For now, only reserve a small piece until we know 30 * of vram. For now, only reserve a small piece until we know
31 * more about what each chipset requires. 31 * more about what each chipset requires.
32 */ 32 */
33 switch (dev_priv->chipset & 0xf0) { 33 switch (dev_priv->chipset) {
34 case 0x40: 34 case 0x40:
35 case 0x47: 35 case 0x47:
36 case 0x49: 36 case 0x49:
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index 79e2d104d70a..cc5cda44e501 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,17 +3,37 @@
3#include "nouveau_drv.h" 3#include "nouveau_drv.h"
4#include "nouveau_drm.h" 4#include "nouveau_drm.h"
5 5
6void
7nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
8 uint32_t size, uint32_t pitch)
9{
10 struct drm_nouveau_private *dev_priv = dev->dev_private;
11 uint32_t limit = max(1u, addr + size) - 1;
12
13 if (pitch) {
14 if (dev_priv->card_type >= NV_20)
15 addr |= 1;
16 else
17 addr |= 1 << 31;
18 }
19
20 nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
21 nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
22 nv_wr32(dev, NV10_PFB_TILE(i), addr);
23}
24
6int 25int
7nv10_fb_init(struct drm_device *dev) 26nv10_fb_init(struct drm_device *dev)
8{ 27{
9 uint32_t fb_bar_size; 28 struct drm_nouveau_private *dev_priv = dev->dev_private;
29 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
10 int i; 30 int i;
11 31
12 fb_bar_size = drm_get_resource_len(dev, 0) - 1; 32 pfb->num_tiles = NV10_PFB_TILE__SIZE;
13 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { 33
14 nv_wr32(dev, NV10_PFB_TILE(i), 0); 34 /* Turn all the tiling regions off. */
15 nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size); 35 for (i = 0; i < pfb->num_tiles; i++)
16 } 36 pfb->set_region_tiling(dev, i, 0, 0, 0);
17 37
18 return 0; 38 return 0;
19} 39}
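
nv10_fb_set_region_tiling() computes the region's inclusive end as max(1u, addr + size) - 1, so the disabled case (addr == size == 0) yields a limit of 0 instead of wrapping to 0xffffffff. A tiny demonstration:

#include <stdint.h>
#include <stdio.h>

static uint32_t region_limit(uint32_t addr, uint32_t size)
{
	/* Inclusive end of the region; the clamp keeps the disabled
	 * case (addr == size == 0) from underflowing to 0xffffffff,
	 * which would cover all of VRAM. */
	uint32_t end = addr + size;

	return (end > 1 ? end : 1) - 1;
}

int main(void)
{
	printf("0x%08x\n", region_limit(0x100000, 0x10000)); /* 0x0010ffff */
	printf("0x%08x\n", region_limit(0, 0));              /* 0x00000000 */
	return 0;
}
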
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 6870e0ee2e7e..fcf2cdd19493 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -807,6 +807,20 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan)
807 chan->pgraph_ctx = NULL; 807 chan->pgraph_ctx = NULL;
808} 808}
809 809
810void
811nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
812 uint32_t size, uint32_t pitch)
813{
814 uint32_t limit = max(1u, addr + size) - 1;
815
816 if (pitch)
817 addr |= 1 << 31;
818
819 nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
820 nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
821 nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
822}
823
810int nv10_graph_init(struct drm_device *dev) 824int nv10_graph_init(struct drm_device *dev)
811{ 825{
812 struct drm_nouveau_private *dev_priv = dev->dev_private; 826 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -838,17 +852,9 @@ int nv10_graph_init(struct drm_device *dev)
838 } else 852 } else
839 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000); 853 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
840 854
841 /* copy tile info from PFB */ 855 /* Turn all the tiling regions off. */
842 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { 856 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
843 nv_wr32(dev, NV10_PGRAPH_TILE(i), 857 nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
844 nv_rd32(dev, NV10_PFB_TILE(i)));
845 nv_wr32(dev, NV10_PGRAPH_TLIMIT(i),
846 nv_rd32(dev, NV10_PFB_TLIMIT(i)));
847 nv_wr32(dev, NV10_PGRAPH_TSIZE(i),
848 nv_rd32(dev, NV10_PFB_TSIZE(i)));
849 nv_wr32(dev, NV10_PGRAPH_TSTATUS(i),
850 nv_rd32(dev, NV10_PFB_TSTATUS(i)));
851 }
852 858
853 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000); 859 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
854 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000); 860 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 81c01353a9f9..21ac6e49b6ee 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -33,13 +33,103 @@
 #include "nouveau_hw.h"
 #include "nv17_tv.h"
 
-enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
-                                         struct drm_connector *connector,
-                                         uint32_t pin_mask)
+static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 {
+        struct drm_device *dev = encoder->dev;
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
+        uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
+                fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
+        uint32_t sample = 0;
+        int head;
+
+#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
+        testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
+        if (dev_priv->vbios->tvdactestval)
+                testval = dev_priv->vbios->tvdactestval;
+
+        dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
+        head = (dacclk & 0x100) >> 8;
+
+        /* Save the previous state. */
+        gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
+        gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
+        fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
+        fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
+        fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
+        fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
+        test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+        ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c);
+        ctv_14 = NVReadRAMDAC(dev, head, 0x680c14);
+        ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
+
+        /* Prepare the DAC for load detection. */
+        nv17_gpio_set(dev, DCB_GPIO_TVDAC1, true);
+        nv17_gpio_set(dev, DCB_GPIO_TVDAC0, true);
+
+        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
+        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
+        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183);
+        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
+                      NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
+                      NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 |
+                      NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
+                      NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |
+                      NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS);
+
+        NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0);
+
+        NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
+                      (dacclk & ~0xff) | 0x22);
+        msleep(1);
+        NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
+                      (dacclk & ~0xff) | 0x21);
+
+        NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20);
+        NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16);
+
+        /* Sample pin 0x4 (usually S-video luma). */
+        NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff);
+        msleep(20);
+        sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
+                & 0x4 << 28;
+
+        /* Sample the remaining pins. */
+        NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff);
+        msleep(20);
+        sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
+                & 0xa << 28;
+
+        /* Restore the previous state. */
+        NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c);
+        NVWriteRAMDAC(dev, head, 0x680c14, ctv_14);
+        NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c);
+        NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk);
+        NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl);
+        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control);
+        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
+        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
+        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
+        nv17_gpio_set(dev, DCB_GPIO_TVDAC1, gpio1);
+        nv17_gpio_set(dev, DCB_GPIO_TVDAC0, gpio0);
+
+        return sample;
+}
+
+static enum drm_connector_status
+nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+        struct drm_device *dev = encoder->dev;
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct drm_mode_config *conf = &dev->mode_config;
         struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+        struct dcb_entry *dcb = tv_enc->base.dcb;
 
-        tv_enc->pin_mask = pin_mask >> 28 & 0xe;
+        if (dev_priv->chipset == 0x42 ||
+            dev_priv->chipset == 0x43)
+                tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe;
+        else
+                tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe;
 
         switch (tv_enc->pin_mask) {
         case 0x2:
@@ -50,7 +140,7 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
                 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
                 break;
         case 0xe:
-                if (nouveau_encoder(encoder)->dcb->tvconf.has_component_output)
+                if (dcb->tvconf.has_component_output)
                         tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
                 else
                         tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
@@ -61,11 +151,16 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
         }
 
         drm_connector_property_set_value(connector,
-                encoder->dev->mode_config.tv_subconnector_property,
-                tv_enc->subconnector);
+                                         conf->tv_subconnector_property,
+                                         tv_enc->subconnector);
 
-        return tv_enc->subconnector ? connector_status_connected :
-                                        connector_status_disconnected;
+        if (tv_enc->subconnector) {
+                NV_INFO(dev, "Load detected on output %c\n",
+                        '@' + ffs(dcb->or));
+                return connector_status_connected;
+        } else {
+                return connector_status_disconnected;
+        }
 }
 
 static const struct {
@@ -484,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
                                     nouveau_encoder(encoder)->restore.output);
 
         nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+        nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
 }
 
 static int nv17_tv_create_resources(struct drm_encoder *encoder,
@@ -633,7 +730,7 @@ static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
         .prepare = nv17_tv_prepare,
         .commit = nv17_tv_commit,
         .mode_set = nv17_tv_mode_set,
-        .detect = nv17_dac_detect,
+        .detect = nv17_tv_detect,
 };
 
 static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
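
The bit arithmetic used by the detect path above, restated as a runnable check (values below chosen by me; that the load flags come back in bits 31:29 is inferred from the masks in nv42_tv_sample_load()):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t sample = 0;

            /* nv42_tv_sample_load() keeps bit 30 from the first probe... */
            sample |= 0xf0000000 & (0x4u << 28);
            /* ...and bits 31 and 29 from the second. */
            sample |= 0xa0000000 & (0xau << 28);

            /* nv17_tv_detect() folds that into a pin mask; 0xe means all
             * three pins saw a load -> Component or SCART above. */
            assert((sample >> 28 & 0xe) == 0xe);
            return 0;
    }
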
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 18ba74f19703..d6fc0a82f03d 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -514,6 +514,27 @@ nv20_graph_rdi(struct drm_device *dev)
         nouveau_wait_for_idle(dev);
 }
 
+void
+nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+                             uint32_t size, uint32_t pitch)
+{
+        uint32_t limit = max(1u, addr + size) - 1;
+
+        if (pitch)
+                addr |= 1;
+
+        nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+        nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+        nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+
+        nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
+        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
+        nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
+        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
+        nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
+        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
+}
+
 int
 nv20_graph_init(struct drm_device *dev)
 {
@@ -572,27 +593,10 @@ nv20_graph_init(struct drm_device *dev)
                 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
         }
 
-        /* copy tile info from PFB */
-        for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
-                nv_wr32(dev, 0x00400904 + i * 0x10,
-                        nv_rd32(dev, NV10_PFB_TLIMIT(i)));
-                        /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
-                nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i * 4);
-                nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
-                        nv_rd32(dev, NV10_PFB_TLIMIT(i)));
-                nv_wr32(dev, 0x00400908 + i * 0x10,
-                        nv_rd32(dev, NV10_PFB_TSIZE(i)));
-                        /* which is NV40_PGRAPH_TSIZE0(i) ?? */
-                nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i * 4);
-                nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
-                        nv_rd32(dev, NV10_PFB_TSIZE(i)));
-                nv_wr32(dev, 0x00400900 + i * 0x10,
-                        nv_rd32(dev, NV10_PFB_TILE(i)));
-                        /* which is NV40_PGRAPH_TILE0(i) ?? */
-                nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i * 4);
-                nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
-                        nv_rd32(dev, NV10_PFB_TILE(i)));
-        }
+        /* Turn all the tiling regions off. */
+        for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+                nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+
         for (i = 0; i < 8; i++) {
                 nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
                 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
@@ -704,18 +708,9 @@ nv30_graph_init(struct drm_device *dev)
 
         nv_wr32(dev, 0x4000c0, 0x00000016);
 
-        /* copy tile info from PFB */
-        for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
-                nv_wr32(dev, 0x00400904 + i * 0x10,
-                        nv_rd32(dev, NV10_PFB_TLIMIT(i)));
-                        /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
-                nv_wr32(dev, 0x00400908 + i * 0x10,
-                        nv_rd32(dev, NV10_PFB_TSIZE(i)));
-                        /* which is NV40_PGRAPH_TSIZE0(i) ?? */
-                nv_wr32(dev, 0x00400900 + i * 0x10,
-                        nv_rd32(dev, NV10_PFB_TILE(i)));
-                        /* which is NV40_PGRAPH_TILE0(i) ?? */
-        }
+        /* Turn all the tiling regions off. */
+        for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+                nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
 
         nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
         nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
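
The NV20 helper programs part of the PGRAPH state through an index/data register pair (NV10_PGRAPH_RDI_INDEX / NV10_PGRAPH_RDI_DATA): write the target slot to the index register, then the value to the data register. A userspace-style mock of that access pattern, with the MMIO writes replaced by printf; the register offsets are my recollection of nouveau_reg.h and should be treated as assumptions:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RDI_INDEX 0x400750u  /* assumed value of NV10_PGRAPH_RDI_INDEX */
    #define RDI_DATA  0x400754u  /* assumed value of NV10_PGRAPH_RDI_DATA  */

    static void mock_wr32(uint32_t reg, uint32_t val)
    {
            printf("wr32 %#010" PRIx32 " <- %#010" PRIx32 "\n", reg, val);
    }

    /* Index/data access: select the slot, then write the value. */
    static void rdi_wr32(uint32_t index, uint32_t val)
    {
            mock_wr32(RDI_INDEX, index);
            mock_wr32(RDI_DATA, val);
    }

    int main(void)
    {
            /* The three RDI writes nv20_graph_set_region_tiling() issues
             * for region i = 0 when turning the region off. */
            rdi_wr32(0x00EA0030, 0);  /* limit */
            rdi_wr32(0x00EA0050, 0);  /* pitch */
            rdi_wr32(0x00EA0010, 0);  /* addr  */
            return 0;
    }
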
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index ca1d27107a8e..3cd07d8d5bd7 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -3,12 +3,37 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
+void
+nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+                          uint32_t size, uint32_t pitch)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        uint32_t limit = max(1u, addr + size) - 1;
+
+        if (pitch)
+                addr |= 1;
+
+        switch (dev_priv->chipset) {
+        case 0x40:
+                nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
+                nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
+                nv_wr32(dev, NV10_PFB_TILE(i), addr);
+                break;
+
+        default:
+                nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
+                nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
+                nv_wr32(dev, NV40_PFB_TILE(i), addr);
+                break;
+        }
+}
+
 int
 nv40_fb_init(struct drm_device *dev)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        uint32_t fb_bar_size, tmp;
-        int num_tiles;
+        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+        uint32_t tmp;
         int i;
 
         /* This is strictly a NV4x register (don't know about NV5x). */
@@ -23,35 +48,23 @@ nv40_fb_init(struct drm_device *dev)
         case 0x45:
                 tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
                 nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
-                num_tiles = NV10_PFB_TILE__SIZE;
+                pfb->num_tiles = NV10_PFB_TILE__SIZE;
                 break;
         case 0x46: /* G72 */
         case 0x47: /* G70 */
         case 0x49: /* G71 */
         case 0x4b: /* G73 */
         case 0x4c: /* C51 (G7X version) */
-                num_tiles = NV40_PFB_TILE__SIZE_1;
+                pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
                 break;
         default:
-                num_tiles = NV40_PFB_TILE__SIZE_0;
+                pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
                 break;
         }
 
-        fb_bar_size = drm_get_resource_len(dev, 0) - 1;
-        switch (dev_priv->chipset) {
-        case 0x40:
-                for (i = 0; i < num_tiles; i++) {
-                        nv_wr32(dev, NV10_PFB_TILE(i), 0);
-                        nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
-                }
-                break;
-        default:
-                for (i = 0; i < num_tiles; i++) {
-                        nv_wr32(dev, NV40_PFB_TILE(i), 0);
-                        nv_wr32(dev, NV40_PFB_TLIMIT(i), fb_bar_size);
-                }
-                break;
-        }
+        /* Turn all the tiling regions off. */
+        for (i = 0; i < pfb->num_tiles; i++)
+                pfb->set_region_tiling(dev, i, 0, 0, 0);
 
         return 0;
 }
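
The init loops above lean on the per-chipset callback being installed in the fb engine struct, so the generic code never has to switch on the chipset itself. A compressed model of that dispatch (struct and field names are simplified stand-ins for nouveau_fb_engine, not the real definitions):

    #include <stdint.h>
    #include <stdio.h>

    struct fb_engine {
            int num_tiles;
            void (*set_region_tiling)(int i, uint32_t addr,
                                      uint32_t size, uint32_t pitch);
    };

    static void nv40_set_region_tiling(int i, uint32_t addr,
                                       uint32_t size, uint32_t pitch)
    {
            printf("tile %d: addr=%#x size=%#x pitch=%u\n",
                   i, addr, size, pitch);
    }

    int main(void)
    {
            struct fb_engine pfb = { 12, nv40_set_region_tiling };

            /* Turn all the tiling regions off, as in nv40_fb_init(). */
            for (int i = 0; i < pfb.num_tiles; i++)
                    pfb.set_region_tiling(i, 0, 0, 0);
            return 0;
    }
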
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 2b332bb55acf..53e8afe1dcd1 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -181,6 +181,48 @@ nv40_graph_unload_context(struct drm_device *dev)
         return ret;
 }
 
+void
+nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+                             uint32_t size, uint32_t pitch)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        uint32_t limit = max(1u, addr + size) - 1;
+
+        if (pitch)
+                addr |= 1;
+
+        switch (dev_priv->chipset) {
+        case 0x44:
+        case 0x4a:
+        case 0x4e:
+                nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+                nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+                nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+                break;
+
+        case 0x46:
+        case 0x47:
+        case 0x49:
+        case 0x4b:
+                nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
+                nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
+                nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
+                nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
+                nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
+                nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+                break;
+
+        default:
+                nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+                nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+                nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+                nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
+                nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
+                nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+                break;
+        }
+}
+
 /*
  * G70          0x47
  * G71          0x49
@@ -195,7 +237,8 @@ nv40_graph_init(struct drm_device *dev)
 {
         struct drm_nouveau_private *dev_priv =
                 (struct drm_nouveau_private *)dev->dev_private;
-        uint32_t vramsz, tmp;
+        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+        uint32_t vramsz;
         int i, j;
 
         nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -292,74 +335,9 @@ nv40_graph_init(struct drm_device *dev)
         nv_wr32(dev, 0x400b38, 0x2ffff800);
         nv_wr32(dev, 0x400b3c, 0x00006000);
 
-        /* copy tile info from PFB */
-        switch (dev_priv->chipset) {
-        case 0x40: /* vanilla NV40 */
-                for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
-                        tmp = nv_rd32(dev, NV10_PFB_TILE(i));
-                        nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
-                        nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
-                        tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i));
-                        nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
-                        nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
-                        tmp = nv_rd32(dev, NV10_PFB_TSIZE(i));
-                        nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
-                        nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
-                        tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i));
-                        nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
-                        nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
-                }
-                break;
-        case 0x44:
-        case 0x4a:
-        case 0x4e: /* NV44-based cores don't have 0x406900? */
-                for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
-                        tmp = nv_rd32(dev, NV40_PFB_TILE(i));
-                        nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
-                        tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
-                        nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
-                        tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
-                        nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
-                        tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
-                        nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
-                }
-                break;
-        case 0x46:
-        case 0x47:
-        case 0x49:
-        case 0x4b: /* G7X-based cores */
-                for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) {
-                        tmp = nv_rd32(dev, NV40_PFB_TILE(i));
-                        nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp);
-                        nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
-                        tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
-                        nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp);
-                        nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
-                        tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
-                        nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp);
-                        nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
-                        tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
-                        nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp);
-                        nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
-                }
-                break;
-        default: /* everything else */
-                for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
-                        tmp = nv_rd32(dev, NV40_PFB_TILE(i));
-                        nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
-                        nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
-                        tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
-                        nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
-                        nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
-                        tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
-                        nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
-                        nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
-                        tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
-                        nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
-                        nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
-                }
-                break;
-        }
+        /* Turn all the tiling regions off. */
+        for (i = 0; i < pfb->num_tiles; i++)
+                nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
 
         /* begin RAM config */
         vramsz = drm_get_resource_len(dev, 0) - 1;
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 118d3285fd8c..d1a651e3400c 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
 static void
 nv50_crtc_destroy(struct drm_crtc *crtc)
 {
-        struct drm_device *dev = crtc->dev;
-        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-        NV_DEBUG_KMS(dev, "\n");
+        struct drm_device *dev;
+        struct nouveau_crtc *nv_crtc;
 
         if (!crtc)
                 return;
 
+        dev = crtc->dev;
+        nv_crtc = nouveau_crtc(crtc);
+
+        NV_DEBUG_KMS(dev, "\n");
+
         drm_crtc_cleanup(&nv_crtc->base);
 
         nv50_cursor_fini(nv_crtc);
@@ -432,6 +435,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
         struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
         struct drm_device *dev = crtc->dev;
         struct drm_encoder *encoder;
+        uint32_t dac = 0, sor = 0;
 
         NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
@@ -439,9 +443,28 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 
-                if (drm_helper_encoder_in_use(encoder))
+                if (!drm_helper_encoder_in_use(encoder))
                         continue;
 
+                if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
+                    nv_encoder->dcb->type == OUTPUT_TV)
+                        dac |= (1 << nv_encoder->or);
+                else
+                        sor |= (1 << nv_encoder->or);
+        }
+
+        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+                if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
+                    nv_encoder->dcb->type == OUTPUT_TV) {
+                        if (dac & (1 << nv_encoder->or))
+                                continue;
+                } else {
+                        if (sor & (1 << nv_encoder->or))
+                                continue;
+                }
+
                 nv_encoder->disconnect(nv_encoder);
         }
 
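
The reworked prepare logic is a two-pass mask walk: pass one collects the output resources (ORs) of encoders still in use, split into DAC and SOR namespaces; pass two disconnects only encoders whose OR is unclaimed. A self-contained model of the same algorithm (the encoder table is invented for the demo):

    #include <stdint.h>
    #include <stdio.h>

    struct enc {
            int or;       /* output resource index */
            int analog;   /* DAC (analog/TV) vs. SOR */
            int in_use;
    };

    int main(void)
    {
            struct enc encs[] = {
                    { 0, 1, 1 },  /* DAC 0, active */
                    { 0, 0, 0 },  /* SOR 0, idle   */
                    { 1, 0, 1 },  /* SOR 1, active */
            };
            uint32_t dac = 0, sor = 0;

            /* Pass one: record which ORs are still driven. */
            for (unsigned i = 0; i < 3; i++) {
                    if (!encs[i].in_use)
                            continue;
                    if (encs[i].analog)
                            dac |= 1u << encs[i].or;
                    else
                            sor |= 1u << encs[i].or;
            }

            /* Pass two: disconnect encoders whose OR is unclaimed. */
            for (unsigned i = 0; i < 3; i++) {
                    uint32_t mask = encs[i].analog ? dac : sor;
                    if (!(mask & (1u << encs[i].or)))
                            printf("disconnect encoder %u\n", i);
            }
            return 0;
    }
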
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a9263d92a231..90f0bf59fbcd 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -690,9 +690,21 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
                            int pxclk)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_connector *nv_connector = NULL;
+        struct drm_encoder *encoder;
         struct nvbios *bios = &dev_priv->VBIOS;
         uint32_t mc, script = 0, or;
 
+        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+                if (nv_encoder->dcb != dcbent)
+                        continue;
+
+                nv_connector = nouveau_encoder_connector_get(nv_encoder);
+                break;
+        }
+
         or = ffs(dcbent->or) - 1;
         mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
         switch (dcbent->type) {
@@ -711,6 +723,11 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
                 } else
                 if (bios->fp.strapless_is_24bit & 1)
                         script |= 0x0200;
+
+                if (nv_connector && nv_connector->edid &&
+                    (nv_connector->edid->revision >= 4) &&
+                    (nv_connector->edid->input & 0x70) >= 0x20)
+                        script |= 0x0200;
         }
 
         if (nouveau_uscript_lvds >= 0) {
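
My reading of the EDID test added above (interpretation of the EDID 1.4 spec, not taken from the patch itself): in a rev-1.4 digital EDID, bits 6:4 of the video input definition byte encode the panel bit depth, with code 2 meaning 8 bits per component, so (input & 0x70) >= 0x20 selects panels that are at least 8bpc and therefore want the 24-bit LVDS script. A small check:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool edid_wants_24bit(uint8_t revision, uint8_t input)
    {
            return revision >= 4 && (input & 0x70) >= 0x20;
    }

    int main(void)
    {
            printf("%d\n", edid_wants_24bit(4, 0xa5));  /* depth code 2: yes */
            printf("%d\n", edid_wants_24bit(4, 0x95));  /* depth code 1: no  */
            printf("%d\n", edid_wants_24bit(3, 0xa5));  /* pre-1.4 EDID: no  */
            return 0;
    }
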
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6bcc6d39e9b0..0f57cdf7ccb2 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -3,7 +3,7 @@
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
 
-static void
+void
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
         struct nouveau_fbcon_par *par = info->par;
@@ -16,9 +16,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 
         if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
             RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
-                NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-
-                info->flags |= FBINFO_HWACCEL_DISABLED;
+                nouveau_fbcon_gpu_lockup(info);
         }
 
         if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -31,7 +29,11 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
                 OUT_RING(chan, 1);
         }
         BEGIN_RING(chan, NvSub2D, 0x0588, 1);
-        OUT_RING(chan, rect->color);
+        if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+            info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+                OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+        else
+                OUT_RING(chan, rect->color);
         BEGIN_RING(chan, NvSub2D, 0x0600, 4);
         OUT_RING(chan, rect->dx);
         OUT_RING(chan, rect->dy);
@@ -44,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
         FIRE_RING(chan);
 }
 
-static void
+void
 nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
         struct nouveau_fbcon_par *par = info->par;
@@ -56,9 +58,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
                 return;
 
         if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
-                NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-
-                info->flags |= FBINFO_HWACCEL_DISABLED;
+                nouveau_fbcon_gpu_lockup(info);
         }
 
         if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
         FIRE_RING(chan);
 }
 
-static void
+void
 nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
         struct nouveau_fbcon_par *par = info->par;
@@ -101,8 +101,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
         }
 
         if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
-                NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-                info->flags |= FBINFO_HWACCEL_DISABLED;
+                nouveau_fbcon_gpu_lockup(info);
         }
 
         if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -135,9 +134,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
                 int push = dwords > 2047 ? 2047 : dwords;
 
                 if (RING_SPACE(chan, push + 1)) {
-                        NV_ERROR(dev,
-                                 "GPU lockup - switching to software fbcon\n");
-                        info->flags |= FBINFO_HWACCEL_DISABLED;
+                        nouveau_fbcon_gpu_lockup(info);
                         cfb_imageblit(info, image);
                         return;
                 }
@@ -199,7 +196,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
 
         ret = RING_SPACE(chan, 59);
         if (ret) {
-                NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+                nouveau_fbcon_gpu_lockup(info);
                 return ret;
         }
 
@@ -265,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
         OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
                         dev_priv->vm_vram_base);
 
-        info->fbops->fb_fillrect = nv50_fbcon_fillrect;
-        info->fbops->fb_copyarea = nv50_fbcon_copyarea;
-        info->fbops->fb_imageblit = nv50_fbcon_imageblit;
         return 0;
 }
 
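
The fillrect change above exists because fbcon hands truecolor/directcolor drivers a palette index in rect->color, not a pixel value; the actual pixel lives in the 16-entry pseudo palette the fbdev core fills in. A minimal demonstration of the lookup (palette contents made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Stand-in for info->pseudo_palette. */
            uint32_t pseudo_palette[16] = {
                    [0] = 0x00000000,   /* black */
                    [7] = 0x00aaaaaa,   /* light grey */
            };
            unsigned rect_color = 7;  /* index, as in struct fb_fillrect */

            uint32_t pixel = pseudo_palette[rect_color];
            printf("fill with %#010x\n", pixel);
            return 0;
    }
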
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index b7282284f080..204a79ff10f4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -272,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
                 return ret;
         ramfc = chan->ramfc->gpuobj;
 
-        ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256,
+        ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
                                      0, &chan->cache);
         if (ret)
                 return ret;
@@ -317,17 +317,20 @@ void
 nv50_fifo_destroy_context(struct nouveau_channel *chan)
 {
         struct drm_device *dev = chan->dev;
+        struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
 
         NV_DEBUG(dev, "ch%d\n", chan->id);
 
-        nouveau_gpuobj_ref_del(dev, &chan->ramfc);
-        nouveau_gpuobj_ref_del(dev, &chan->cache);
-
+        /* This will ensure the channel is seen as disabled. */
+        chan->ramfc = NULL;
         nv50_fifo_channel_disable(dev, chan->id, false);
 
         /* Dummy channel, also used on ch 127 */
         if (chan->id == 0)
                 nv50_fifo_channel_disable(dev, 127, false);
+
+        nouveau_gpuobj_ref_del(dev, &ramfc);
+        nouveau_gpuobj_ref_del(dev, &chan->cache);
 }
 
 int
@@ -384,8 +387,8 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
                 nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
                         nv_ro32(dev, cache, (ptr * 2) + 1));
         }
-        nv_wr32(dev, 0x3210, cnt << 2);
-        nv_wr32(dev, 0x3270, 0);
+        nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
+        nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
 
         /* guessing that all the 0x34xx regs aren't on NV50 */
         if (!IS_G80) {
@@ -398,8 +401,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
 
         dev_priv->engine.instmem.finish_access(dev);
 
-        nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
-        nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
         nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
         return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index ca79f32be44c..6d504801b514 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -84,7 +84,7 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
         nv_wr32(dev, 0x400804, 0xc0000000);
         nv_wr32(dev, 0x406800, 0xc0000000);
         nv_wr32(dev, 0x400c04, 0xc0000000);
-        nv_wr32(dev, 0x401804, 0xc0000000);
+        nv_wr32(dev, 0x401800, 0xc0000000);
         nv_wr32(dev, 0x405018, 0xc0000000);
         nv_wr32(dev, 0x402000, 0xc0000000);
 
@@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev)
         uint32_t inst;
         int i;
 
+        /* Be sure we're not in the middle of a context switch or bad things
+         * will happen, such as unloading the wrong pgraph context.
+         */
+        if (!nv_wait(0x400300, 0x00000001, 0x00000000))
+                NV_ERROR(dev, "Ctxprog is still running\n");
+
         inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
         if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
                 return NULL;
@@ -275,19 +281,18 @@ nv50_graph_load_context(struct nouveau_channel *chan)
 int
 nv50_graph_unload_context(struct drm_device *dev)
 {
-        uint32_t inst, fifo = nv_rd32(dev, 0x400500);
+        uint32_t inst;
 
         inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
         if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
                 return 0;
         inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
 
-        nv_wr32(dev, 0x400500, fifo & ~1);
+        nouveau_wait_for_idle(dev);
         nv_wr32(dev, 0x400784, inst);
         nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
         nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
         nouveau_wait_for_idle(dev);
-        nv_wr32(dev, 0x400500, fifo);
 
         nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
         return 0;
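
The guard added to nv50_graph_channel() is a poll-until-clear on PGRAPH register 0x400300 bit 0. In shape it is the usual bounded busy-wait; a sketch of that pattern (read_reg() is a placeholder for the MMIO read, and the real nv_wait() bounds the loop by elapsed time, not iteration count):

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t read_reg(uint32_t reg) { (void)reg; return 0; }

    /* Poll until (value & mask) == expected, giving up eventually. */
    static bool wait_mask(uint32_t reg, uint32_t mask, uint32_t expected)
    {
            for (int i = 0; i < 100000; i++) {
                    if ((read_reg(reg) & mask) == expected)
                            return true;
            }
            return false;
    }

    int main(void)
    {
            /* 0x400300 bit 0 set means the ctxprog is still running. */
            return wait_mask(0x400300, 0x00000001, 0x00000000) ? 0 : 1;
    }
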
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index e395c16d30f5..c2fff543b06f 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -90,11 +90,25 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
 {
         struct drm_device *dev = encoder->dev;
         struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+        struct drm_encoder *enc;
         uint32_t val;
         int or = nv_encoder->or;
 
         NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
 
+        nv_encoder->last_dpms = mode;
+        list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+                struct nouveau_encoder *nvenc = nouveau_encoder(enc);
+
+                if (nvenc == nv_encoder ||
+                    nvenc->disconnect != nv50_sor_disconnect ||
+                    nvenc->dcb->or != nv_encoder->dcb->or)
+                        continue;
+
+                if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
+                        return;
+        }
+
         /* wait for it to be done */
         if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
                      NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
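
The DPMS change encodes a simple rule: an output resource shared by several encoders may only be powered down once no sibling encoder on the same OR is still DPMS on. A toy version of the check (DRM_MODE_DPMS_ON is 0 in drm_mode.h; everything else here is invented for illustration):

    #include <stdio.h>

    #define DPMS_ON  0
    #define DPMS_OFF 3

    struct enc { int or; int last_dpms; };

    static int or_still_in_use(struct enc *encs, int n, struct enc *self)
    {
            for (int i = 0; i < n; i++) {
                    if (&encs[i] == self || encs[i].or != self->or)
                            continue;
                    if (encs[i].last_dpms == DPMS_ON)
                            return 1;
            }
            return 0;
    }

    int main(void)
    {
            struct enc encs[] = { { 1, DPMS_OFF }, { 1, DPMS_ON } };

            encs[0].last_dpms = DPMS_OFF;
            if (or_still_in_use(encs, 2, &encs[0]))
                    printf("skip power-down: sibling on same OR is active\n");
            return 0;
    }
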
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 5982321be4d5..1c02d23f6fcc 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,10 +1,14 @@
 config DRM_RADEON_KMS
-        bool "Enable modesetting on radeon by default"
+        bool "Enable modesetting on radeon by default - NEW DRIVER"
         depends on DRM_RADEON
         help
-          Choose this option if you want kernel modesetting enabled by default,
-          and you have a new enough userspace to support this. Running old
-          userspaces with this enabled will cause pain.
+          Choose this option if you want kernel modesetting enabled by default.
+
+          This is a completely new driver. It's only part of the existing drm
+          for compatibility reasons. It requires an entirely different graphics
+          stack above it and works very differently from the old drm stack,
+          i.e. don't enable this unless you know what you are doing; it may
+          cause issues or bugs compared to the previous userspace driver stack.
 
           When kernel modesetting is enabled the IOCTL of radeon/drm
           driver are considered as invalid and an error message is printed
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index b5f5fe75e6af..1cc7b937b1ea 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -24,6 +24,9 @@ $(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
 $(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
         $(call if_changed,mkregtable)
 
+$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
+        $(call if_changed,mkregtable)
+
 $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
         $(call if_changed,mkregtable)
 
@@ -35,6 +38,8 @@ $(obj)/rv515.o: $(obj)/rv515_reg_safe.h
 
 $(obj)/r300.o: $(obj)/r300_reg_safe.h
 
+$(obj)/r420.o: $(obj)/r420_reg_safe.h
+
 $(obj)/rs600.o: $(obj)/rs600_reg_safe.h
 
 radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
index 6d0183c61d3b..c714179d1bfa 100644
--- a/drivers/gpu/drm/radeon/ObjectID.h
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -1,5 +1,5 @@
 /*
 * Copyright 2006-2007 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -41,14 +41,14 @@
 /****************************************************/
 /* Encoder Object ID Definition */
 /****************************************************/
 #define ENCODER_OBJECT_ID_NONE                    0x00
 
 /* Radeon Class Display Hardware */
 #define ENCODER_OBJECT_ID_INTERNAL_LVDS           0x01
 #define ENCODER_OBJECT_ID_INTERNAL_TMDS1          0x02
 #define ENCODER_OBJECT_ID_INTERNAL_TMDS2          0x03
 #define ENCODER_OBJECT_ID_INTERNAL_DAC1           0x04
 #define ENCODER_OBJECT_ID_INTERNAL_DAC2           0x05 /* TV/CV DAC */
 #define ENCODER_OBJECT_ID_INTERNAL_SDVOA          0x06
 #define ENCODER_OBJECT_ID_INTERNAL_SDVOB          0x07
 
@@ -56,11 +56,11 @@
 #define ENCODER_OBJECT_ID_SI170B                  0x08
 #define ENCODER_OBJECT_ID_CH7303                  0x09
 #define ENCODER_OBJECT_ID_CH7301                  0x0A
 #define ENCODER_OBJECT_ID_INTERNAL_DVO1           0x0B /* This belongs to Radeon Class Display Hardware */
 #define ENCODER_OBJECT_ID_EXTERNAL_SDVOA          0x0C
 #define ENCODER_OBJECT_ID_EXTERNAL_SDVOB          0x0D
 #define ENCODER_OBJECT_ID_TITFP513                0x0E
 #define ENCODER_OBJECT_ID_INTERNAL_LVTM1          0x0F /* not used for Radeon */
 #define ENCODER_OBJECT_ID_VT1623                  0x10
 #define ENCODER_OBJECT_ID_HDMI_SI1930             0x11
 #define ENCODER_OBJECT_ID_HDMI_INTERNAL           0x12
@@ -68,9 +68,9 @@
 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1   0x13
 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1    0x14
 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1    0x15
 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2    0x16 /* Shared with CV/TV and CRT */
 #define ENCODER_OBJECT_ID_SI178                   0X17 /* External TMDS (dual link, no HDCP.) */
 #define ENCODER_OBJECT_ID_MVPU_FPGA               0x18 /* MVPU FPGA chip */
 #define ENCODER_OBJECT_ID_INTERNAL_DDI            0x19
 #define ENCODER_OBJECT_ID_VT1625                  0x1A
 #define ENCODER_OBJECT_ID_HDMI_SI1932             0x1B
@@ -86,7 +86,7 @@
 /****************************************************/
 /* Connector Object ID Definition */
 /****************************************************/
 #define CONNECTOR_OBJECT_ID_NONE                  0x00
 #define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I     0x01
 #define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I       0x02
 #define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D     0x03
@@ -96,7 +96,7 @@
 #define CONNECTOR_OBJECT_ID_SVIDEO                0x07
 #define CONNECTOR_OBJECT_ID_YPbPr                 0x08
 #define CONNECTOR_OBJECT_ID_D_CONNECTOR           0x09
 #define CONNECTOR_OBJECT_ID_9PIN_DIN              0x0A /* Supports both CV & TV */
 #define CONNECTOR_OBJECT_ID_SCART                 0x0B
 #define CONNECTOR_OBJECT_ID_HDMI_TYPE_A           0x0C
 #define CONNECTOR_OBJECT_ID_HDMI_TYPE_B           0x0D
@@ -106,6 +106,8 @@
 #define CONNECTOR_OBJECT_ID_CROSSFIRE             0x11
 #define CONNECTOR_OBJECT_ID_HARDCODE_DVI          0x12
 #define CONNECTOR_OBJECT_ID_DISPLAYPORT           0x13
+#define CONNECTOR_OBJECT_ID_eDP                   0x14
+#define CONNECTOR_OBJECT_ID_MXM                   0x15
 
 /* deleted */
 
@@ -116,6 +118,14 @@
 #define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL        0x01
 
 /****************************************************/
+/* Generic Object ID Definition */
+/****************************************************/
+#define GENERIC_OBJECT_ID_NONE                    0x00
+#define GENERIC_OBJECT_ID_GLSYNC                  0x01
+#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE        0x02
+#define GENERIC_OBJECT_ID_MXM_OPM                 0x03
+
+/****************************************************/
 /* Graphics Object ENUM ID Definition */
 /****************************************************/
 #define GRAPH_OBJECT_ENUM_ID1                     0x01
@@ -124,6 +134,7 @@
 #define GRAPH_OBJECT_ENUM_ID4                     0x04
 #define GRAPH_OBJECT_ENUM_ID5                     0x05
 #define GRAPH_OBJECT_ENUM_ID6                     0x06
+#define GRAPH_OBJECT_ENUM_ID7                     0x07
 
 /****************************************************/
 /* Graphics Object ID Bit definition */
@@ -133,35 +144,35 @@
 #define RESERVED1_ID_MASK                         0x0800
 #define OBJECT_TYPE_MASK                          0x7000
 #define RESERVED2_ID_MASK                         0x8000
 
 #define OBJECT_ID_SHIFT                           0x00
 #define ENUM_ID_SHIFT                             0x08
 #define OBJECT_TYPE_SHIFT                         0x0C
 
+
 /****************************************************/
 /* Graphics Object family definition */
 /****************************************************/
-#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) \
-        (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
-         GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
+#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
+                                                                           GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
 /****************************************************/
 /* GPU Object ID definition - Shared with BIOS */
 /****************************************************/
-#define GPU_ENUM_ID1                     (GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
+#define GPU_ENUM_ID1                     ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
                                           GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
 
 /****************************************************/
 /* Encoder Object ID definition - Shared with BIOS */
 /****************************************************/
 /*
 #define ENCODER_INTERNAL_LVDS_ENUM_ID1            0x2101
 #define ENCODER_INTERNAL_TMDS1_ENUM_ID1           0x2102
 #define ENCODER_INTERNAL_TMDS2_ENUM_ID1           0x2103
 #define ENCODER_INTERNAL_DAC1_ENUM_ID1            0x2104
 #define ENCODER_INTERNAL_DAC2_ENUM_ID1            0x2105
 #define ENCODER_INTERNAL_SDVOA_ENUM_ID1           0x2106
 #define ENCODER_INTERNAL_SDVOB_ENUM_ID1           0x2107
 #define ENCODER_SIL170B_ENUM_ID1                  0x2108
 #define ENCODER_CH7303_ENUM_ID1                   0x2109
 #define ENCODER_CH7301_ENUM_ID1                   0x210A
 #define ENCODER_INTERNAL_DVO1_ENUM_ID1            0x210B
@@ -175,8 +186,8 @@
 #define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1    0x2113
 #define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1     0x2114
 #define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1     0x2115
 #define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1     0x2116
 #define ENCODER_SI178_ENUM_ID1                    0x2117
 #define ENCODER_MVPU_FPGA_ENUM_ID1                0x2118
 #define ENCODER_INTERNAL_DDI_ENUM_ID1             0x2119
 #define ENCODER_VT1625_ENUM_ID1                   0x211A
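
For orientation, the shift constants in the previous hunk compose a 16-bit graphics object ID as type | enum | id. A worked check against one of the commented-out legacy values (GRAPH_OBJECT_TYPE_ENCODER's value is not shown in this hunk; 0x2 is my assumption):

    #include <assert.h>
    #include <stdint.h>

    #define OBJECT_ID_SHIFT   0x00
    #define ENUM_ID_SHIFT     0x08
    #define OBJECT_TYPE_SHIFT 0x0C

    #define GRAPH_OBJECT_TYPE_ENCODER       0x2  /* assumed, not in hunk */
    #define GRAPH_OBJECT_ENUM_ID1           0x01
    #define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01

    int main(void)
    {
            uint32_t id = GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |
                          GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |
                          ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT;
            /* Matches the commented-out legacy value 0x2101 above. */
            assert(id == 0x2101);
            return 0;
    }
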
@@ -185,205 +196,169 @@
 #define ENCODER_DP_DP501_ENUM_ID1                 0x211D
 #define ENCODER_INTERNAL_UNIPHY_ENUM_ID1          0x211E
 */
-#define ENCODER_INTERNAL_LVDS_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                         ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
 
-#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                          GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                          ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
 
-#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                          GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                          ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
 
-#define ENCODER_INTERNAL_DAC1_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                         ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
 
-#define ENCODER_INTERNAL_DAC2_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                         ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
 
-#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                          GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                          ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
 
-#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                          GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                          ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
 
-#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                          GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                          ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
 
-#define ENCODER_SIL170B_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
+#define ENCODER_SIL170B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                   ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
 
-#define ENCODER_CH7303_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
+#define ENCODER_CH7303_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                  ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
 
-#define ENCODER_CH7301_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
+#define ENCODER_CH7301_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                  ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
 
-#define ENCODER_INTERNAL_DVO1_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                         ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
 
-#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                          GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                          ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
 
-#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                          GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                          ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
 
+
-#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                          GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                          ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
 
+
-#define ENCODER_TITFP513_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
+#define ENCODER_TITFP513_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                    GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                    ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
 
-#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                          GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                          ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
 
-#define ENCODER_VT1623_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
+#define ENCODER_VT1623_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                  ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
 
-#define ENCODER_HDMI_SI1930_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
+#define ENCODER_HDMI_SI1930_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                       GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                       ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
 
-#define ENCODER_HDMI_INTERNAL_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                         ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
 
-#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
 
+
-#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
 
+
-#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
 
-#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
 
-#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) /* Shared with CV/TV and CRT */
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) // Shared with CV/TV and CRT
 
-#define ENCODER_SI178_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
+#define ENCODER_SI178_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                 ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
 
-#define ENCODER_MVPU_FPGA_ENUM_ID1 \
-        (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-         GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-         ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
+#define ENCODER_MVPU_FPGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                     GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                     ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
 
-#define ENCODER_INTERNAL_DDI_ENUM_ID1 \
+#define ENCODER_INTERNAL_DDI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                        GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                        ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1625_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                  ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1932_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                       GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                       ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_DP501_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                    GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                    ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_AN9801_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                     GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                     ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                           GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                           ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
324 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 335#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
325 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 336 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
326 ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT) 337 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
327 338
328#define ENCODER_VT1625_ENUM_ID1 \ 339#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
329 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 340 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
330 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 341 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
331 ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT) 342
332 343#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
333#define ENCODER_HDMI_SI1932_ENUM_ID1 \ 344 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
334 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 345 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
335 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 346
336 ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT) 347#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
337 348 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
338#define ENCODER_DP_DP501_ENUM_ID1 \ 349 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
339 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 350
340 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 351#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
341 ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT) 352 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
342 353 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
343#define ENCODER_DP_AN9801_ENUM_ID1 \ 354
344 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 355#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
345 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 356 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
346 ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT) 357 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
347 358
348#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 \ 359#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
349 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ 360 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
350 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 361 ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
351 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
352
353#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 \
354 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
355 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
356 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
357
358#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 \
359 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
360 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
361 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
362
363#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 \
364 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
365 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
366 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
367
368#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 \
369 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
370 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
371 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
372
373#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 \
374 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
375 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
376 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
377
378#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 \
379 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
380 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
381 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
382
383#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 \
384 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
385 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
386 ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
387 362
388/****************************************************/ 363/****************************************************/
389/* Connector Object ID definition - Shared with BIOS */ 364/* Connector Object ID definition - Shared with BIOS */
@@ -406,167 +381,253 @@
406#define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F 381#define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F
407#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110 382#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110
408*/ 383*/
409#define CONNECTOR_LVDS_ENUM_ID1 \ 384#define CONNECTOR_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
410 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 385 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
411 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 386 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
412 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT) 387
413 388#define CONNECTOR_LVDS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
414#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 \ 389 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
415 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 390 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
416 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 391
417 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT) 392#define CONNECTOR_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
418 393 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
419#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 \ 394 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
420 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 395
421 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 396#define CONNECTOR_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
422 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT) 397 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
423 398 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
424#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 \ 399
425 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 400#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
426 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 401 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
427 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT) 402 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
428 403
429#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 \ 404#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
430 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 405 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
431 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 406 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
432 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT) 407
433 408#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
434#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 \ 409 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
435 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 410 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
436 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 411
437 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) 412#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
438 413 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
439#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 \ 414 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
440 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 415
441 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 416#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
442 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) 417 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
443 418 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
444#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 \ 419
445 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 420#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
446 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 421 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
447 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) 422 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
448 423
449#define CONNECTOR_VGA_ENUM_ID1 \ 424#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
450 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 425 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
451 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 426 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
452 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) 427
453 428#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
454#define CONNECTOR_VGA_ENUM_ID2 \ 429 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
455 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 430 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
456 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 431
457 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) 432#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
458 433 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
459#define CONNECTOR_COMPOSITE_ENUM_ID1 \ 434 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
460 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 435
461 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 436#define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
462 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT) 437 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
463 438 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
464#define CONNECTOR_SVIDEO_ENUM_ID1 \ 439
465 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 440#define CONNECTOR_VGA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
466 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 441 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
467 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT) 442 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
468 443
469#define CONNECTOR_YPbPr_ENUM_ID1 \ 444#define CONNECTOR_COMPOSITE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
470 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 445 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
471 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 446 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
472 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT) 447
473 448#define CONNECTOR_COMPOSITE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
474#define CONNECTOR_D_CONNECTOR_ENUM_ID1 \ 449 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
475 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 450 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
476 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 451
477 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT) 452#define CONNECTOR_SVIDEO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
478 453 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
479#define CONNECTOR_9PIN_DIN_ENUM_ID1 \ 454 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
480 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 455
481 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 456#define CONNECTOR_SVIDEO_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
482 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT) 457 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
483 458 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
484#define CONNECTOR_SCART_ENUM_ID1 \ 459
485 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 460#define CONNECTOR_YPbPr_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
486 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 461 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
487 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT) 462 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
488 463
489#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 \ 464#define CONNECTOR_YPbPr_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
490 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 465 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
491 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 466 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
492 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT) 467
493 468#define CONNECTOR_D_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
494#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 \ 469 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
495 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 470 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
496 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 471
497 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT) 472#define CONNECTOR_D_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
498 473 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
499#define CONNECTOR_7PIN_DIN_ENUM_ID1 \ 474 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
500 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 475
501 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 476#define CONNECTOR_9PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
502 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) 477 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
503 478 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
504#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 \ 479
505 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 480#define CONNECTOR_9PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
506 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 481 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
507 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT) 482 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
508 483
509#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 \ 484#define CONNECTOR_SCART_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
510 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 485 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
511 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 486 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
512 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT) 487
513 488#define CONNECTOR_SCART_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
514#define CONNECTOR_CROSSFIRE_ENUM_ID1 \ 489 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
515 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 490 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
516 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 491
517 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT) 492#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
518 493 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
519#define CONNECTOR_CROSSFIRE_ENUM_ID2 \ 494 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
520 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 495
521 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 496#define CONNECTOR_HDMI_TYPE_A_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
522 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT) 497 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
523 498 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
524#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 \ 499
525 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 500#define CONNECTOR_HDMI_TYPE_A_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
526 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 501 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
527 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT) 502 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
528 503
529#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 \ 504#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
530 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 505 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
531 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 506 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
532 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT) 507
533 508#define CONNECTOR_HDMI_TYPE_B_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
534#define CONNECTOR_DISPLAYPORT_ENUM_ID1 \ 509 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
535 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 510 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
536 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 511
537 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) 512#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
538 513 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
539#define CONNECTOR_DISPLAYPORT_ENUM_ID2 \ 514 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
540 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 515#define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
541 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 516 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
542 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) 517 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
543 518
544#define CONNECTOR_DISPLAYPORT_ENUM_ID3 \ 519#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
545 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 520 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
546 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ 521 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
547 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) 522
548 523#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
549#define CONNECTOR_DISPLAYPORT_ENUM_ID4 \ 524 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
550 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 525 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
551 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\ 526
552 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) 527#define CONNECTOR_CROSSFIRE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
528 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
529 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
530
531#define CONNECTOR_CROSSFIRE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
532 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
533 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
534
535
536#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
537 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
538 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
539
540#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
541 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
542 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
543
544#define CONNECTOR_DISPLAYPORT_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
545 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
546 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
547
548#define CONNECTOR_DISPLAYPORT_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
549 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
550 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
551
552#define CONNECTOR_DISPLAYPORT_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
553 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
554 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
555
556#define CONNECTOR_DISPLAYPORT_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
557 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
558 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
559
560#define CONNECTOR_DISPLAYPORT_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
561 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
562 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
563
564#define CONNECTOR_DISPLAYPORT_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
565 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
566 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
567
568#define CONNECTOR_MXM_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
569 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
570 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_A
571
572#define CONNECTOR_MXM_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
573 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
574 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_B
575
576#define CONNECTOR_MXM_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
577 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
578 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_C
579
580#define CONNECTOR_MXM_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
581 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
582 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_D
583
584#define CONNECTOR_MXM_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
585 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
586 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_TXxx
587
588#define CONNECTOR_MXM_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
589 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
590 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_UXxx
591
592#define CONNECTOR_MXM_ENUM_ID7 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
593 GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
594 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC
553 595
554/****************************************************/ 596/****************************************************/
555/* Router Object ID definition - Shared with BIOS */ 597/* Router Object ID definition - Shared with BIOS */
556/****************************************************/ 598/****************************************************/
557#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 \ 599#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
558 (GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\ 600 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
559 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 601 ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
560 ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
561 602
562/* deleted */ 603/* deleted */
563 604
564/****************************************************/ 605/****************************************************/
606/* Generic Object ID definition - Shared with BIOS */
607/****************************************************/
608#define GENERICOBJECT_GLSYNC_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
609 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
610 GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT)
611
612#define GENERICOBJECT_PX2_NON_DRIVABLE_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
613 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
614 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
615
616#define GENERICOBJECT_PX2_NON_DRIVABLE_ID2 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
617 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
618 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
619
620#define GENERICOBJECT_MXM_OPM_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
621 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
622 GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
623
624/****************************************************/
565/* Object Cap definition - Shared with BIOS */ 625/* Object Cap definition - Shared with BIOS */
566/****************************************************/ 626/****************************************************/
567#define GRAPHICS_OBJECT_CAP_I2C 0x00000001L 627#define GRAPHICS_OBJECT_CAP_I2C 0x00000001L
568#define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L 628#define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L
569 629
630
570#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01 631#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01
571#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02 632#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02
572#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03 633#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03
@@ -575,4 +636,8 @@
575#pragma pack() 636#pragma pack()
576#endif 637#endif
577 638
578#endif /*GRAPHICTYPE */ 639#endif /*GRAPHICTYPE */
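
The ObjectID.h hunks above resync the header with AMD's upstream copy: macro bodies move up onto the definition line, and new IDs arrive for eDP and MXM connectors, extra DisplayPort/HDMI/DVI enum instances, and the generic GLSync/PX2/MXM-OPM objects. Every *_ENUM_IDn constant packs three fields, an object type, an enum (instance) index and an object id, into a single 16-bit value shared with the BIOS. A minimal decode sketch; graph_obj_decode() is a hypothetical helper, while the shift/mask constants are the ones this header defines alongside the macros:

	static inline void graph_obj_decode(u16 id, u8 *type,
					    u8 *enum_id, u8 *obj_id)
	{
		*type    = (id & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
		*enum_id = (id & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
		*obj_id  = (id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	}

CONNECTOR_HDMI_TYPE_A_ENUM_ID2, for example, decodes back to (GRAPH_OBJECT_TYPE_CONNECTOR, GRAPH_OBJECT_ENUM_ID2, CONNECTOR_OBJECT_ID_HDMI_TYPE_A).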
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 388140a7e651..7f152f66f196 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <asm/unaligned.h>
27 28
28#define ATOM_DEBUG 29#define ATOM_DEBUG
29 30
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
212 case ATOM_ARG_PS: 213 case ATOM_ARG_PS:
213 idx = U8(*ptr); 214 idx = U8(*ptr);
214 (*ptr)++; 215 (*ptr)++;
215 val = le32_to_cpu(ctx->ps[idx]); 216 /* get_unaligned_le32 avoids unaligned accesses from atombios
217 * tables, noticed on a DEC Alpha. */
218 val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
216 if (print) 219 if (print)
217 DEBUG("PS[0x%02X,0x%04X]", idx, val); 220 DEBUG("PS[0x%02X,0x%04X]", idx, val);
218 break; 221 break;
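
The get_unaligned_le32() hunk above is an alignment fix rather than a behavior change: ctx->ps points into caller-supplied parameter space that can sit at any byte offset within an AtomBIOS table, so &ctx->ps[idx] is not guaranteed to be 32-bit aligned. On strict-alignment machines a plain word load faults or, as on the Alpha where this was noticed, traps into a slow fixup path that logs unaligned-access warnings. The contrast, sketched over an arbitrary byte pointer p:

	u32 racy = le32_to_cpu(*(__le32 *)p);	/* traps if p is not
						 * 4-byte aligned       */
	u32 safe = get_unaligned_le32(p);	/* byte-wise fallback on
						 * strict-alignment CPUs */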
@@ -246,6 +249,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
246 case ATOM_WS_ATTRIBUTES: 249 case ATOM_WS_ATTRIBUTES:
247 val = gctx->io_attr; 250 val = gctx->io_attr;
248 break; 251 break;
252 case ATOM_WS_REGPTR:
253 val = gctx->reg_block;
254 break;
249 default: 255 default:
250 val = ctx->ws[idx]; 256 val = ctx->ws[idx];
251 } 257 }
@@ -385,6 +391,32 @@ static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
385 return atom_get_src_int(ctx, attr, ptr, NULL, 1); 391 return atom_get_src_int(ctx, attr, ptr, NULL, 1);
386} 392}
387 393
394static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
395{
396 uint32_t val = 0xCDCDCDCD;
397
398 switch (align) {
399 case ATOM_SRC_DWORD:
400 val = U32(*ptr);
401 (*ptr) += 4;
402 break;
403 case ATOM_SRC_WORD0:
404 case ATOM_SRC_WORD8:
405 case ATOM_SRC_WORD16:
406 val = U16(*ptr);
407 (*ptr) += 2;
408 break;
409 case ATOM_SRC_BYTE0:
410 case ATOM_SRC_BYTE8:
411 case ATOM_SRC_BYTE16:
412 case ATOM_SRC_BYTE24:
413 val = U8(*ptr);
414 (*ptr)++;
415 break;
416 }
417 return val;
418}
419
388static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, 420static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
389 int *ptr, uint32_t *saved, int print) 421 int *ptr, uint32_t *saved, int print)
390{ 422{
@@ -482,6 +514,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
482 case ATOM_WS_ATTRIBUTES: 514 case ATOM_WS_ATTRIBUTES:
483 gctx->io_attr = val; 515 gctx->io_attr = val;
484 break; 516 break;
517 case ATOM_WS_REGPTR:
518 gctx->reg_block = val;
519 break;
485 default: 520 default:
486 ctx->ws[idx] = val; 521 ctx->ws[idx] = val;
487 } 522 }
@@ -608,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
608 uint8_t count = U8((*ptr)++); 643 uint8_t count = U8((*ptr)++);
609 SDEBUG(" count: %d\n", count); 644 SDEBUG(" count: %d\n", count);
610 if (arg == ATOM_UNIT_MICROSEC) 645 if (arg == ATOM_UNIT_MICROSEC)
611 schedule_timeout_uninterruptible(usecs_to_jiffies(count)); 646 udelay(count);
612 else 647 else
613 schedule_timeout_uninterruptible(msecs_to_jiffies(count)); 648 schedule_timeout_uninterruptible(msecs_to_jiffies(count));
614} 649}
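
Switching the microsecond case to udelay() matters because the scheduler tick is far coarser than a microsecond: usecs_to_jiffies() rounds any non-zero value up to at least one whole jiffy, so every ATOM_UNIT_MICROSEC delay in a command table used to sleep for milliseconds. Concretely:

	/* With HZ=250 a jiffy is 4 ms, so the old path slept >= 4 ms: */
	schedule_timeout_uninterruptible(usecs_to_jiffies(5)); /* 1 jiffy */
	/* the new path busy-waits only for the requested time:        */
	udelay(5);					       /* ~5 us   */

Since count is a u8, the busy-wait is bounded at 255 us.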
@@ -677,7 +712,7 @@ static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
677 SDEBUG(" dst: "); 712 SDEBUG(" dst: ");
678 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 713 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
679 SDEBUG(" src1: "); 714 SDEBUG(" src1: ");
680 src1 = atom_get_src(ctx, attr, ptr); 715 src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
681 SDEBUG(" src2: "); 716 SDEBUG(" src2: ");
682 src2 = atom_get_src(ctx, attr, ptr); 717 src2 = atom_get_src(ctx, attr, ptr);
683 dst &= src1; 718 dst &= src1;
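
The atom_op_mask() change reflects the opcode encoding: the first source of a mask operation is an inline constant stored in the table at the width selected by attr bits [5:3], not a general operand reference, so it has to be read straight from the byte stream. A sketch of the field split, using the ATOM_SRC_* selectors that atom_get_src_direct() above accepts:

	uint8_t attr  = U8((*ptr)++);
	uint8_t align = (attr >> 3) & 7;	/* ATOM_SRC_DWORD, _WORD0/8/16
						 * or _BYTE0/8/16/24         */
	uint32_t src1 = atom_get_src_direct(ctx, align, ptr);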
@@ -809,6 +844,38 @@ static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
809 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); 844 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
810} 845}
811 846
847static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
848{
849 uint8_t attr = U8((*ptr)++), shift;
850 uint32_t saved, dst;
851 int dptr = *ptr;
852 attr &= 0x38;
853 attr |= atom_def_dst[attr >> 3] << 6;
854 SDEBUG(" dst: ");
855 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
856 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
857 SDEBUG(" shift: %d\n", shift);
858 dst <<= shift;
859 SDEBUG(" dst: ");
860 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
861}
862
863static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
864{
865 uint8_t attr = U8((*ptr)++), shift;
866 uint32_t saved, dst;
867 int dptr = *ptr;
868 attr &= 0x38;
869 attr |= atom_def_dst[attr >> 3] << 6;
870 SDEBUG(" dst: ");
871 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
872 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
873 SDEBUG(" shift: %d\n", shift);
874 dst >>= shift;
875 SDEBUG(" dst: ");
876 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
877}
878
812static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) 879static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
813{ 880{
814 uint8_t attr = U8((*ptr)++), shift; 881 uint8_t attr = U8((*ptr)++), shift;
@@ -818,7 +885,7 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
818 attr |= atom_def_dst[attr >> 3] << 6; 885 attr |= atom_def_dst[attr >> 3] << 6;
819 SDEBUG(" dst: "); 886 SDEBUG(" dst: ");
820 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 887 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
821 shift = U8((*ptr)++); 888 shift = atom_get_src(ctx, attr, ptr);
822 SDEBUG(" shift: %d\n", shift); 889 SDEBUG(" shift: %d\n", shift);
823 dst <<= shift; 890 dst <<= shift;
824 SDEBUG(" dst: "); 891 SDEBUG(" dst: ");
@@ -834,7 +901,7 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
834 attr |= atom_def_dst[attr >> 3] << 6; 901 attr |= atom_def_dst[attr >> 3] << 6;
835 SDEBUG(" dst: "); 902 SDEBUG(" dst: ");
836 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 903 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
837 shift = U8((*ptr)++); 904 shift = atom_get_src(ctx, attr, ptr);
838 SDEBUG(" shift: %d\n", shift); 905 SDEBUG(" shift: %d\n", shift);
839 dst >>= shift; 906 dst >>= shift;
840 SDEBUG(" dst: "); 907 SDEBUG(" dst: ");
@@ -937,18 +1004,18 @@ static struct {
937 atom_op_or, ATOM_ARG_FB}, { 1004 atom_op_or, ATOM_ARG_FB}, {
938 atom_op_or, ATOM_ARG_PLL}, { 1005 atom_op_or, ATOM_ARG_PLL}, {
939 atom_op_or, ATOM_ARG_MC}, { 1006 atom_op_or, ATOM_ARG_MC}, {
940 atom_op_shl, ATOM_ARG_REG}, { 1007 atom_op_shift_left, ATOM_ARG_REG}, {
941 atom_op_shl, ATOM_ARG_PS}, { 1008 atom_op_shift_left, ATOM_ARG_PS}, {
942 atom_op_shl, ATOM_ARG_WS}, { 1009 atom_op_shift_left, ATOM_ARG_WS}, {
943 atom_op_shl, ATOM_ARG_FB}, { 1010 atom_op_shift_left, ATOM_ARG_FB}, {
944 atom_op_shl, ATOM_ARG_PLL}, { 1011 atom_op_shift_left, ATOM_ARG_PLL}, {
945 atom_op_shl, ATOM_ARG_MC}, { 1012 atom_op_shift_left, ATOM_ARG_MC}, {
946 atom_op_shr, ATOM_ARG_REG}, { 1013 atom_op_shift_right, ATOM_ARG_REG}, {
947 atom_op_shr, ATOM_ARG_PS}, { 1014 atom_op_shift_right, ATOM_ARG_PS}, {
948 atom_op_shr, ATOM_ARG_WS}, { 1015 atom_op_shift_right, ATOM_ARG_WS}, {
949 atom_op_shr, ATOM_ARG_FB}, { 1016 atom_op_shift_right, ATOM_ARG_FB}, {
950 atom_op_shr, ATOM_ARG_PLL}, { 1017 atom_op_shift_right, ATOM_ARG_PLL}, {
951 atom_op_shr, ATOM_ARG_MC}, { 1018 atom_op_shift_right, ATOM_ARG_MC}, {
952 atom_op_mul, ATOM_ARG_REG}, { 1019 atom_op_mul, ATOM_ARG_REG}, {
953 atom_op_mul, ATOM_ARG_PS}, { 1020 atom_op_mul, ATOM_ARG_PS}, {
954 atom_op_mul, ATOM_ARG_WS}, { 1021 atom_op_mul, ATOM_ARG_WS}, {
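
The opcode-table rewiring above is the point of the shift changes: ATOM appears to define two shift families that the driver had conflated. SHIFT_LEFT/SHIFT_RIGHT carry the count as a one-byte immediate, now handled by atom_op_shift_left()/atom_op_shift_right() via atom_get_src_direct(ATOM_SRC_BYTE0, ...), while SHL/SHR carry a full source operand, so atom_op_shl()/atom_op_shr() switch to atom_get_src(). The two encodings this assumes, side by side:

	/* SHIFT_LEFT / SHIFT_RIGHT:
	 *	[opcode][attr][dst bytes ...][count: 1 immediate byte]
	 * SHL / SHR:
	 *	[opcode][attr][dst bytes ...][src operand, full decode] */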
@@ -1058,8 +1125,6 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1058 1125
1059 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); 1126 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1060 1127
1061 /* reset reg block */
1062 ctx->reg_block = 0;
1063 ectx.ctx = ctx; 1128 ectx.ctx = ctx;
1064 ectx.ps_shift = ps / 4; 1129 ectx.ps_shift = ps / 4;
1065 ectx.start = base; 1130 ectx.start = base;
@@ -1096,6 +1161,12 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1096void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) 1161void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1097{ 1162{
1098 mutex_lock(&ctx->mutex); 1163 mutex_lock(&ctx->mutex);
1164 /* reset reg block */
1165 ctx->reg_block = 0;
1166 /* reset fb window */
1167 ctx->fb_base = 0;
1168 /* reset io mode */
1169 ctx->io_mode = ATOM_IO_MM;
1099 atom_execute_table_locked(ctx, index, params); 1170 atom_execute_table_locked(ctx, index, params);
1100 mutex_unlock(&ctx->mutex); 1171 mutex_unlock(&ctx->mutex);
1101} 1172}
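
Dropping the reg_block reset from atom_execute_table_locked() and re-initializing reg_block, fb_base and io_mode in atom_execute_table() above is about recursion: CALL_TABLE opcodes re-enter the locked helper, and a nested table must inherit the register block, framebuffer window and IO mode its caller selected rather than having them wiped mid-sequence. Only the outermost, mutex-taking entry point starts from known defaults. In outline:

	/* Call structure this change assumes:
	 *
	 * atom_execute_table()			resets reg_block/fb_base/io_mode
	 *   atom_execute_table_locked()	no reset
	 *     atom_op_calltable()
	 *       atom_execute_table_locked()	nested: state preserved
	 */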
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 47fd943f6d14..bc73781423a1 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -91,6 +91,7 @@
91#define ATOM_WS_AND_MASK 0x45 91#define ATOM_WS_AND_MASK 0x45
92#define ATOM_WS_FB_WINDOW 0x46 92#define ATOM_WS_FB_WINDOW 0x46
93#define ATOM_WS_ATTRIBUTES 0x47 93#define ATOM_WS_ATTRIBUTES 0x47
94#define ATOM_WS_REGPTR 0x48
94 95
95#define ATOM_IIO_NOP 0 96#define ATOM_IIO_NOP 0
96#define ATOM_IIO_START 1 97#define ATOM_IIO_START 1
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 260fcf59f00c..af464e351fbd 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -307,7 +307,6 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
307 args.susModeMiscInfo.usAccess = cpu_to_le16(misc); 307 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
308 args.ucCRTC = radeon_crtc->crtc_id; 308 args.ucCRTC = radeon_crtc->crtc_id;
309 309
310 printk("executing set crtc dtd timing\n");
311 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 310 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
312} 311}
313 312
@@ -347,7 +346,6 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
347 args.susModeMiscInfo.usAccess = cpu_to_le16(misc); 346 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
348 args.ucCRTC = radeon_crtc->crtc_id; 347 args.ucCRTC = radeon_crtc->crtc_id;
349 348
350 printk("executing set crtc timing\n");
351 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 349 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
352} 350}
353 351
@@ -409,59 +407,57 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
409 } 407 }
410} 408}
411 409
412void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) 410union adjust_pixel_clock {
411 ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
412};
413
414static u32 atombios_adjust_pll(struct drm_crtc *crtc,
415 struct drm_display_mode *mode,
416 struct radeon_pll *pll)
413{ 417{
414 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
415 struct drm_device *dev = crtc->dev; 418 struct drm_device *dev = crtc->dev;
416 struct radeon_device *rdev = dev->dev_private; 419 struct radeon_device *rdev = dev->dev_private;
417 struct drm_encoder *encoder = NULL; 420 struct drm_encoder *encoder = NULL;
418 struct radeon_encoder *radeon_encoder = NULL; 421 struct radeon_encoder *radeon_encoder = NULL;
419 uint8_t frev, crev; 422 u32 adjusted_clock = mode->clock;
420 int index;
421 SET_PIXEL_CLOCK_PS_ALLOCATION args;
422 PIXEL_CLOCK_PARAMETERS *spc1_ptr;
423 PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
424 PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
425 uint32_t pll_clock = mode->clock;
426 uint32_t adjusted_clock;
427 uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
428 struct radeon_pll *pll;
429 int pll_flags = 0;
430 423
431 memset(&args, 0, sizeof(args)); 424 /* reset the pll flags */
425 pll->flags = 0;
432 426
433 if (ASIC_IS_AVIVO(rdev)) { 427 if (ASIC_IS_AVIVO(rdev)) {
434 if ((rdev->family == CHIP_RS600) || 428 if ((rdev->family == CHIP_RS600) ||
435 (rdev->family == CHIP_RS690) || 429 (rdev->family == CHIP_RS690) ||
436 (rdev->family == CHIP_RS740)) 430 (rdev->family == CHIP_RS740))
437 pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV | 431 pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
438 RADEON_PLL_PREFER_CLOSEST_LOWER); 432 RADEON_PLL_PREFER_CLOSEST_LOWER);
439 433
440 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ 434 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
441 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 435 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
442 else 436 else
443 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 437 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
444 } else { 438 } else {
445 pll_flags |= RADEON_PLL_LEGACY; 439 pll->flags |= RADEON_PLL_LEGACY;
446 440
447 if (mode->clock > 200000) /* range limits??? */ 441 if (mode->clock > 200000) /* range limits??? */
448 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 442 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
449 else 443 else
450 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 444 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
451 445
452 } 446 }
453 447
454 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 448 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
455 if (encoder->crtc == crtc) { 449 if (encoder->crtc == crtc) {
456 if (!ASIC_IS_AVIVO(rdev)) {
457 if (encoder->encoder_type !=
458 DRM_MODE_ENCODER_DAC)
459 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
460 if (encoder->encoder_type ==
461 DRM_MODE_ENCODER_LVDS)
462 pll_flags |= RADEON_PLL_USE_REF_DIV;
463 }
464 radeon_encoder = to_radeon_encoder(encoder); 450 radeon_encoder = to_radeon_encoder(encoder);
451 if (ASIC_IS_AVIVO(rdev)) {
452 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
453 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
454 adjusted_clock = mode->clock * 2;
455 } else {
456 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
457 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
458 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
459 pll->flags |= RADEON_PLL_USE_REF_DIV;
460 }
465 break; 461 break;
466 } 462 }
467 } 463 }
@@ -471,46 +467,101 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
471 * special hw requirements. 467 * special hw requirements.
472 */ 468 */
473 if (ASIC_IS_DCE3(rdev)) { 469 if (ASIC_IS_DCE3(rdev)) {
474 ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args; 470 union adjust_pixel_clock args;
471 struct radeon_encoder_atom_dig *dig;
472 u8 frev, crev;
473 int index;
475 474
476 if (!encoder) 475 if (!radeon_encoder->enc_priv)
477 return; 476 return adjusted_clock;
478 477 dig = radeon_encoder->enc_priv;
479 memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
480 adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
481 adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
482 adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
483 478
484 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); 479 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
485 atom_execute_table(rdev->mode_info.atom_context, 480 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
486 index, (uint32_t *)&adjust_pll_args); 481 &crev);
487 adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10; 482
488 } else { 483 memset(&args, 0, sizeof(args));
489 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 484
490 if (ASIC_IS_AVIVO(rdev) && 485 switch (frev) {
491 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)) 486 case 1:
492 adjusted_clock = mode->clock * 2; 487 switch (crev) {
493 else 488 case 1:
494 adjusted_clock = mode->clock; 489 case 2:
490 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
491 args.v1.ucTransmitterID = radeon_encoder->encoder_id;
492 args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
493
494 atom_execute_table(rdev->mode_info.atom_context,
495 index, (uint32_t *)&args);
496 adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
497 break;
498 default:
499 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
500 return adjusted_clock;
501 }
502 break;
503 default:
504 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
505 return adjusted_clock;
506 }
495 } 507 }
508 return adjusted_clock;
509}
510
511union set_pixel_clock {
512 SET_PIXEL_CLOCK_PS_ALLOCATION base;
513 PIXEL_CLOCK_PARAMETERS v1;
514 PIXEL_CLOCK_PARAMETERS_V2 v2;
515 PIXEL_CLOCK_PARAMETERS_V3 v3;
516};
517
518void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
519{
520 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
521 struct drm_device *dev = crtc->dev;
522 struct radeon_device *rdev = dev->dev_private;
523 struct drm_encoder *encoder = NULL;
524 struct radeon_encoder *radeon_encoder = NULL;
525 u8 frev, crev;
526 int index;
527 union set_pixel_clock args;
528 u32 pll_clock = mode->clock;
529 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
530 struct radeon_pll *pll;
531 u32 adjusted_clock;
532
533 memset(&args, 0, sizeof(args));
534
535 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
536 if (encoder->crtc == crtc) {
537 radeon_encoder = to_radeon_encoder(encoder);
538 break;
539 }
540 }
541
542 if (!radeon_encoder)
543 return;
496 544
497 if (radeon_crtc->crtc_id == 0) 545 if (radeon_crtc->crtc_id == 0)
498 pll = &rdev->clock.p1pll; 546 pll = &rdev->clock.p1pll;
499 else 547 else
500 pll = &rdev->clock.p2pll; 548 pll = &rdev->clock.p2pll;
501 549
550 /* adjust pixel clock as needed */
551 adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
552
502 if (ASIC_IS_AVIVO(rdev)) { 553 if (ASIC_IS_AVIVO(rdev)) {
503 if (radeon_new_pll) 554 if (radeon_new_pll)
504 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, 555 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
505 &fb_div, &frac_fb_div, 556 &fb_div, &frac_fb_div,
506 &ref_div, &post_div, pll_flags); 557 &ref_div, &post_div);
507 else 558 else
508 radeon_compute_pll(pll, adjusted_clock, &pll_clock, 559 radeon_compute_pll(pll, adjusted_clock, &pll_clock,
509 &fb_div, &frac_fb_div, 560 &fb_div, &frac_fb_div,
510 &ref_div, &post_div, pll_flags); 561 &ref_div, &post_div);
511 } else 562 } else
512 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 563 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
513 &ref_div, &post_div, pll_flags); 564 &ref_div, &post_div);
514 565
515 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 566 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
516 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 567 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -520,45 +571,38 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
520 case 1: 571 case 1:
521 switch (crev) { 572 switch (crev) {
522 case 1: 573 case 1:
523 spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput; 574 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
524 spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 575 args.v1.usRefDiv = cpu_to_le16(ref_div);
525 spc1_ptr->usRefDiv = cpu_to_le16(ref_div); 576 args.v1.usFbDiv = cpu_to_le16(fb_div);
526 spc1_ptr->usFbDiv = cpu_to_le16(fb_div); 577 args.v1.ucFracFbDiv = frac_fb_div;
527 spc1_ptr->ucFracFbDiv = frac_fb_div; 578 args.v1.ucPostDiv = post_div;
528 spc1_ptr->ucPostDiv = post_div; 579 args.v1.ucPpll =
529 spc1_ptr->ucPpll =
530 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 580 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
531 spc1_ptr->ucCRTC = radeon_crtc->crtc_id; 581 args.v1.ucCRTC = radeon_crtc->crtc_id;
532 spc1_ptr->ucRefDivSrc = 1; 582 args.v1.ucRefDivSrc = 1;
533 break; 583 break;
534 case 2: 584 case 2:
535 spc2_ptr = 585 args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
536 (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput; 586 args.v2.usRefDiv = cpu_to_le16(ref_div);
537 spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 587 args.v2.usFbDiv = cpu_to_le16(fb_div);
538 spc2_ptr->usRefDiv = cpu_to_le16(ref_div); 588 args.v2.ucFracFbDiv = frac_fb_div;
539 spc2_ptr->usFbDiv = cpu_to_le16(fb_div); 589 args.v2.ucPostDiv = post_div;
540 spc2_ptr->ucFracFbDiv = frac_fb_div; 590 args.v2.ucPpll =
541 spc2_ptr->ucPostDiv = post_div;
542 spc2_ptr->ucPpll =
543 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 591 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
544 spc2_ptr->ucCRTC = radeon_crtc->crtc_id; 592 args.v2.ucCRTC = radeon_crtc->crtc_id;
545 spc2_ptr->ucRefDivSrc = 1; 593 args.v2.ucRefDivSrc = 1;
546 break; 594 break;
547 case 3: 595 case 3:
548 if (!encoder) 596 args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
549 return; 597 args.v3.usRefDiv = cpu_to_le16(ref_div);
550 spc3_ptr = 598 args.v3.usFbDiv = cpu_to_le16(fb_div);
551 (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput; 599 args.v3.ucFracFbDiv = frac_fb_div;
552 spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 600 args.v3.ucPostDiv = post_div;
553 spc3_ptr->usRefDiv = cpu_to_le16(ref_div); 601 args.v3.ucPpll =
554 spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
555 spc3_ptr->ucFracFbDiv = frac_fb_div;
556 spc3_ptr->ucPostDiv = post_div;
557 spc3_ptr->ucPpll =
558 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 602 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
559 spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2); 603 args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
560 spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id; 604 args.v3.ucTransmitterId = radeon_encoder->encoder_id;
561 spc3_ptr->ucEncoderMode = 605 args.v3.ucEncoderMode =
562 atombios_get_encoder_mode(encoder); 606 atombios_get_encoder_mode(encoder);
563 break; 607 break;
564 default: 608 default:
@@ -571,12 +615,11 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
571 return; 615 return;
572 } 616 }
573 617
574 printk("executing set pll\n");
575 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 618 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
576} 619}
577 620
578int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, 621static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
579 struct drm_framebuffer *old_fb) 622 struct drm_framebuffer *old_fb)
580{ 623{
581 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 624 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
582 struct drm_device *dev = crtc->dev; 625 struct drm_device *dev = crtc->dev;
@@ -706,6 +749,42 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
706 return 0; 749 return 0;
707} 750}
708 751
752int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
753 struct drm_framebuffer *old_fb)
754{
755 struct drm_device *dev = crtc->dev;
756 struct radeon_device *rdev = dev->dev_private;
757
758 if (ASIC_IS_AVIVO(rdev))
759 return avivo_crtc_set_base(crtc, x, y, old_fb);
760 else
761 return radeon_crtc_set_base(crtc, x, y, old_fb);
762}
763
764/* properly set additional regs when using atombios */
765static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
766{
767 struct drm_device *dev = crtc->dev;
768 struct radeon_device *rdev = dev->dev_private;
769 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
770 u32 disp_merge_cntl;
771
772 switch (radeon_crtc->crtc_id) {
773 case 0:
774 disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
775 disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
776 WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
777 break;
778 case 1:
779 disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
780 disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
781 WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
782 WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
783 WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
784 break;
785 }
786}
787
709int atombios_crtc_mode_set(struct drm_crtc *crtc, 788int atombios_crtc_mode_set(struct drm_crtc *crtc,
710 struct drm_display_mode *mode, 789 struct drm_display_mode *mode,
711 struct drm_display_mode *adjusted_mode, 790 struct drm_display_mode *adjusted_mode,
@@ -727,8 +806,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
727 else { 806 else {
728 if (radeon_crtc->crtc_id == 0) 807 if (radeon_crtc->crtc_id == 0)
729 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 808 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
730 radeon_crtc_set_base(crtc, x, y, old_fb); 809 atombios_crtc_set_base(crtc, x, y, old_fb);
731 radeon_legacy_atom_set_surface(crtc); 810 radeon_legacy_atom_fixup(crtc);
732 } 811 }
733 atombios_overscan_setup(crtc, mode, adjusted_mode); 812 atombios_overscan_setup(crtc, mode, adjusted_mode);
734 atombios_scaler_setup(crtc); 813 atombios_scaler_setup(crtc);
@@ -746,8 +825,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
746 825
747static void atombios_crtc_prepare(struct drm_crtc *crtc) 826static void atombios_crtc_prepare(struct drm_crtc *crtc)
748{ 827{
749 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
750 atombios_lock_crtc(crtc, 1); 828 atombios_lock_crtc(crtc, 1);
829 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
751} 830}
752 831
753static void atombios_crtc_commit(struct drm_crtc *crtc) 832static void atombios_crtc_commit(struct drm_crtc *crtc)
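
Two threads run through the atombios_crtc.c hunks above. First, pixel-clock adjustment is factored into atombios_adjust_pll(): the DVO 2x-clock quirk and the DCE3 AdjustDisplayPll table call live there now, and PLL hints travel in pll->flags instead of a pll_flags value threaded through radeon_compute_pll(). Second, the raw pointer casts into SET_PIXEL_CLOCK_PS_ALLOCATION give way to unions, so one zeroed buffer serves whichever parameter revision the BIOS reports. The frev/crev dispatch, in miniature:

	union set_pixel_clock args;
	u8 frev, crev;

	memset(&args, 0, sizeof(args));
	atom_parse_cmd_header(rdev->mode_info.atom_context, index,
			      &frev, &crev);
	switch (frev) {
	case 1:
		switch (crev) {
		case 1:	/* fill args.v1 */	break;
		case 2:	/* fill args.v2 */	break;
		case 3:	/* fill args.v3 */	break;
		default: return;	/* unknown minor revision */
		}
		break;
	default: return;		/* unknown major revision */
	}
	atom_execute_table(rdev->mode_info.atom_context, index,
			   (uint32_t *)&args);

The prepare() hunk also locks the CRTC before powering it down, presumably so nothing can re-arm the CRTC between the two steps, and the stray "executing ..." printk()s are gone.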
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 0d63c4436e7c..99915a682d59 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; 332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); 333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
334 unsigned char *base; 334 unsigned char *base;
335 int retry_count = 0;
335 336
336 memset(&args, 0, sizeof(args)); 337 memset(&args, 0, sizeof(args));
337 338
338 base = (unsigned char *)rdev->mode_info.atom_context->scratch; 339 base = (unsigned char *)rdev->mode_info.atom_context->scratch;
339 340
341retry:
340 memcpy(base, req_bytes, num_bytes); 342 memcpy(base, req_bytes, num_bytes);
341 343
342 args.lpAuxRequest = 0; 344 args.lpAuxRequest = 0;
@@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
347 349
348 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 350 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
349 351
350 if (args.ucReplyStatus) { 352 if (args.ucReplyStatus && !args.ucDataOutLen) {
351 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n", 353 if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
354 goto retry;
355 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
352 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], 356 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
353 chan->rec.i2c_id, args.ucReplyStatus); 357 chan->rec.i2c_id, args.ucReplyStatus, retry_count);
354 return false; 358 return false;
355 } 359 }
356 360
@@ -468,7 +472,8 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
468 struct radeon_connector *radeon_connector; 472 struct radeon_connector *radeon_connector;
469 struct radeon_connector_atom_dig *dig_connector; 473 struct radeon_connector_atom_dig *dig_connector;
470 474
471 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 475 if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
476 (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
472 return; 477 return;
473 478
474 radeon_connector = to_radeon_connector(connector); 479 radeon_connector = to_radeon_connector(connector);
@@ -582,7 +587,8 @@ void dp_link_train(struct drm_encoder *encoder,
582 u8 train_set[4]; 587 u8 train_set[4];
583 int i; 588 int i;
584 589
585 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 590 if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
591 (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
586 return; 592 return;
587 593
588 if (!radeon_encoder->enc_priv) 594 if (!radeon_encoder->enc_priv)
@@ -594,21 +600,14 @@ void dp_link_train(struct drm_encoder *encoder,
594 return; 600 return;
595 dig_connector = radeon_connector->con_priv; 601 dig_connector = radeon_connector->con_priv;
596 602
597 if (ASIC_IS_DCE32(rdev)) { 603 if (dig->dig_encoder)
598 if (dig->dig_block) 604 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
599 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; 605 else
600 else 606 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
601 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; 607 if (dig_connector->linkb)
602 if (dig_connector->linkb) 608 enc_id |= ATOM_DP_CONFIG_LINK_B;
603 enc_id |= ATOM_DP_CONFIG_LINK_B; 609 else
604 else 610 enc_id |= ATOM_DP_CONFIG_LINK_A;
605 enc_id |= ATOM_DP_CONFIG_LINK_A;
606 } else {
607 if (dig_connector->linkb)
608 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
609 else
610 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A;
611 }
612 611
613 memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 612 memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
614 if (dig_connector->dp_clock == 270000) 613 if (dig_connector->dp_clock == 270000)
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 0d79577c1576..607241c6a8a9 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -661,8 +661,10 @@ static int parser_auth(struct table *t, const char *filename)
661 fseek(file, 0, SEEK_SET); 661 fseek(file, 0, SEEK_SET);
662 662
663 /* get header */ 663 /* get header */
664 if (fgets(buf, 1024, file) == NULL) 664 if (fgets(buf, 1024, file) == NULL) {
665 fclose(file);
665 return -1; 666 return -1;
667 }
666 668
667 /* first line will contain the last register 669 /* first line will contain the last register
668 * and gpu name */ 670 * and gpu name */
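
The mkregtable change plugs a file-handle leak: once fopen() has succeeded, every early return must release the handle. The same pattern as a standalone sketch (filename and buf as in parser_auth above):

    FILE *file = fopen(filename, "r");
    if (file == NULL)
        return -1;
    if (fgets(buf, 1024, file) == NULL) {
        fclose(file);    /* release the handle before bailing out */
        return -1;
    }
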
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 71727460968f..c0d4650cdb79 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -131,7 +131,8 @@ void r100_hpd_init(struct radeon_device *rdev)
131 break; 131 break;
132 } 132 }
133 } 133 }
134 r100_irq_set(rdev); 134 if (rdev->irq.installed)
135 r100_irq_set(rdev);
135} 136}
136 137
137void r100_hpd_fini(struct radeon_device *rdev) 138void r100_hpd_fini(struct radeon_device *rdev)
@@ -243,6 +244,11 @@ int r100_irq_set(struct radeon_device *rdev)
243{ 244{
244 uint32_t tmp = 0; 245 uint32_t tmp = 0;
245 246
247 if (!rdev->irq.installed) {
248 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
249 WREG32(R_000040_GEN_INT_CNTL, 0);
250 return -EINVAL;
251 }
246 if (rdev->irq.sw_int) { 252 if (rdev->irq.sw_int) {
247 tmp |= RADEON_SW_INT_ENABLE; 253 tmp |= RADEON_SW_INT_ENABLE;
248 } 254 }
@@ -348,14 +354,25 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
348 return RREG32(RADEON_CRTC2_CRNT_FRAME); 354 return RREG32(RADEON_CRTC2_CRNT_FRAME);
349} 355}
350 356
357/* Whoever calls radeon_fence_emit should call ring_lock and ask
358 * for enough space (today the callers are ib schedule and buffer move) */
351void r100_fence_ring_emit(struct radeon_device *rdev, 359void r100_fence_ring_emit(struct radeon_device *rdev,
352 struct radeon_fence *fence) 360 struct radeon_fence *fence)
353{ 361{
354 /* Who ever call radeon_fence_emit should call ring_lock and ask 362 /* We have to make sure that caches are flushed before
355 * for enough space (today caller are ib schedule and buffer move) */ 363 * the CPU might read something from VRAM. */
364 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
365 radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
366 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
367 radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
356 /* Wait until IDLE & CLEAN */ 368 /* Wait until IDLE & CLEAN */
357 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 369 radeon_ring_write(rdev, PACKET0(0x1720, 0));
358 radeon_ring_write(rdev, (1 << 16) | (1 << 17)); 370 radeon_ring_write(rdev, (1 << 16) | (1 << 17));
371 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
372 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
373 RADEON_HDP_READ_BUFFER_INVALIDATE);
374 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
375 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
359 /* Emit fence sequence & fire IRQ */ 376 /* Emit fence sequence & fire IRQ */
360 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); 377 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
361 radeon_ring_write(rdev, fence->seq); 378 radeon_ring_write(rdev, fence->seq);
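
The fence now flushes the destination and Z caches and invalidates the HDP read buffer from the ring rather than via MMIO (the r300.c hunk below records the errata that motivated moving this onto the ring). A sketch of the HDP portion, with hdp_cntl being the HOST_PATH_CNTL value cached at startup:

    /* set the INVALIDATE bit through the ring, then restore the
     * original HOST_PATH_CNTL value */
    radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
    radeon_ring_write(rdev, hdp_cntl | RADEON_HDP_READ_BUFFER_INVALIDATE);
    radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
    radeon_ring_write(rdev, hdp_cntl);
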
@@ -1493,6 +1510,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1493 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1510 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1494 return -EINVAL; 1511 return -EINVAL;
1495 } 1512 }
1513 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
1496 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1514 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1497 track->immd_dwords = pkt->count - 1; 1515 track->immd_dwords = pkt->count - 1;
1498 r = r100_cs_track_check(p->rdev, track); 1516 r = r100_cs_track_check(p->rdev, track);
@@ -1713,14 +1731,6 @@ void r100_gpu_init(struct radeon_device *rdev)
1713 r100_hdp_reset(rdev); 1731 r100_hdp_reset(rdev);
1714} 1732}
1715 1733
1716void r100_hdp_flush(struct radeon_device *rdev)
1717{
1718 u32 tmp;
1719 tmp = RREG32(RADEON_HOST_PATH_CNTL);
1720 tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
1721 WREG32(RADEON_HOST_PATH_CNTL, tmp);
1722}
1723
1724void r100_hdp_reset(struct radeon_device *rdev) 1734void r100_hdp_reset(struct radeon_device *rdev)
1725{ 1735{
1726 uint32_t tmp; 1736 uint32_t tmp;
@@ -3313,6 +3323,7 @@ static int r100_startup(struct radeon_device *rdev)
3313 } 3323 }
3314 /* Enable IRQ */ 3324 /* Enable IRQ */
3315 r100_irq_set(rdev); 3325 r100_irq_set(rdev);
3326 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3316 /* 1M ring buffer */ 3327 /* 1M ring buffer */
3317 r = r100_cp_init(rdev, 1024 * 1024); 3328 r = r100_cp_init(rdev, 1024 * 1024);
3318 if (r) { 3329 if (r) {
@@ -3364,13 +3375,13 @@ int r100_suspend(struct radeon_device *rdev)
3364 3375
3365void r100_fini(struct radeon_device *rdev) 3376void r100_fini(struct radeon_device *rdev)
3366{ 3377{
3367 r100_suspend(rdev);
3368 r100_cp_fini(rdev); 3378 r100_cp_fini(rdev);
3369 r100_wb_fini(rdev); 3379 r100_wb_fini(rdev);
3370 r100_ib_fini(rdev); 3380 r100_ib_fini(rdev);
3371 radeon_gem_fini(rdev); 3381 radeon_gem_fini(rdev);
3372 if (rdev->flags & RADEON_IS_PCI) 3382 if (rdev->flags & RADEON_IS_PCI)
3373 r100_pci_gart_fini(rdev); 3383 r100_pci_gart_fini(rdev);
3384 radeon_agp_fini(rdev);
3374 radeon_irq_kms_fini(rdev); 3385 radeon_irq_kms_fini(rdev);
3375 radeon_fence_driver_fini(rdev); 3386 radeon_fence_driver_fini(rdev);
3376 radeon_bo_fini(rdev); 3387 radeon_bo_fini(rdev);
@@ -3394,9 +3405,7 @@ int r100_mc_init(struct radeon_device *rdev)
3394 if (rdev->flags & RADEON_IS_AGP) { 3405 if (rdev->flags & RADEON_IS_AGP) {
3395 r = radeon_agp_init(rdev); 3406 r = radeon_agp_init(rdev);
3396 if (r) { 3407 if (r) {
3397 printk(KERN_WARNING "[drm] Disabling AGP\n"); 3408 radeon_agp_disable(rdev);
3398 rdev->flags &= ~RADEON_IS_AGP;
3399 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
3400 } else { 3409 } else {
3401 rdev->mc.gtt_location = rdev->mc.agp_base; 3410 rdev->mc.gtt_location = rdev->mc.agp_base;
3402 } 3411 }
@@ -3477,13 +3486,12 @@ int r100_init(struct radeon_device *rdev)
3477 if (r) { 3486 if (r) {
3478 /* Something went wrong with the accel init, stop accel */ 3487 /* Something went wrong with the accel init, stop accel */
3479 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 3488 dev_err(rdev->dev, "Disabling GPU acceleration\n");
3480 r100_suspend(rdev);
3481 r100_cp_fini(rdev); 3489 r100_cp_fini(rdev);
3482 r100_wb_fini(rdev); 3490 r100_wb_fini(rdev);
3483 r100_ib_fini(rdev); 3491 r100_ib_fini(rdev);
3492 radeon_irq_kms_fini(rdev);
3484 if (rdev->flags & RADEON_IS_PCI) 3493 if (rdev->flags & RADEON_IS_PCI)
3485 r100_pci_gart_fini(rdev); 3494 r100_pci_gart_fini(rdev);
3486 radeon_irq_kms_fini(rdev);
3487 rdev->accel_working = false; 3495 rdev->accel_working = false;
3488 } 3496 }
3489 return 0; 3497 return 0;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 20942127c46b..ff1e0cd608bf 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -371,13 +371,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
371 case 5: 371 case 5:
372 case 6: 372 case 6:
373 case 7: 373 case 7:
374 /* 1D/2D */
374 track->textures[i].tex_coord_type = 0; 375 track->textures[i].tex_coord_type = 0;
375 break; 376 break;
376 case 1: 377 case 1:
377 track->textures[i].tex_coord_type = 1; 378 /* CUBE */
379 track->textures[i].tex_coord_type = 2;
378 break; 380 break;
379 case 2: 381 case 2:
380 track->textures[i].tex_coord_type = 2; 382 /* 3D */
383 track->textures[i].tex_coord_type = 1;
381 break; 384 break;
382 } 385 }
383 break; 386 break;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 3f2cc9e2e8d9..43b55a030b4d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -36,7 +36,15 @@
36#include "rv350d.h" 36#include "rv350d.h"
37#include "r300_reg_safe.h" 37#include "r300_reg_safe.h"
38 38
39/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380 */ 39/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
40 *
41 * GPU Errata:
42 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
43 * via MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
44 * However, scheduling such a write on the ring seems harmless; I suspect
45 * the CP read collides with the flush somehow, or maybe the MC, hard to
46 * tell. (Jerome Glisse)
47 */
40 48
41/* 49/*
42 * rv370,rv380 PCIE GART 50 * rv370,rv380 PCIE GART
@@ -178,6 +186,11 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
178 /* Wait until IDLE & CLEAN */ 186 /* Wait until IDLE & CLEAN */
179 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 187 radeon_ring_write(rdev, PACKET0(0x1720, 0));
180 radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9)); 188 radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
189 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
190 radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
191 RADEON_HDP_READ_BUFFER_INVALIDATE);
192 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
193 radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
181 /* Emit fence sequence & fire IRQ */ 194 /* Emit fence sequence & fire IRQ */
182 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); 195 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
183 radeon_ring_write(rdev, fence->seq); 196 radeon_ring_write(rdev, fence->seq);
@@ -493,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)
493 506
494 /* DDR for all card after R300 & IGP */ 507 /* DDR for all card after R300 & IGP */
495 rdev->mc.vram_is_ddr = true; 508 rdev->mc.vram_is_ddr = true;
509
496 tmp = RREG32(RADEON_MEM_CNTL); 510 tmp = RREG32(RADEON_MEM_CNTL);
497 if (tmp & R300_MEM_NUM_CHANNELS_MASK) { 511 tmp &= R300_MEM_NUM_CHANNELS_MASK;
498 rdev->mc.vram_width = 128; 512 switch (tmp) {
499 } else { 513 case 0: rdev->mc.vram_width = 64; break;
500 rdev->mc.vram_width = 64; 514 case 1: rdev->mc.vram_width = 128; break;
515 case 2: rdev->mc.vram_width = 256; break;
516 default: rdev->mc.vram_width = 128; break;
501 } 517 }
502 518
503 r100_vram_init_sizes(rdev); 519 r100_vram_init_sizes(rdev);
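
Rather than a two-way guess, the memory-controller channel field is now decoded explicitly. An equivalent table-driven form of the same decode (the 64/128/256-bit widths come from the switch above; the table itself is only illustrative):

    static const int vram_widths[] = { 64, 128, 256 };
    u32 chans = RREG32(RADEON_MEM_CNTL) & R300_MEM_NUM_CHANNELS_MASK;
    /* unknown encodings fall back to 128 bits, as in the switch */
    rdev->mc.vram_width = (chans < ARRAY_SIZE(vram_widths)) ?
                          vram_widths[chans] : 128;
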
@@ -1258,6 +1274,7 @@ static int r300_startup(struct radeon_device *rdev)
1258 } 1274 }
1259 /* Enable IRQ */ 1275 /* Enable IRQ */
1260 r100_irq_set(rdev); 1276 r100_irq_set(rdev);
1277 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
1261 /* 1M ring buffer */ 1278 /* 1M ring buffer */
1262 r = r100_cp_init(rdev, 1024 * 1024); 1279 r = r100_cp_init(rdev, 1024 * 1024);
1263 if (r) { 1280 if (r) {
@@ -1313,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev)
1313 1330
1314void r300_fini(struct radeon_device *rdev) 1331void r300_fini(struct radeon_device *rdev)
1315{ 1332{
1316 r300_suspend(rdev);
1317 r100_cp_fini(rdev); 1333 r100_cp_fini(rdev);
1318 r100_wb_fini(rdev); 1334 r100_wb_fini(rdev);
1319 r100_ib_fini(rdev); 1335 r100_ib_fini(rdev);
@@ -1322,6 +1338,7 @@ void r300_fini(struct radeon_device *rdev)
1322 rv370_pcie_gart_fini(rdev); 1338 rv370_pcie_gart_fini(rdev);
1323 if (rdev->flags & RADEON_IS_PCI) 1339 if (rdev->flags & RADEON_IS_PCI)
1324 r100_pci_gart_fini(rdev); 1340 r100_pci_gart_fini(rdev);
1341 radeon_agp_fini(rdev);
1325 radeon_irq_kms_fini(rdev); 1342 radeon_irq_kms_fini(rdev);
1326 radeon_fence_driver_fini(rdev); 1343 radeon_fence_driver_fini(rdev);
1327 radeon_bo_fini(rdev); 1344 radeon_bo_fini(rdev);
@@ -1403,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
1403 if (r) { 1420 if (r) {
1404 /* Something went wrong with the accel init, stop accel */ 1421 /* Something went wrong with the accel init, stop accel */
1405 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 1422 dev_err(rdev->dev, "Disabling GPU acceleration\n");
1406 r300_suspend(rdev);
1407 r100_cp_fini(rdev); 1423 r100_cp_fini(rdev);
1408 r100_wb_fini(rdev); 1424 r100_wb_fini(rdev);
1409 r100_ib_fini(rdev); 1425 r100_ib_fini(rdev);
1426 radeon_irq_kms_fini(rdev);
1410 if (rdev->flags & RADEON_IS_PCIE) 1427 if (rdev->flags & RADEON_IS_PCIE)
1411 rv370_pcie_gart_fini(rdev); 1428 rv370_pcie_gart_fini(rdev);
1412 if (rdev->flags & RADEON_IS_PCI) 1429 if (rdev->flags & RADEON_IS_PCI)
1413 r100_pci_gart_fini(rdev); 1430 r100_pci_gart_fini(rdev);
1414 radeon_irq_kms_fini(rdev); 1431 radeon_agp_fini(rdev);
1415 rdev->accel_working = false; 1432 rdev->accel_working = false;
1416 } 1433 }
1417 return 0; 1434 return 0;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c05a7270cf0c..d9373246c97f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -30,7 +30,15 @@
30#include "radeon_reg.h" 30#include "radeon_reg.h"
31#include "radeon.h" 31#include "radeon.h"
32#include "atom.h" 32#include "atom.h"
33#include "r100d.h"
33#include "r420d.h" 34#include "r420d.h"
35#include "r420_reg_safe.h"
36
37static void r420_set_reg_safe(struct radeon_device *rdev)
38{
39 rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
40 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
41}
34 42
35int r420_mc_init(struct radeon_device *rdev) 43int r420_mc_init(struct radeon_device *rdev)
36{ 44{
@@ -42,9 +50,7 @@ int r420_mc_init(struct radeon_device *rdev)
42 if (rdev->flags & RADEON_IS_AGP) { 50 if (rdev->flags & RADEON_IS_AGP) {
43 r = radeon_agp_init(rdev); 51 r = radeon_agp_init(rdev);
44 if (r) { 52 if (r) {
45 printk(KERN_WARNING "[drm] Disabling AGP\n"); 53 radeon_agp_disable(rdev);
46 rdev->flags &= ~RADEON_IS_AGP;
47 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
48 } else { 54 } else {
49 rdev->mc.gtt_location = rdev->mc.agp_base; 55 rdev->mc.gtt_location = rdev->mc.agp_base;
50 } 56 }
@@ -165,6 +171,34 @@ static void r420_clock_resume(struct radeon_device *rdev)
165 WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl); 171 WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
166} 172}
167 173
174static void r420_cp_errata_init(struct radeon_device *rdev)
175{
176 /* RV410 and R420 can lock up if CP DMA to host memory happens
177 * while the 2D engine is busy.
178 *
179 * The proper workaround is to queue a RESYNC at the beginning
180 * of the CP init, apparently.
181 */
182 radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
183 radeon_ring_lock(rdev, 8);
184 radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
185 radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
186 radeon_ring_write(rdev, 0xDEADBEEF);
187 radeon_ring_unlock_commit(rdev);
188}
189
190static void r420_cp_errata_fini(struct radeon_device *rdev)
191{
192 /* Catch the RESYNC we dispatched all the way back,
193 * at the very beginning of the CP init.
194 */
195 radeon_ring_lock(rdev, 8);
196 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
197 radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
198 radeon_ring_unlock_commit(rdev);
199 radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
200}
201
168static int r420_startup(struct radeon_device *rdev) 202static int r420_startup(struct radeon_device *rdev)
169{ 203{
170 int r; 204 int r;
@@ -190,12 +224,14 @@ static int r420_startup(struct radeon_device *rdev)
190 r420_pipes_init(rdev); 224 r420_pipes_init(rdev);
191 /* Enable IRQ */ 225 /* Enable IRQ */
192 r100_irq_set(rdev); 226 r100_irq_set(rdev);
227 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
193 /* 1M ring buffer */ 228 /* 1M ring buffer */
194 r = r100_cp_init(rdev, 1024 * 1024); 229 r = r100_cp_init(rdev, 1024 * 1024);
195 if (r) { 230 if (r) {
196 dev_err(rdev->dev, "failed initializing CP (%d).\n", r); 231 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
197 return r; 232 return r;
198 } 233 }
234 r420_cp_errata_init(rdev);
199 r = r100_wb_init(rdev); 235 r = r100_wb_init(rdev);
200 if (r) { 236 if (r) {
201 dev_err(rdev->dev, "failed initializing WB (%d).\n", r); 237 dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
@@ -238,6 +274,7 @@ int r420_resume(struct radeon_device *rdev)
238 274
239int r420_suspend(struct radeon_device *rdev) 275int r420_suspend(struct radeon_device *rdev)
240{ 276{
277 r420_cp_errata_fini(rdev);
241 r100_cp_disable(rdev); 278 r100_cp_disable(rdev);
242 r100_wb_disable(rdev); 279 r100_wb_disable(rdev);
243 r100_irq_disable(rdev); 280 r100_irq_disable(rdev);
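
The errata pair has to bracket every window in which the CP is live: the RESYNC scratch write is queued immediately after the CP comes up, and drained again before the CP is disabled. The ordering wired in by this patch, condensed:

    /* startup: bring the CP up, then queue the RESYNC workaround */
    r = r100_cp_init(rdev, 1024 * 1024);
    if (r)
        return r;
    r420_cp_errata_init(rdev);

    /* suspend: catch the RESYNC before the CP goes away */
    r420_cp_errata_fini(rdev);
    r100_cp_disable(rdev);
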
@@ -346,22 +383,21 @@ int r420_init(struct radeon_device *rdev)
346 if (r) 383 if (r)
347 return r; 384 return r;
348 } 385 }
349 r300_set_reg_safe(rdev); 386 r420_set_reg_safe(rdev);
350 rdev->accel_working = true; 387 rdev->accel_working = true;
351 r = r420_startup(rdev); 388 r = r420_startup(rdev);
352 if (r) { 389 if (r) {
353 /* Something went wrong with the accel init, stop accel */ 390 /* Something went wrong with the accel init, stop accel */
354 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 391 dev_err(rdev->dev, "Disabling GPU acceleration\n");
355 r420_suspend(rdev);
356 r100_cp_fini(rdev); 392 r100_cp_fini(rdev);
357 r100_wb_fini(rdev); 393 r100_wb_fini(rdev);
358 r100_ib_fini(rdev); 394 r100_ib_fini(rdev);
395 radeon_irq_kms_fini(rdev);
359 if (rdev->flags & RADEON_IS_PCIE) 396 if (rdev->flags & RADEON_IS_PCIE)
360 rv370_pcie_gart_fini(rdev); 397 rv370_pcie_gart_fini(rdev);
361 if (rdev->flags & RADEON_IS_PCI) 398 if (rdev->flags & RADEON_IS_PCI)
362 r100_pci_gart_fini(rdev); 399 r100_pci_gart_fini(rdev);
363 radeon_agp_fini(rdev); 400 radeon_agp_fini(rdev);
364 radeon_irq_kms_fini(rdev);
365 rdev->accel_working = false; 401 rdev->accel_working = false;
366 } 402 }
367 return 0; 403 return 0;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 0f3843b6dac7..ddf5731eba0d 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -186,6 +186,7 @@ static int r520_startup(struct radeon_device *rdev)
186 } 186 }
187 /* Enable IRQ */ 187 /* Enable IRQ */
188 rs600_irq_set(rdev); 188 rs600_irq_set(rdev);
189 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
189 /* 1M ring buffer */ 190 /* 1M ring buffer */
190 r = r100_cp_init(rdev, 1024 * 1024); 191 r = r100_cp_init(rdev, 1024 * 1024);
191 if (r) { 192 if (r) {
@@ -293,13 +294,12 @@ int r520_init(struct radeon_device *rdev)
293 if (r) { 294 if (r) {
294 /* Something went wrong with the accel init, stop accel */ 295 /* Something went wrong with the accel init, stop accel */
295 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 296 dev_err(rdev->dev, "Disabling GPU acceleration\n");
296 rv515_suspend(rdev);
297 r100_cp_fini(rdev); 297 r100_cp_fini(rdev);
298 r100_wb_fini(rdev); 298 r100_wb_fini(rdev);
299 r100_ib_fini(rdev); 299 r100_ib_fini(rdev);
300 radeon_irq_kms_fini(rdev);
300 rv370_pcie_gart_fini(rdev); 301 rv370_pcie_gart_fini(rdev);
301 radeon_agp_fini(rdev); 302 radeon_agp_fini(rdev);
302 radeon_irq_kms_fini(rdev);
303 rdev->accel_working = false; 303 rdev->accel_working = false;
304 } 304 }
305 return 0; 305 return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a0ac3c134b1b..2ffcf5a03551 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -285,7 +285,8 @@ void r600_hpd_init(struct radeon_device *rdev)
285 } 285 }
286 } 286 }
287 } 287 }
288 r600_irq_set(rdev); 288 if (rdev->irq.installed)
289 r600_irq_set(rdev);
289} 290}
290 291
291void r600_hpd_fini(struct radeon_device *rdev) 292void r600_hpd_fini(struct radeon_device *rdev)
@@ -623,7 +624,6 @@ int r600_mc_init(struct radeon_device *rdev)
623 fixed20_12 a; 624 fixed20_12 a;
624 u32 tmp; 625 u32 tmp;
625 int chansize, numchan; 626 int chansize, numchan;
626 int r;
627 627
628 /* Get VRAM informations */ 628 /* Get VRAM informations */
629 rdev->mc.vram_is_ddr = true; 629 rdev->mc.vram_is_ddr = true;
@@ -666,9 +666,6 @@ int r600_mc_init(struct radeon_device *rdev)
666 rdev->mc.real_vram_size = rdev->mc.aper_size; 666 rdev->mc.real_vram_size = rdev->mc.aper_size;
667 667
668 if (rdev->flags & RADEON_IS_AGP) { 668 if (rdev->flags & RADEON_IS_AGP) {
669 r = radeon_agp_init(rdev);
670 if (r)
671 return r;
672 /* gtt_size is setup by radeon_agp_init */ 669 /* gtt_size is setup by radeon_agp_init */
673 rdev->mc.gtt_location = rdev->mc.agp_base; 670 rdev->mc.gtt_location = rdev->mc.agp_base;
674 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; 671 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -726,6 +723,10 @@ int r600_mc_init(struct radeon_device *rdev)
726 a.full = rfixed_const(100); 723 a.full = rfixed_const(100);
727 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); 724 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
728 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 725 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
726
727 if (rdev->flags & RADEON_IS_IGP)
728 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
729
729 return 0; 730 return 0;
730} 731}
731 732
@@ -1384,11 +1385,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1384 (void)RREG32(PCIE_PORT_DATA); 1385 (void)RREG32(PCIE_PORT_DATA);
1385} 1386}
1386 1387
1387void r600_hdp_flush(struct radeon_device *rdev)
1388{
1389 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
1390}
1391
1392/* 1388/*
1393 * CP & Ring 1389 * CP & Ring
1394 */ 1390 */
@@ -1658,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
1658 rdev->cp.align_mask = 16 - 1; 1654 rdev->cp.align_mask = 16 - 1;
1659} 1655}
1660 1656
1657void r600_cp_fini(struct radeon_device *rdev)
1658{
1659 r600_cp_stop(rdev);
1660 radeon_ring_fini(rdev);
1661}
1662
1661 1663
1662/* 1664/*
1663 * GPU scratch registers helpers function. 1665 * GPU scratch registers helpers function.
@@ -1785,28 +1787,31 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
1785 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1787 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1786 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 1788 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
1787 radeon_ring_write(rdev, fence->seq); 1789 radeon_ring_write(rdev, fence->seq);
1790 radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
1791 radeon_ring_write(rdev, 1);
1788 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ 1792 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
1789 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); 1793 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
1790 radeon_ring_write(rdev, RB_INT_STAT); 1794 radeon_ring_write(rdev, RB_INT_STAT);
1791} 1795}
1792 1796
1793int r600_copy_dma(struct radeon_device *rdev,
1794 uint64_t src_offset,
1795 uint64_t dst_offset,
1796 unsigned num_pages,
1797 struct radeon_fence *fence)
1798{
1799 /* FIXME: implement */
1800 return 0;
1801}
1802
1803int r600_copy_blit(struct radeon_device *rdev, 1797int r600_copy_blit(struct radeon_device *rdev,
1804 uint64_t src_offset, uint64_t dst_offset, 1798 uint64_t src_offset, uint64_t dst_offset,
1805 unsigned num_pages, struct radeon_fence *fence) 1799 unsigned num_pages, struct radeon_fence *fence)
1806{ 1800{
1807 r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); 1801 int r;
1802
1803 mutex_lock(&rdev->r600_blit.mutex);
1804 rdev->r600_blit.vb_ib = NULL;
1805 r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
1806 if (r) {
1807 if (rdev->r600_blit.vb_ib)
1808 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
1809 mutex_unlock(&rdev->r600_blit.mutex);
1810 return r;
1811 }
1808 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); 1812 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
1809 r600_blit_done_copy(rdev, fence); 1813 r600_blit_done_copy(rdev, fence);
1814 mutex_unlock(&rdev->r600_blit.mutex);
1810 return 0; 1815 return 0;
1811} 1816}
1812 1817
@@ -1862,26 +1867,25 @@ int r600_startup(struct radeon_device *rdev)
1862 return r; 1867 return r;
1863 } 1868 }
1864 r600_gpu_init(rdev); 1869 r600_gpu_init(rdev);
1865 1870 r = r600_blit_init(rdev);
1866 if (!rdev->r600_blit.shader_obj) { 1871 if (r) {
1867 r = r600_blit_init(rdev); 1872 r600_blit_fini(rdev);
1873 rdev->asic->copy = NULL;
1874 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1875 }
1876 /* pin copy shader into vram */
1877 if (rdev->r600_blit.shader_obj) {
1878 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1879 if (unlikely(r != 0))
1880 return r;
1881 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
1882 &rdev->r600_blit.shader_gpu_addr);
1883 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1868 if (r) { 1884 if (r) {
1869 DRM_ERROR("radeon: failed blitter (%d).\n", r); 1885 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
1870 return r; 1886 return r;
1871 } 1887 }
1872 } 1888 }
1873
1874 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1875 if (unlikely(r != 0))
1876 return r;
1877 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
1878 &rdev->r600_blit.shader_gpu_addr);
1879 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1880 if (r) {
1881 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
1882 return r;
1883 }
1884
1885 /* Enable IRQ */ 1889 /* Enable IRQ */
1886 r = r600_irq_init(rdev); 1890 r = r600_irq_init(rdev);
1887 if (r) { 1891 if (r) {
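
r600_startup() now degrades gracefully when the blitter cannot be brought up: the asic copy callback is cleared so buffer moves fall back to CPU copies, and the shader object is pinned only if it exists. The core of the fallback, condensed (the memcpy path itself lives in common code, not in this hunk):

    r = r600_blit_init(rdev);
    if (r) {
        r600_blit_fini(rdev);
        rdev->asic->copy = NULL;   /* forces the CPU copy fallback */
        dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
    }
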
@@ -1946,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
1946 DRM_ERROR("radeon: failed testing IB (%d).\n", r); 1950 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1947 return r; 1951 return r;
1948 } 1952 }
1953
1954 r = r600_audio_init(rdev);
1955 if (r) {
1956 DRM_ERROR("radeon: audio resume failed\n");
1957 return r;
1958 }
1959
1949 return r; 1960 return r;
1950} 1961}
1951 1962
@@ -1953,17 +1964,21 @@ int r600_suspend(struct radeon_device *rdev)
1953{ 1964{
1954 int r; 1965 int r;
1955 1966
1967 r600_audio_fini(rdev);
1956 /* FIXME: we should wait for ring to be empty */ 1968 /* FIXME: we should wait for ring to be empty */
1957 r600_cp_stop(rdev); 1969 r600_cp_stop(rdev);
1958 rdev->cp.ready = false; 1970 rdev->cp.ready = false;
1971 r600_irq_suspend(rdev);
1959 r600_wb_disable(rdev); 1972 r600_wb_disable(rdev);
1960 r600_pcie_gart_disable(rdev); 1973 r600_pcie_gart_disable(rdev);
1961 /* unpin shaders bo */ 1974 /* unpin shaders bo */
1962 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 1975 if (rdev->r600_blit.shader_obj) {
1963 if (unlikely(r != 0)) 1976 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1964 return r; 1977 if (!r) {
1965 radeon_bo_unpin(rdev->r600_blit.shader_obj); 1978 radeon_bo_unpin(rdev->r600_blit.shader_obj);
1966 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 1979 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1980 }
1981 }
1967 return 0; 1982 return 0;
1968} 1983}
1969 1984
@@ -2024,6 +2039,11 @@ int r600_init(struct radeon_device *rdev)
2024 r = radeon_fence_driver_init(rdev); 2039 r = radeon_fence_driver_init(rdev);
2025 if (r) 2040 if (r)
2026 return r; 2041 return r;
2042 if (rdev->flags & RADEON_IS_AGP) {
2043 r = radeon_agp_init(rdev);
2044 if (r)
2045 radeon_agp_disable(rdev);
2046 }
2027 r = r600_mc_init(rdev); 2047 r = r600_mc_init(rdev);
2028 if (r) 2048 if (r)
2029 return r; 2049 return r;
@@ -2049,22 +2069,25 @@ int r600_init(struct radeon_device *rdev)
2049 rdev->accel_working = true; 2069 rdev->accel_working = true;
2050 r = r600_startup(rdev); 2070 r = r600_startup(rdev);
2051 if (r) { 2071 if (r) {
2052 r600_suspend(rdev); 2072 dev_err(rdev->dev, "disabling GPU acceleration\n");
2073 r600_cp_fini(rdev);
2053 r600_wb_fini(rdev); 2074 r600_wb_fini(rdev);
2054 radeon_ring_fini(rdev); 2075 r600_irq_fini(rdev);
2076 radeon_irq_kms_fini(rdev);
2055 r600_pcie_gart_fini(rdev); 2077 r600_pcie_gart_fini(rdev);
2056 rdev->accel_working = false; 2078 rdev->accel_working = false;
2057 } 2079 }
2058 if (rdev->accel_working) { 2080 if (rdev->accel_working) {
2059 r = radeon_ib_pool_init(rdev); 2081 r = radeon_ib_pool_init(rdev);
2060 if (r) { 2082 if (r) {
2061 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); 2083 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2062 rdev->accel_working = false;
2063 }
2064 r = r600_ib_test(rdev);
2065 if (r) {
2066 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2067 rdev->accel_working = false; 2084 rdev->accel_working = false;
2085 } else {
2086 r = r600_ib_test(rdev);
2087 if (r) {
2088 dev_err(rdev->dev, "IB test failed (%d).\n", r);
2089 rdev->accel_working = false;
2090 }
2068 } 2091 }
2069 } 2092 }
2070 2093
@@ -2076,21 +2099,17 @@ int r600_init(struct radeon_device *rdev)
2076 2099
2077void r600_fini(struct radeon_device *rdev) 2100void r600_fini(struct radeon_device *rdev)
2078{ 2101{
2079 /* Suspend operations */
2080 r600_suspend(rdev);
2081
2082 r600_audio_fini(rdev); 2102 r600_audio_fini(rdev);
2083 r600_blit_fini(rdev); 2103 r600_blit_fini(rdev);
2104 r600_cp_fini(rdev);
2105 r600_wb_fini(rdev);
2084 r600_irq_fini(rdev); 2106 r600_irq_fini(rdev);
2085 radeon_irq_kms_fini(rdev); 2107 radeon_irq_kms_fini(rdev);
2086 radeon_ring_fini(rdev);
2087 r600_wb_fini(rdev);
2088 r600_pcie_gart_fini(rdev); 2108 r600_pcie_gart_fini(rdev);
2109 radeon_agp_fini(rdev);
2089 radeon_gem_fini(rdev); 2110 radeon_gem_fini(rdev);
2090 radeon_fence_driver_fini(rdev); 2111 radeon_fence_driver_fini(rdev);
2091 radeon_clocks_fini(rdev); 2112 radeon_clocks_fini(rdev);
2092 if (rdev->flags & RADEON_IS_AGP)
2093 radeon_agp_fini(rdev);
2094 radeon_bo_fini(rdev); 2113 radeon_bo_fini(rdev);
2095 radeon_atombios_fini(rdev); 2114 radeon_atombios_fini(rdev);
2096 kfree(rdev->bios); 2115 kfree(rdev->bios);
@@ -2196,14 +2215,14 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2196 rb_bufsz = drm_order(ring_size / 4); 2215 rb_bufsz = drm_order(ring_size / 4);
2197 ring_size = (1 << rb_bufsz) * 4; 2216 ring_size = (1 << rb_bufsz) * 4;
2198 rdev->ih.ring_size = ring_size; 2217 rdev->ih.ring_size = ring_size;
2199 rdev->ih.align_mask = 4 - 1; 2218 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2219 rdev->ih.rptr = 0;
2200} 2220}
2201 2221
2202static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size) 2222static int r600_ih_ring_alloc(struct radeon_device *rdev)
2203{ 2223{
2204 int r; 2224 int r;
2205 2225
2206 rdev->ih.ring_size = ring_size;
2207 /* Allocate ring buffer */ 2226 /* Allocate ring buffer */
2208 if (rdev->ih.ring_obj == NULL) { 2227 if (rdev->ih.ring_obj == NULL) {
2209 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, 2228 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
@@ -2233,9 +2252,6 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
2233 return r; 2252 return r;
2234 } 2253 }
2235 } 2254 }
2236 rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
2237 rdev->ih.rptr = 0;
2238
2239 return 0; 2255 return 0;
2240} 2256}
2241 2257
@@ -2385,7 +2401,7 @@ int r600_irq_init(struct radeon_device *rdev)
2385 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 2401 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2386 2402
2387 /* allocate ring */ 2403 /* allocate ring */
2388 ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size); 2404 ret = r600_ih_ring_alloc(rdev);
2389 if (ret) 2405 if (ret)
2390 return ret; 2406 return ret;
2391 2407
@@ -2448,10 +2464,15 @@ int r600_irq_init(struct radeon_device *rdev)
2448 return ret; 2464 return ret;
2449} 2465}
2450 2466
2451void r600_irq_fini(struct radeon_device *rdev) 2467void r600_irq_suspend(struct radeon_device *rdev)
2452{ 2468{
2453 r600_disable_interrupts(rdev); 2469 r600_disable_interrupts(rdev);
2454 r600_rlc_stop(rdev); 2470 r600_rlc_stop(rdev);
2471}
2472
2473void r600_irq_fini(struct radeon_device *rdev)
2474{
2475 r600_irq_suspend(rdev);
2455 r600_ih_ring_fini(rdev); 2476 r600_ih_ring_fini(rdev);
2456} 2477}
2457 2478
@@ -2461,9 +2482,17 @@ int r600_irq_set(struct radeon_device *rdev)
2461 u32 mode_int = 0; 2482 u32 mode_int = 0;
2462 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; 2483 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
2463 2484
2485 if (!rdev->irq.installed) {
2486 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
2487 return -EINVAL;
2488 }
2464 /* don't enable anything if the ih is disabled */ 2489 /* don't enable anything if the ih is disabled */
2465 if (!rdev->ih.enabled) 2490 if (!rdev->ih.enabled) {
2491 r600_disable_interrupts(rdev);
2492 /* force the active interrupt state to all disabled */
2493 r600_disable_interrupt_state(rdev);
2466 return 0; 2494 return 0;
2495 }
2467 2496
2468 if (ASIC_IS_DCE3(rdev)) { 2497 if (ASIC_IS_DCE3(rdev)) {
2469 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 2498 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2633,16 +2662,18 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2633 wptr = RREG32(IH_RB_WPTR); 2662 wptr = RREG32(IH_RB_WPTR);
2634 2663
2635 if (wptr & RB_OVERFLOW) { 2664 if (wptr & RB_OVERFLOW) {
2636 WARN_ON(1); 2665 /* When a ring buffer overflow happens, start parsing interrupts
2637 /* XXX deal with overflow */ 2666 * from the last vector not overwritten (wptr + 16). Hopefully
2638 DRM_ERROR("IH RB overflow\n"); 2667 * this should allow us to catch up.
2668 */
2669 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
2670 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
2671 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
2639 tmp = RREG32(IH_RB_CNTL); 2672 tmp = RREG32(IH_RB_CNTL);
2640 tmp |= IH_WPTR_OVERFLOW_CLEAR; 2673 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2641 WREG32(IH_RB_CNTL, tmp); 2674 WREG32(IH_RB_CNTL, tmp);
2642 } 2675 }
2643 wptr = wptr & WPTR_OFFSET_MASK; 2676 return (wptr & rdev->ih.ptr_mask);
2644
2645 return wptr;
2646} 2677}
2647 2678
2648/* r600 IV Ring 2679/* r600 IV Ring
@@ -2678,12 +2709,13 @@ int r600_irq_process(struct radeon_device *rdev)
2678 u32 wptr = r600_get_ih_wptr(rdev); 2709 u32 wptr = r600_get_ih_wptr(rdev);
2679 u32 rptr = rdev->ih.rptr; 2710 u32 rptr = rdev->ih.rptr;
2680 u32 src_id, src_data; 2711 u32 src_id, src_data;
2681 u32 last_entry = rdev->ih.ring_size - 16;
2682 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; 2712 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
2683 unsigned long flags; 2713 unsigned long flags;
2684 bool queue_hotplug = false; 2714 bool queue_hotplug = false;
2685 2715
2686 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 2716 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2717 if (!rdev->ih.enabled)
2718 return IRQ_NONE;
2687 2719
2688 spin_lock_irqsave(&rdev->ih.lock, flags); 2720 spin_lock_irqsave(&rdev->ih.lock, flags);
2689 2721
@@ -2724,7 +2756,7 @@ restart_ih:
2724 } 2756 }
2725 break; 2757 break;
2726 default: 2758 default:
2727 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 2759 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2728 break; 2760 break;
2729 } 2761 }
2730 break; 2762 break;
@@ -2744,7 +2776,7 @@ restart_ih:
2744 } 2776 }
2745 break; 2777 break;
2746 default: 2778 default:
2747 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 2779 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2748 break; 2780 break;
2749 } 2781 }
2750 break; 2782 break;
@@ -2793,7 +2825,7 @@ restart_ih:
2793 } 2825 }
2794 break; 2826 break;
2795 default: 2827 default:
2796 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 2828 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2797 break; 2829 break;
2798 } 2830 }
2799 break; 2831 break;
@@ -2807,15 +2839,13 @@ restart_ih:
2807 DRM_DEBUG("IH: CP EOP\n"); 2839 DRM_DEBUG("IH: CP EOP\n");
2808 break; 2840 break;
2809 default: 2841 default:
2810 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 2842 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2811 break; 2843 break;
2812 } 2844 }
2813 2845
2814 /* wptr/rptr are in bytes! */ 2846 /* wptr/rptr are in bytes! */
2815 if (rptr == last_entry) 2847 rptr += 16;
2816 rptr = 0; 2848 rptr &= rdev->ih.ptr_mask;
2817 else
2818 rptr += 16;
2819 } 2849 }
2820 /* make sure wptr hasn't changed while processing */ 2850 /* make sure wptr hasn't changed while processing */
2821 wptr = r600_get_ih_wptr(rdev); 2851 wptr = r600_get_ih_wptr(rdev);
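
Both the overflow recovery and the normal read-pointer advance rely on the IH ring size being a power of two (r600_ih_ring_init rounds it up with drm_order), so that ptr_mask = ring_size - 1 turns every wrap into a single AND. The arithmetic in isolation, 16 bytes being the size of one interrupt vector:

    rdev->ih.ptr_mask = rdev->ih.ring_size - 1;   /* ring_size is 2^n */

    /* consume one vector, wrapping implicitly */
    rptr = (rptr + 16) & rdev->ih.ptr_mask;

    /* after an overflow, resume from the oldest vector not yet clobbered */
    rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
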
@@ -2883,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
2883 return 0; 2913 return 0;
2884#endif 2914#endif
2885} 2915}
2916
2917/**
2918 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
2919 * @rdev: radeon device structure
2920 * @bo: buffer object that userspace is waiting on to go idle
2921 *
2922 * Some R6XX/R7XX parts don't seem to take an HDP flush into account when
2923 * it is performed through the ring buffer; this leads to corruption in
2924 * rendering, see http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid
2925 * this we perform the HDP flush directly by writing the register via MMIO.
2926 */
2927void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
2928{
2929 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
2930}
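
This flush is reached through the new per-asic ioctl_wait_idle hook declared in the radeon.h hunk further down. A hypothetical call site, assuming the wait-idle ioctl simply invokes the hook when the asic provides one (the actual caller is not part of this diff, and robj is an assumed local name for the waited-on bo):

    /* hypothetical: after waiting for the bo, let the asic flush HDP
     * via MMIO if it needs to */
    if (rdev->asic->ioctl_wait_idle)
        rdev->asic->ioctl_wait_idle(rdev, robj);
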
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 99e2c3891a7d..0dcb6904c4ff 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
35 */ 35 */
36static int r600_audio_chipset_supported(struct radeon_device *rdev) 36static int r600_audio_chipset_supported(struct radeon_device *rdev)
37{ 37{
38 return rdev->family >= CHIP_R600 38 return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
39 || rdev->family == CHIP_RS600 39 || rdev->family == CHIP_RS600
40 || rdev->family == CHIP_RS690 40 || rdev->family == CHIP_RS690
41 || rdev->family == CHIP_RS740; 41 || rdev->family == CHIP_RS740;
@@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev)
261 if (!r600_audio_chipset_supported(rdev)) 261 if (!r600_audio_chipset_supported(rdev))
262 return; 262 return;
263 263
264 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
265
266 del_timer(&rdev->audio_timer); 264 del_timer(&rdev->audio_timer);
265 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
267} 266}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 9aecafb51b66..446b765ac72a 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -449,6 +449,7 @@ int r600_blit_init(struct radeon_device *rdev)
449 u32 packet2s[16]; 449 u32 packet2s[16];
450 int num_packet2s = 0; 450 int num_packet2s = 0;
451 451
452 mutex_init(&rdev->r600_blit.mutex);
452 rdev->r600_blit.state_offset = 0; 453 rdev->r600_blit.state_offset = 0;
453 454
454 if (rdev->family >= CHIP_RV770) 455 if (rdev->family >= CHIP_RV770)
@@ -512,14 +513,16 @@ void r600_blit_fini(struct radeon_device *rdev)
512{ 513{
513 int r; 514 int r;
514 515
516 if (rdev->r600_blit.shader_obj == NULL)
517 return;
518 /* If we can't reserve the bo, unref should be enough to destroy
519 * it when it becomes idle.
520 */
515 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 521 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
516 if (unlikely(r != 0)) { 522 if (!r) {
517 dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r); 523 radeon_bo_unpin(rdev->r600_blit.shader_obj);
518 goto out_unref; 524 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
519 } 525 }
520 radeon_bo_unpin(rdev->r600_blit.shader_obj);
521 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
522out_unref:
523 radeon_bo_unref(&rdev->r600_blit.shader_obj); 526 radeon_bo_unref(&rdev->r600_blit.shader_obj);
524} 527}
525 528
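
The rewritten teardown tolerates a reservation failure: if the bo cannot be reserved, dropping the last reference is still enough, because TTM destroys the object once it goes idle. The pattern, condensed:

    if (rdev->r600_blit.shader_obj == NULL)
        return;
    r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
    if (!r) {
        /* we hold the reservation: unpin cleanly first */
        radeon_bo_unpin(rdev->r600_blit.shader_obj);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);
    }
    radeon_bo_unref(&rdev->r600_blit.shader_obj);  /* safe either way */
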
@@ -540,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
540void r600_vb_ib_put(struct radeon_device *rdev) 543void r600_vb_ib_put(struct radeon_device *rdev)
541{ 544{
542 radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); 545 radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
543 mutex_lock(&rdev->ib_pool.mutex);
544 list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
545 mutex_unlock(&rdev->ib_pool.mutex);
546 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); 546 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
547} 547}
548 548
@@ -555,7 +555,8 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
555 int dwords_per_loop = 76, num_loops; 555 int dwords_per_loop = 76, num_loops;
556 556
557 r = r600_vb_ib_get(rdev); 557 r = r600_vb_ib_get(rdev);
558 WARN_ON(r); 558 if (r)
559 return r;
559 560
560 /* set_render_target emits 2 extra dwords on rv6xx */ 561 /* set_render_target emits 2 extra dwords on rv6xx */
561 if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) 562 if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
@@ -577,11 +578,12 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
577 ring_size = num_loops * dwords_per_loop; 578 ring_size = num_loops * dwords_per_loop;
578 /* set default + shaders */ 579 /* set default + shaders */
579 ring_size += 40; /* shaders + def state */ 580 ring_size += 40; /* shaders + def state */
580 ring_size += 5; /* fence emit for VB IB */ 581 ring_size += 7; /* fence emit for VB IB */
581 ring_size += 5; /* done copy */ 582 ring_size += 5; /* done copy */
582 ring_size += 5; /* fence emit for done copy */ 583 ring_size += 7; /* fence emit for done copy */
583 r = radeon_ring_lock(rdev, ring_size); 584 r = radeon_ring_lock(rdev, ring_size);
584 WARN_ON(r); 585 if (r)
586 return r;
585 587
586 set_default_state(rdev); /* 14 */ 588 set_default_state(rdev); /* 14 */
587 set_shaders(rdev); /* 26 */ 589 set_shaders(rdev); /* 26 */
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 6d5a711c2e91..75bcf35a0931 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev,
1428 1428
1429 gb_tiling_config |= R600_BANK_SWAPS(1); 1429 gb_tiling_config |= R600_BANK_SWAPS(1);
1430 1430
1431 backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, 1431 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
1432 dev_priv->r600_max_backends, 1432 backend_map = 0x28;
1433 (0xff << dev_priv->r600_max_backends) & 0xff); 1433 else
1434 backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
1435 dev_priv->r600_max_backends,
1436 (0xff << dev_priv->r600_max_backends) & 0xff);
1434 gb_tiling_config |= R600_BACKEND_MAP(backend_map); 1437 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
1435 1438
1436 cc_gc_shader_pipe_config = 1439 cc_gc_shader_pipe_config =
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 44060b92d9e6..e4c45ec16507 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
36typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); 36typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
37static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; 37static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
38 38
39struct r600_cs_track {
40 u32 cb_color0_base_last;
41};
42
39/** 43/**
40 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 44 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
41 * @parser: parser structure holding parsing context. 45 * @parser: parser structure holding parsing context.
@@ -177,6 +181,28 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
177} 181}
178 182
179/** 183/**
184 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
185 * @parser: parser structure holding parsing context.
186 *
187 * Check whether the next packet is a relocation packet3 (a NOP carrying
188 * the relocation data); returns 1 if it is, 0 otherwise.
189 **/
190static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
191{
192 struct radeon_cs_packet p3reloc;
193 int r;
194
195 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
196 if (r) {
197 return 0;
198 }
199 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
200 return 0;
201 }
202 return 1;
203}
204
205/**
180 * r600_cs_packet_next_vline() - parse userspace VLINE packet 206 * r600_cs_packet_next_vline() - parse userspace VLINE packet
181 * @parser: parser structure holding parsing context. 207 * @parser: parser structure holding parsing context.
182 * 208 *
@@ -337,6 +363,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
337 struct radeon_cs_packet *pkt) 363 struct radeon_cs_packet *pkt)
338{ 364{
339 struct radeon_cs_reloc *reloc; 365 struct radeon_cs_reloc *reloc;
366 struct r600_cs_track *track;
340 volatile u32 *ib; 367 volatile u32 *ib;
341 unsigned idx; 368 unsigned idx;
342 unsigned i; 369 unsigned i;
@@ -344,6 +371,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
344 int r; 371 int r;
345 u32 idx_value; 372 u32 idx_value;
346 373
374 track = (struct r600_cs_track *)p->track;
347 ib = p->ib->ptr; 375 ib = p->ib->ptr;
348 idx = pkt->idx + 1; 376 idx = pkt->idx + 1;
349 idx_value = radeon_get_ib_value(p, idx); 377 idx_value = radeon_get_ib_value(p, idx);
@@ -503,9 +531,60 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
503 for (i = 0; i < pkt->count; i++) { 531 for (i = 0; i < pkt->count; i++) {
504 reg = start_reg + (4 * i); 532 reg = start_reg + (4 * i);
505 switch (reg) { 533 switch (reg) {
534 /* These registers were added late; there is userspace
535 * which does provide a relocation for them but sets a
536 * 0 offset. In order to avoid breaking old userspace
537 * we detect this and make the address point to the last
538 * CB_COLOR0_BASE. Note that if userspace doesn't set
539 * CB_COLOR0_BASE before these registers we will report an
540 * error. Old userspace always sets CB_COLOR0_BASE
541 * before any of this.
542 */
543 case R_0280E0_CB_COLOR0_FRAG:
544 case R_0280E4_CB_COLOR1_FRAG:
545 case R_0280E8_CB_COLOR2_FRAG:
546 case R_0280EC_CB_COLOR3_FRAG:
547 case R_0280F0_CB_COLOR4_FRAG:
548 case R_0280F4_CB_COLOR5_FRAG:
549 case R_0280F8_CB_COLOR6_FRAG:
550 case R_0280FC_CB_COLOR7_FRAG:
551 case R_0280C0_CB_COLOR0_TILE:
552 case R_0280C4_CB_COLOR1_TILE:
553 case R_0280C8_CB_COLOR2_TILE:
554 case R_0280CC_CB_COLOR3_TILE:
555 case R_0280D0_CB_COLOR4_TILE:
556 case R_0280D4_CB_COLOR5_TILE:
557 case R_0280D8_CB_COLOR6_TILE:
558 case R_0280DC_CB_COLOR7_TILE:
559 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
560 if (!track->cb_color0_base_last) {
561 dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
562 return -EINVAL;
563 }
564 ib[idx+1+i] = track->cb_color0_base_last;
565 printk_once(KERN_WARNING "radeon: You have old & broken userspace, "
566 "please consider updating mesa & xf86-video-ati\n");
567 } else {
568 r = r600_cs_packet_next_reloc(p, &reloc);
569 if (r) {
570 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
571 return -EINVAL;
572 }
573 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
574 }
575 break;
506 case DB_DEPTH_BASE: 576 case DB_DEPTH_BASE:
507 case DB_HTILE_DATA_BASE: 577 case DB_HTILE_DATA_BASE:
508 case CB_COLOR0_BASE: 578 case CB_COLOR0_BASE:
579 r = r600_cs_packet_next_reloc(p, &reloc);
580 if (r) {
581 DRM_ERROR("bad SET_CONTEXT_REG "
582 "0x%04X\n", reg);
583 return -EINVAL;
584 }
585 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
586 track->cb_color0_base_last = ib[idx+1+i];
587 break;
509 case CB_COLOR1_BASE: 588 case CB_COLOR1_BASE:
510 case CB_COLOR2_BASE: 589 case CB_COLOR2_BASE:
511 case CB_COLOR3_BASE: 590 case CB_COLOR3_BASE:
@@ -678,8 +757,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
678int r600_cs_parse(struct radeon_cs_parser *p) 757int r600_cs_parse(struct radeon_cs_parser *p)
679{ 758{
680 struct radeon_cs_packet pkt; 759 struct radeon_cs_packet pkt;
760 struct r600_cs_track *track;
681 int r; 761 int r;
682 762
763 track = kzalloc(sizeof(*track), GFP_KERNEL);
764 p->track = track;
683 do { 765 do {
684 r = r600_cs_packet_parse(p, &pkt, p->idx); 766 r = r600_cs_packet_parse(p, &pkt, p->idx);
685 if (r) { 767 if (r) {
@@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
757 /* initialize parser */ 839 /* initialize parser */
758 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 840 memset(&parser, 0, sizeof(struct radeon_cs_parser));
759 parser.filp = filp; 841 parser.filp = filp;
842 parser.dev = &dev->pdev->dev;
760 parser.rdev = NULL; 843 parser.rdev = NULL;
761 parser.family = family; 844 parser.family = family;
762 parser.ib = &fake_ib; 845 parser.ib = &fake_ib;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 05894edadab4..30480881aed1 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -882,4 +882,29 @@
882#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) 882#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
883 883
884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
885
886#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
887#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
888#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
889#define C_0280E0_BASE_256B 0x00000000
890#define R_0280E4_CB_COLOR1_FRAG 0x0280E4
891#define R_0280E8_CB_COLOR2_FRAG 0x0280E8
892#define R_0280EC_CB_COLOR3_FRAG 0x0280EC
893#define R_0280F0_CB_COLOR4_FRAG 0x0280F0
894#define R_0280F4_CB_COLOR5_FRAG 0x0280F4
895#define R_0280F8_CB_COLOR6_FRAG 0x0280F8
896#define R_0280FC_CB_COLOR7_FRAG 0x0280FC
897#define R_0280C0_CB_COLOR0_TILE 0x0280C0
898#define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
899#define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
900#define C_0280C0_BASE_256B 0x00000000
901#define R_0280C4_CB_COLOR1_TILE 0x0280C4
902#define R_0280C8_CB_COLOR2_TILE 0x0280C8
903#define R_0280CC_CB_COLOR3_TILE 0x0280CC
904#define R_0280D0_CB_COLOR4_TILE 0x0280D0
905#define R_0280D4_CB_COLOR5_TILE 0x0280D4
906#define R_0280D8_CB_COLOR6_TILE 0x0280D8
907#define R_0280DC_CB_COLOR7_TILE 0x0280DC
908
909
885#endif 910#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 53b55608102b..c0356bb193e5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -96,6 +96,7 @@ extern int radeon_audio;
96 * symbol; 96 * symbol;
97 */ 97 */
98#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 98#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
99/* RADEON_IB_POOL_SIZE must be a power of 2 */
99#define RADEON_IB_POOL_SIZE 16 100#define RADEON_IB_POOL_SIZE 16
100#define RADEON_DEBUGFS_MAX_NUM_FILES 32 101#define RADEON_DEBUGFS_MAX_NUM_FILES 32
101#define RADEONFB_CONN_LIMIT 4 102#define RADEONFB_CONN_LIMIT 4
@@ -319,10 +320,12 @@ struct radeon_mc {
319 u64 real_vram_size; 320 u64 real_vram_size;
320 int vram_mtrr; 321 int vram_mtrr;
321 bool vram_is_ddr; 322 bool vram_is_ddr;
323 bool igp_sideport_enabled;
322}; 324};
323 325
324int radeon_mc_setup(struct radeon_device *rdev); 326int radeon_mc_setup(struct radeon_device *rdev);
325 327bool radeon_combios_sideport_present(struct radeon_device *rdev);
328bool radeon_atombios_sideport_present(struct radeon_device *rdev);
326 329
327/* 330/*
328 * GPU scratch registers structures, functions & helpers 331 * GPU scratch registers structures, functions & helpers
@@ -361,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
361 */ 364 */
362struct radeon_ib { 365struct radeon_ib {
363 struct list_head list; 366 struct list_head list;
364 unsigned long idx; 367 unsigned idx;
365 uint64_t gpu_addr; 368 uint64_t gpu_addr;
366 struct radeon_fence *fence; 369 struct radeon_fence *fence;
367 uint32_t *ptr; 370 uint32_t *ptr;
368 uint32_t length_dw; 371 uint32_t length_dw;
372 bool free;
369}; 373};
370 374
371/* 375/*
@@ -375,10 +379,9 @@ struct radeon_ib {
375struct radeon_ib_pool { 379struct radeon_ib_pool {
376 struct mutex mutex; 380 struct mutex mutex;
377 struct radeon_bo *robj; 381 struct radeon_bo *robj;
378 struct list_head scheduled_ibs;
379 struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; 382 struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
380 bool ready; 383 bool ready;
381 DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE); 384 unsigned head_id;
382}; 385};
383 386
384struct radeon_cp { 387struct radeon_cp {
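
The IB pool bookkeeping moves from a bitmap to a per-IB free flag plus a head_id cursor, and the new comment pins RADEON_IB_POOL_SIZE to a power of two so the scan can wrap with a mask. A hypothetical allocator built on those fields, presumably run under ib_pool.mutex — the real radeon_ib_get() is not part of this hunk, so take this purely as a sketch of the intent:

    static struct radeon_ib *ib_pool_grab(struct radeon_device *rdev)
    {
        unsigned i, idx = rdev->ib_pool.head_id;

        /* hypothetical round-robin scan over the pool */
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
            struct radeon_ib *ib = &rdev->ib_pool.ibs[idx];
            if (ib->free) {
                ib->free = false;
                rdev->ib_pool.head_id = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
                return ib;
            }
            idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
        }
        return NULL;    /* pool exhausted */
    }
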
@@ -408,13 +411,13 @@ struct r600_ih {
408 unsigned wptr_old; 411 unsigned wptr_old;
409 unsigned ring_size; 412 unsigned ring_size;
410 uint64_t gpu_addr; 413 uint64_t gpu_addr;
411 uint32_t align_mask;
412 uint32_t ptr_mask; 414 uint32_t ptr_mask;
413 spinlock_t lock; 415 spinlock_t lock;
414 bool enabled; 416 bool enabled;
415}; 417};
416 418
417struct r600_blit { 419struct r600_blit {
420 struct mutex mutex;
418 struct radeon_bo *shader_obj; 421 struct radeon_bo *shader_obj;
419 u64 shader_gpu_addr; 422 u64 shader_gpu_addr;
420 u32 vs_offset, ps_offset; 423 u32 vs_offset, ps_offset;
@@ -463,6 +466,7 @@ struct radeon_cs_chunk {
463}; 466};
464 467
465struct radeon_cs_parser { 468struct radeon_cs_parser {
469 struct device *dev;
466 struct radeon_device *rdev; 470 struct radeon_device *rdev;
467 struct drm_file *filp; 471 struct drm_file *filp;
468 /* chunks */ 472 /* chunks */
@@ -654,11 +658,17 @@ struct radeon_asic {
654 uint32_t offset, uint32_t obj_size); 658 uint32_t offset, uint32_t obj_size);
655 int (*clear_surface_reg)(struct radeon_device *rdev, int reg); 659 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
656 void (*bandwidth_update)(struct radeon_device *rdev); 660 void (*bandwidth_update)(struct radeon_device *rdev);
657 void (*hdp_flush)(struct radeon_device *rdev);
658 void (*hpd_init)(struct radeon_device *rdev); 661 void (*hpd_init)(struct radeon_device *rdev);
659 void (*hpd_fini)(struct radeon_device *rdev); 662 void (*hpd_fini)(struct radeon_device *rdev);
660 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 663 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
661 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 664 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
665 /* ioctl hw specific callback. Some hw might want to perform a special
666 * operation on a specific ioctl. For instance, on wait idle some hw
667 * might want to perform an HDP flush through MMIO, as it seems that
668 * some R6XX/R7XX hw doesn't take an HDP flush into account if it is
669 * programmed through the ring.
670 */
671 void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
662}; 672};
663 673
664/* 674/*
@@ -667,11 +677,14 @@ struct radeon_asic {
667struct r100_asic { 677struct r100_asic {
668 const unsigned *reg_safe_bm; 678 const unsigned *reg_safe_bm;
669 unsigned reg_safe_bm_size; 679 unsigned reg_safe_bm_size;
680 u32 hdp_cntl;
670}; 681};
671 682
672struct r300_asic { 683struct r300_asic {
673 const unsigned *reg_safe_bm; 684 const unsigned *reg_safe_bm;
674 unsigned reg_safe_bm_size; 685 unsigned reg_safe_bm_size;
686 u32 resync_scratch;
687 u32 hdp_cntl;
675}; 688};
676 689
677struct r600_asic { 690struct r600_asic {
@@ -843,7 +856,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
843 856
844static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) 857static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
845{ 858{
846 if (reg < 0x10000) 859 if (reg < rdev->rmmio_size)
847 return readl(((void __iomem *)rdev->rmmio) + reg); 860 return readl(((void __iomem *)rdev->rmmio) + reg);
848 else { 861 else {
849 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 862 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@@ -853,7 +866,7 @@ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
853 866
854static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 867static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
855{ 868{
856 if (reg < 0x10000) 869 if (reg < rdev->rmmio_size)
857 writel(v, ((void __iomem *)rdev->rmmio) + reg); 870 writel(v, ((void __iomem *)rdev->rmmio) + reg);
858 else { 871 else {
859 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 872 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
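Both accessors above use the same windowed scheme: offsets inside the mapped BAR go straight to readl()/writel(), and anything past it is reached indirectly through the RADEON_MM_INDEX/RADEON_MM_DATA pair. Bounding the direct path on rdev->rmmio_size, rather than a hard-coded 0x10000, keeps the check in sync with however large the BAR actually is. A sketch of the read path including the indirect branch the hunk's context cuts off, assuming RADEON_MM_DATA is the data port paired with RADEON_MM_INDEX:

    /* Sketch: completes the truncated else-branch of r100_mm_rreg(). */
    static uint32_t mm_rreg_sketch(struct radeon_device *rdev, uint32_t reg)
    {
        if (reg < rdev->rmmio_size)   /* direct: offset lies inside the BAR */
            return readl((void __iomem *)rdev->rmmio + reg);
        /* indirect: select the register, then read it via the data port */
        writel(reg, (void __iomem *)rdev->rmmio + RADEON_MM_INDEX);
        return readl((void __iomem *)rdev->rmmio + RADEON_MM_DATA);
    }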
@@ -1007,13 +1020,14 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1007#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) 1020#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
1008#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) 1021#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
1009#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) 1022#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
1010#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
1011#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev)) 1023#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
1012#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) 1024#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
1013#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) 1025#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
1014#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) 1026#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
1015 1027
1016/* Common functions */ 1028/* Common functions */
1029/* AGP */
1030extern void radeon_agp_disable(struct radeon_device *rdev);
1017extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); 1031extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
1018extern int radeon_modeset_init(struct radeon_device *rdev); 1032extern int radeon_modeset_init(struct radeon_device *rdev);
1019extern void radeon_modeset_fini(struct radeon_device *rdev); 1033extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1137,6 +1151,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
1137extern void r600_cp_stop(struct radeon_device *rdev); 1151extern void r600_cp_stop(struct radeon_device *rdev);
1138extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); 1152extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
1139extern int r600_cp_resume(struct radeon_device *rdev); 1153extern int r600_cp_resume(struct radeon_device *rdev);
1154extern void r600_cp_fini(struct radeon_device *rdev);
1140extern int r600_count_pipe_bits(uint32_t val); 1155extern int r600_count_pipe_bits(uint32_t val);
1141extern int r600_gart_clear_page(struct radeon_device *rdev, int i); 1156extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
1142extern int r600_mc_wait_for_idle(struct radeon_device *rdev); 1157extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
@@ -1157,7 +1172,8 @@ extern int r600_irq_init(struct radeon_device *rdev);
1157extern void r600_irq_fini(struct radeon_device *rdev); 1172extern void r600_irq_fini(struct radeon_device *rdev);
1158extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); 1173extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
1159extern int r600_irq_set(struct radeon_device *rdev); 1174extern int r600_irq_set(struct radeon_device *rdev);
1160 1175extern void r600_irq_suspend(struct radeon_device *rdev);
1176/* r600 audio */
1161extern int r600_audio_init(struct radeon_device *rdev); 1177extern int r600_audio_init(struct radeon_device *rdev);
1162extern int r600_audio_tmds_index(struct drm_encoder *encoder); 1178extern int r600_audio_tmds_index(struct drm_encoder *encoder);
1163extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); 1179extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 54bf49a6d676..c0681a5556dc 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -144,9 +144,19 @@ int radeon_agp_init(struct radeon_device *rdev)
144 144
145 ret = drm_agp_info(rdev->ddev, &info); 145 ret = drm_agp_info(rdev->ddev, &info);
146 if (ret) { 146 if (ret) {
147 drm_agp_release(rdev->ddev);
147 DRM_ERROR("Unable to get AGP info: %d\n", ret); 148 DRM_ERROR("Unable to get AGP info: %d\n", ret);
148 return ret; 149 return ret;
149 } 150 }
151
152 if (rdev->ddev->agp->agp_info.aper_size < 32) {
153 drm_agp_release(rdev->ddev);
154 dev_warn(rdev->dev, "AGP aperture too small (%zuM) "
155 "need at least 32M, disabling AGP\n",
156 rdev->ddev->agp->agp_info.aper_size);
157 return -EINVAL;
158 }
159
150 mode.mode = info.mode; 160 mode.mode = info.mode;
151 agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; 161 agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
152 is_v3 = !!(agp_status & RADEON_AGPv3_MODE); 162 is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
@@ -221,6 +231,7 @@ int radeon_agp_init(struct radeon_device *rdev)
221 ret = drm_agp_enable(rdev->ddev, mode); 231 ret = drm_agp_enable(rdev->ddev, mode);
222 if (ret) { 232 if (ret) {
223 DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); 233 DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
234 drm_agp_release(rdev->ddev);
224 return ret; 235 return ret;
225 } 236 }
226 237
@@ -252,10 +263,8 @@ void radeon_agp_resume(struct radeon_device *rdev)
252void radeon_agp_fini(struct radeon_device *rdev) 263void radeon_agp_fini(struct radeon_device *rdev)
253{ 264{
254#if __OS_HAS_AGP 265#if __OS_HAS_AGP
255 if (rdev->flags & RADEON_IS_AGP) { 266 if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
256 if (rdev->ddev->agp && rdev->ddev->agp->acquired) { 267 drm_agp_release(rdev->ddev);
257 drm_agp_release(rdev->ddev);
258 }
259 } 268 }
260#endif 269#endif
261} 270}
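The radeon_agp_init() changes are all about balancing drm_agp_acquire(): once the device is acquired, every failure exit (info query, undersized aperture, enable) now releases it, and radeon_agp_fini() drops the RADEON_IS_AGP gate so a release happens whenever an acquire is still outstanding. The shape being enforced, as a minimal sketch; setup_remaining() is a hypothetical stand-in for the later init steps:

    /* Sketch of the acquire/release balancing this diff enforces. */
    int agp_init_sketch(struct radeon_device *rdev)
    {
        int ret = drm_agp_acquire(rdev->ddev);
        if (ret)
            return ret;
        ret = setup_remaining(rdev);       /* any later failure ...     */
        if (ret) {
            drm_agp_release(rdev->ddev);   /* ... must undo the acquire */
            return ret;
        }
        return 0;
    }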
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index eb29217bbf1d..05ee1aeac3fd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -77,7 +77,6 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
77void r100_bandwidth_update(struct radeon_device *rdev); 77void r100_bandwidth_update(struct radeon_device *rdev);
78void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 78void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
79int r100_ring_test(struct radeon_device *rdev); 79int r100_ring_test(struct radeon_device *rdev);
80void r100_hdp_flush(struct radeon_device *rdev);
81void r100_hpd_init(struct radeon_device *rdev); 80void r100_hpd_init(struct radeon_device *rdev);
82void r100_hpd_fini(struct radeon_device *rdev); 81void r100_hpd_fini(struct radeon_device *rdev);
83bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 82bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -114,11 +113,11 @@ static struct radeon_asic r100_asic = {
114 .set_surface_reg = r100_set_surface_reg, 113 .set_surface_reg = r100_set_surface_reg,
115 .clear_surface_reg = r100_clear_surface_reg, 114 .clear_surface_reg = r100_clear_surface_reg,
116 .bandwidth_update = &r100_bandwidth_update, 115 .bandwidth_update = &r100_bandwidth_update,
117 .hdp_flush = &r100_hdp_flush,
118 .hpd_init = &r100_hpd_init, 116 .hpd_init = &r100_hpd_init,
119 .hpd_fini = &r100_hpd_fini, 117 .hpd_fini = &r100_hpd_fini,
120 .hpd_sense = &r100_hpd_sense, 118 .hpd_sense = &r100_hpd_sense,
121 .hpd_set_polarity = &r100_hpd_set_polarity, 119 .hpd_set_polarity = &r100_hpd_set_polarity,
120 .ioctl_wait_idle = NULL,
122}; 121};
123 122
124 123
@@ -174,11 +173,11 @@ static struct radeon_asic r300_asic = {
174 .set_surface_reg = r100_set_surface_reg, 173 .set_surface_reg = r100_set_surface_reg,
175 .clear_surface_reg = r100_clear_surface_reg, 174 .clear_surface_reg = r100_clear_surface_reg,
176 .bandwidth_update = &r100_bandwidth_update, 175 .bandwidth_update = &r100_bandwidth_update,
177 .hdp_flush = &r100_hdp_flush,
178 .hpd_init = &r100_hpd_init, 176 .hpd_init = &r100_hpd_init,
179 .hpd_fini = &r100_hpd_fini, 177 .hpd_fini = &r100_hpd_fini,
180 .hpd_sense = &r100_hpd_sense, 178 .hpd_sense = &r100_hpd_sense,
181 .hpd_set_polarity = &r100_hpd_set_polarity, 179 .hpd_set_polarity = &r100_hpd_set_polarity,
180 .ioctl_wait_idle = NULL,
182}; 181};
183 182
184/* 183/*
@@ -218,11 +217,11 @@ static struct radeon_asic r420_asic = {
218 .set_surface_reg = r100_set_surface_reg, 217 .set_surface_reg = r100_set_surface_reg,
219 .clear_surface_reg = r100_clear_surface_reg, 218 .clear_surface_reg = r100_clear_surface_reg,
220 .bandwidth_update = &r100_bandwidth_update, 219 .bandwidth_update = &r100_bandwidth_update,
221 .hdp_flush = &r100_hdp_flush,
222 .hpd_init = &r100_hpd_init, 220 .hpd_init = &r100_hpd_init,
223 .hpd_fini = &r100_hpd_fini, 221 .hpd_fini = &r100_hpd_fini,
224 .hpd_sense = &r100_hpd_sense, 222 .hpd_sense = &r100_hpd_sense,
225 .hpd_set_polarity = &r100_hpd_set_polarity, 223 .hpd_set_polarity = &r100_hpd_set_polarity,
224 .ioctl_wait_idle = NULL,
226}; 225};
227 226
228 227
@@ -267,11 +266,11 @@ static struct radeon_asic rs400_asic = {
267 .set_surface_reg = r100_set_surface_reg, 266 .set_surface_reg = r100_set_surface_reg,
268 .clear_surface_reg = r100_clear_surface_reg, 267 .clear_surface_reg = r100_clear_surface_reg,
269 .bandwidth_update = &r100_bandwidth_update, 268 .bandwidth_update = &r100_bandwidth_update,
270 .hdp_flush = &r100_hdp_flush,
271 .hpd_init = &r100_hpd_init, 269 .hpd_init = &r100_hpd_init,
272 .hpd_fini = &r100_hpd_fini, 270 .hpd_fini = &r100_hpd_fini,
273 .hpd_sense = &r100_hpd_sense, 271 .hpd_sense = &r100_hpd_sense,
274 .hpd_set_polarity = &r100_hpd_set_polarity, 272 .hpd_set_polarity = &r100_hpd_set_polarity,
273 .ioctl_wait_idle = NULL,
275}; 274};
276 275
277 276
@@ -324,11 +323,11 @@ static struct radeon_asic rs600_asic = {
324 .set_pcie_lanes = NULL, 323 .set_pcie_lanes = NULL,
325 .set_clock_gating = &radeon_atom_set_clock_gating, 324 .set_clock_gating = &radeon_atom_set_clock_gating,
326 .bandwidth_update = &rs600_bandwidth_update, 325 .bandwidth_update = &rs600_bandwidth_update,
327 .hdp_flush = &r100_hdp_flush,
328 .hpd_init = &rs600_hpd_init, 326 .hpd_init = &rs600_hpd_init,
329 .hpd_fini = &rs600_hpd_fini, 327 .hpd_fini = &rs600_hpd_fini,
330 .hpd_sense = &rs600_hpd_sense, 328 .hpd_sense = &rs600_hpd_sense,
331 .hpd_set_polarity = &rs600_hpd_set_polarity, 329 .hpd_set_polarity = &rs600_hpd_set_polarity,
330 .ioctl_wait_idle = NULL,
332}; 331};
333 332
334 333
@@ -372,11 +371,11 @@ static struct radeon_asic rs690_asic = {
372 .set_surface_reg = r100_set_surface_reg, 371 .set_surface_reg = r100_set_surface_reg,
373 .clear_surface_reg = r100_clear_surface_reg, 372 .clear_surface_reg = r100_clear_surface_reg,
374 .bandwidth_update = &rs690_bandwidth_update, 373 .bandwidth_update = &rs690_bandwidth_update,
375 .hdp_flush = &r100_hdp_flush,
376 .hpd_init = &rs600_hpd_init, 374 .hpd_init = &rs600_hpd_init,
377 .hpd_fini = &rs600_hpd_fini, 375 .hpd_fini = &rs600_hpd_fini,
378 .hpd_sense = &rs600_hpd_sense, 376 .hpd_sense = &rs600_hpd_sense,
379 .hpd_set_polarity = &rs600_hpd_set_polarity, 377 .hpd_set_polarity = &rs600_hpd_set_polarity,
378 .ioctl_wait_idle = NULL,
380}; 379};
381 380
382 381
@@ -424,11 +423,11 @@ static struct radeon_asic rv515_asic = {
424 .set_surface_reg = r100_set_surface_reg, 423 .set_surface_reg = r100_set_surface_reg,
425 .clear_surface_reg = r100_clear_surface_reg, 424 .clear_surface_reg = r100_clear_surface_reg,
426 .bandwidth_update = &rv515_bandwidth_update, 425 .bandwidth_update = &rv515_bandwidth_update,
427 .hdp_flush = &r100_hdp_flush,
428 .hpd_init = &rs600_hpd_init, 426 .hpd_init = &rs600_hpd_init,
429 .hpd_fini = &rs600_hpd_fini, 427 .hpd_fini = &rs600_hpd_fini,
430 .hpd_sense = &rs600_hpd_sense, 428 .hpd_sense = &rs600_hpd_sense,
431 .hpd_set_polarity = &rs600_hpd_set_polarity, 429 .hpd_set_polarity = &rs600_hpd_set_polarity,
430 .ioctl_wait_idle = NULL,
432}; 431};
433 432
434 433
@@ -467,11 +466,11 @@ static struct radeon_asic r520_asic = {
467 .set_surface_reg = r100_set_surface_reg, 466 .set_surface_reg = r100_set_surface_reg,
468 .clear_surface_reg = r100_clear_surface_reg, 467 .clear_surface_reg = r100_clear_surface_reg,
469 .bandwidth_update = &rv515_bandwidth_update, 468 .bandwidth_update = &rv515_bandwidth_update,
470 .hdp_flush = &r100_hdp_flush,
471 .hpd_init = &rs600_hpd_init, 469 .hpd_init = &rs600_hpd_init,
472 .hpd_fini = &rs600_hpd_fini, 470 .hpd_fini = &rs600_hpd_fini,
473 .hpd_sense = &rs600_hpd_sense, 471 .hpd_sense = &rs600_hpd_sense,
474 .hpd_set_polarity = &rs600_hpd_set_polarity, 472 .hpd_set_polarity = &rs600_hpd_set_polarity,
473 .ioctl_wait_idle = NULL,
475}; 474};
476 475
477/* 476/*
@@ -508,12 +507,12 @@ int r600_ring_test(struct radeon_device *rdev);
508int r600_copy_blit(struct radeon_device *rdev, 507int r600_copy_blit(struct radeon_device *rdev,
509 uint64_t src_offset, uint64_t dst_offset, 508 uint64_t src_offset, uint64_t dst_offset,
510 unsigned num_pages, struct radeon_fence *fence); 509 unsigned num_pages, struct radeon_fence *fence);
511void r600_hdp_flush(struct radeon_device *rdev);
512void r600_hpd_init(struct radeon_device *rdev); 510void r600_hpd_init(struct radeon_device *rdev);
513void r600_hpd_fini(struct radeon_device *rdev); 511void r600_hpd_fini(struct radeon_device *rdev);
514bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 512bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
515void r600_hpd_set_polarity(struct radeon_device *rdev, 513void r600_hpd_set_polarity(struct radeon_device *rdev,
516 enum radeon_hpd_id hpd); 514 enum radeon_hpd_id hpd);
515extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
517 516
518static struct radeon_asic r600_asic = { 517static struct radeon_asic r600_asic = {
519 .init = &r600_init, 518 .init = &r600_init,
@@ -544,11 +543,11 @@ static struct radeon_asic r600_asic = {
544 .set_surface_reg = r600_set_surface_reg, 543 .set_surface_reg = r600_set_surface_reg,
545 .clear_surface_reg = r600_clear_surface_reg, 544 .clear_surface_reg = r600_clear_surface_reg,
546 .bandwidth_update = &rv515_bandwidth_update, 545 .bandwidth_update = &rv515_bandwidth_update,
547 .hdp_flush = &r600_hdp_flush,
548 .hpd_init = &r600_hpd_init, 546 .hpd_init = &r600_hpd_init,
549 .hpd_fini = &r600_hpd_fini, 547 .hpd_fini = &r600_hpd_fini,
550 .hpd_sense = &r600_hpd_sense, 548 .hpd_sense = &r600_hpd_sense,
551 .hpd_set_polarity = &r600_hpd_set_polarity, 549 .hpd_set_polarity = &r600_hpd_set_polarity,
550 .ioctl_wait_idle = r600_ioctl_wait_idle,
552}; 551};
553 552
554/* 553/*
@@ -589,11 +588,11 @@ static struct radeon_asic rv770_asic = {
589 .set_surface_reg = r600_set_surface_reg, 588 .set_surface_reg = r600_set_surface_reg,
590 .clear_surface_reg = r600_clear_surface_reg, 589 .clear_surface_reg = r600_clear_surface_reg,
591 .bandwidth_update = &rv515_bandwidth_update, 590 .bandwidth_update = &rv515_bandwidth_update,
592 .hdp_flush = &r600_hdp_flush,
593 .hpd_init = &r600_hpd_init, 591 .hpd_init = &r600_hpd_init,
594 .hpd_fini = &r600_hpd_fini, 592 .hpd_fini = &r600_hpd_fini,
595 .hpd_sense = &r600_hpd_sense, 593 .hpd_sense = &r600_hpd_sense,
596 .hpd_set_polarity = &r600_hpd_set_polarity, 594 .hpd_set_polarity = &r600_hpd_set_polarity,
595 .ioctl_wait_idle = r600_ioctl_wait_idle,
597}; 596};
598 597
599#endif 598#endif
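Since every pre-R600 entry sets .ioctl_wait_idle to NULL while r600_asic and rv770_asic install r600_ioctl_wait_idle, the hook is optional by construction and any call site has to test it first. The expected dispatch, sketched; the real caller is outside this diff, so the function name here is an assumption:

    /* Sketch: NULL-checked dispatch of the optional per-ASIC hook. */
    static void wait_idle_hook_sketch(struct radeon_device *rdev,
                                      struct radeon_bo *bo)
    {
        if (rdev->asic->ioctl_wait_idle)
            rdev->asic->ioctl_wait_idle(rdev, bo);
    }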
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 321044bef71c..4d8831548a5f 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -114,6 +114,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
114 i2c.i2c_id = gpio->sucI2cId.ucAccess; 114 i2c.i2c_id = gpio->sucI2cId.ucAccess;
115 115
116 i2c.valid = true; 116 i2c.valid = true;
117 break;
117 } 118 }
118 } 119 }
119 120
@@ -205,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
205 *connector_type = DRM_MODE_CONNECTOR_DVID; 206 *connector_type = DRM_MODE_CONNECTOR_DVID;
206 } 207 }
207 208
209 /* Asrock RS600 board lists the DVI port as HDMI */
210 if ((dev->pdev->device == 0x7941) &&
211 (dev->pdev->subsystem_vendor == 0x1849) &&
212 (dev->pdev->subsystem_device == 0x7941)) {
213 if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
214 (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
215 *connector_type = DRM_MODE_CONNECTOR_DVID;
216 }
217
208 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ 218 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
209 if ((dev->pdev->device == 0x7941) && 219 if ((dev->pdev->device == 0x7941) &&
210 (dev->pdev->subsystem_vendor == 0x147b) && 220 (dev->pdev->subsystem_vendor == 0x147b) &&
@@ -286,6 +296,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
286 *connector_type = DRM_MODE_CONNECTOR_DVID; 296 *connector_type = DRM_MODE_CONNECTOR_DVID;
287 } 297 }
288 298
299 /* XFX Pine Group device rv730 reports no VGA DDC lines
300 * even though they are wired up to record 0x93
301 */
302 if ((dev->pdev->device == 0x9498) &&
303 (dev->pdev->subsystem_vendor == 0x1682) &&
304 (dev->pdev->subsystem_device == 0x2452)) {
305 struct radeon_device *rdev = dev->dev_private;
306 *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
307 }
289 return true; 308 return true;
290} 309}
291 310
@@ -345,7 +364,9 @@ const int object_connector_convert[] = {
345 DRM_MODE_CONNECTOR_Unknown, 364 DRM_MODE_CONNECTOR_Unknown,
346 DRM_MODE_CONNECTOR_Unknown, 365 DRM_MODE_CONNECTOR_Unknown,
347 DRM_MODE_CONNECTOR_Unknown, 366 DRM_MODE_CONNECTOR_Unknown,
348 DRM_MODE_CONNECTOR_DisplayPort 367 DRM_MODE_CONNECTOR_DisplayPort,
368 DRM_MODE_CONNECTOR_eDP,
369 DRM_MODE_CONNECTOR_Unknown
349}; 370};
350 371
351bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) 372bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
@@ -935,6 +956,43 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
935 return false; 956 return false;
936} 957}
937 958
959union igp_info {
960 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
961 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
962};
963
964bool radeon_atombios_sideport_present(struct radeon_device *rdev)
965{
966 struct radeon_mode_info *mode_info = &rdev->mode_info;
967 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
968 union igp_info *igp_info;
969 u8 frev, crev;
970 u16 data_offset;
971
972 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
973 &crev, &data_offset);
974
975 igp_info = (union igp_info *)(mode_info->atom_context->bios +
976 data_offset);
977
978 if (igp_info) {
979 switch (crev) {
980 case 1:
981 if (igp_info->info.ucMemoryType & 0xf0)
982 return true;
983 break;
984 case 2:
985 if (igp_info->info_2.ucMemoryType & 0x0f)
986 return true;
987 break;
988 default:
989 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
990 break;
991 }
992 }
993 return false;
994}
995
938bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, 996bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
939 struct radeon_encoder_int_tmds *tmds) 997 struct radeon_encoder_int_tmds *tmds)
940{ 998{
@@ -1026,6 +1084,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
1026 ss->delay = ss_info->asSS_Info[i].ucSS_Delay; 1084 ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
1027 ss->range = ss_info->asSS_Info[i].ucSS_Range; 1085 ss->range = ss_info->asSS_Info[i].ucSS_Range;
1028 ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; 1086 ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
1087 break;
1029 } 1088 }
1030 } 1089 }
1031 } 1090 }
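The two added break statements give both table scans first-match semantics: the i2c GPIO lookup and the spread-spectrum search each kept iterating after filling in their result, so a duplicate id later in the table could overwrite the hit, and the loop burned cycles either way. The pattern, reduced to a self-contained sketch:

    /* Sketch of the first-match scan both hunks adopt. */
    struct rec { int id; int payload; };

    static int find_first(const struct rec *tab, int n, int id,
                          struct rec *out)
    {
        int i;
        for (i = 0; i < n; i++) {
            if (tab[i].id == id) {
                *out = tab[i];
                return 0;   /* stop here: later duplicates can't clobber */
            }
        }
        return -1;          /* not found */
    }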
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 4ddfd4b5bc51..7932dc4d6b90 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
65 if (r) { 65 if (r) {
66 goto out_cleanup; 66 goto out_cleanup;
67 } 67 }
68 start_jiffies = jiffies; 68
69 for (i = 0; i < n; i++) { 69 /* r100 doesn't have dma engine so skip the test */
70 r = radeon_fence_create(rdev, &fence); 70 if (rdev->asic->copy_dma) {
71 if (r) { 71
72 goto out_cleanup; 72 start_jiffies = jiffies;
73 for (i = 0; i < n; i++) {
74 r = radeon_fence_create(rdev, &fence);
75 if (r) {
76 goto out_cleanup;
77 }
78
79 r = radeon_copy_dma(rdev, saddr, daddr,
80 size / RADEON_GPU_PAGE_SIZE, fence);
81
82 if (r) {
83 goto out_cleanup;
84 }
85 r = radeon_fence_wait(fence, false);
86 if (r) {
87 goto out_cleanup;
88 }
89 radeon_fence_unref(&fence);
73 } 90 }
74 r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence); 91 end_jiffies = jiffies;
75 if (r) { 92 time = end_jiffies - start_jiffies;
76 goto out_cleanup; 93 time = jiffies_to_msecs(time);
94 if (time > 0) {
95 i = ((n * size) >> 10) / time;
96 printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
97 " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
98 n, size >> 10,
99 sdomain, ddomain, time,
100 i, i * 1000, (i * 1000) / 1024);
77 } 101 }
78 r = radeon_fence_wait(fence, false);
79 if (r) {
80 goto out_cleanup;
81 }
82 radeon_fence_unref(&fence);
83 }
84 end_jiffies = jiffies;
85 time = end_jiffies - start_jiffies;
86 time = jiffies_to_msecs(time);
87 if (time > 0) {
88 i = ((n * size) >> 10) / time;
89 printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
90 " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
91 sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
92 } 102 }
103
93 start_jiffies = jiffies; 104 start_jiffies = jiffies;
94 for (i = 0; i < n; i++) { 105 for (i = 0; i < n; i++) {
95 r = radeon_fence_create(rdev, &fence); 106 r = radeon_fence_create(rdev, &fence);
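The throughput line printed above is straightforward integer arithmetic: (n * size) >> 10 converts the total bytes moved into KiB, dividing by the elapsed milliseconds gives KiB/ms, and the printk derives KiB/s and MiB/s from that. Worked through as a standalone sketch:

    /* Sketch of the benchmark's throughput math. */
    static unsigned long kb_per_ms(unsigned n, unsigned size_bytes,
                                   unsigned long time_ms)
    {
        unsigned long total_kb = ((unsigned long)n * size_bytes) >> 10;
        return time_ms ? total_kb / time_ms : 0;
    }
    /* e.g. n = 1024 moves of 1 MiB in 512 ms: 1048576 KiB / 512 ms
     * = 2048 KiB/ms, i.e. 2048000 KiB/s and 2000 MiB/s, which is the
     * i, i * 1000, (i * 1000) / 1024 trio in the printk above. */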
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 812f24dbc2a8..73c4405bf42f 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -56,7 +56,7 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
56 else if (post_div == 3) 56 else if (post_div == 3)
57 sclk >>= 2; 57 sclk >>= 2;
58 else if (post_div == 4) 58 else if (post_div == 4)
59 sclk >>= 4; 59 sclk >>= 3;
60 60
61 return sclk; 61 return sclk;
62} 62}
@@ -86,7 +86,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
86 else if (post_div == 3) 86 else if (post_div == 3)
87 mclk >>= 2; 87 mclk >>= 2;
88 else if (post_div == 4) 88 else if (post_div == 4)
89 mclk >>= 4; 89 mclk >>= 3;
90 90
91 return mclk; 91 return mclk;
92} 92}
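Both one-liners fix the same decode: judging from the neighbouring branches (post_div 2 shifts by 1, post_div 3 shifts by 2), the encoding appears to be divisor = 1 << (post_div - 1), so a post_div of 4 means divide by 8; the old >>= 4 divided by 16 and halved the reported clock. The implied decode, sketched:

    /* Sketch of the post-divider decode implied by the surrounding code. */
    static uint32_t apply_post_div(uint32_t clk, int post_div)
    {
        if (post_div >= 2 && post_div <= 4)
            clk >>= (post_div - 1);   /* 2 -> /2, 3 -> /4, 4 -> /8 */
        return clk;
    }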
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index fd94dbca33ac..e7b19440102e 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -595,6 +595,48 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
595 return false; 595 return false;
596} 596}
597 597
598bool radeon_combios_sideport_present(struct radeon_device *rdev)
599{
600 struct drm_device *dev = rdev->ddev;
601 u16 igp_info;
602
603 igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
604
605 if (igp_info) {
606 if (RBIOS16(igp_info + 0x4))
607 return true;
608 }
609 return false;
610}
611
612static const uint32_t default_primarydac_adj[CHIP_LAST] = {
613 0x00000808, /* r100 */
614 0x00000808, /* rv100 */
615 0x00000808, /* rs100 */
616 0x00000808, /* rv200 */
617 0x00000808, /* rs200 */
618 0x00000808, /* r200 */
619 0x00000808, /* rv250 */
620 0x00000000, /* rs300 */
621 0x00000808, /* rv280 */
622 0x00000808, /* r300 */
623 0x00000808, /* r350 */
624 0x00000808, /* rv350 */
625 0x00000808, /* rv380 */
626 0x00000808, /* r420 */
627 0x00000808, /* r423 */
628 0x00000808, /* rv410 */
629 0x00000000, /* rs400 */
630 0x00000000, /* rs480 */
631};
632
633static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev,
634 struct radeon_encoder_primary_dac *p_dac)
635{
636 p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family];
637 return;
638}
639
598struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct 640struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
599 radeon_encoder 641 radeon_encoder
600 *encoder) 642 *encoder)
@@ -604,20 +646,20 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
604 uint16_t dac_info; 646 uint16_t dac_info;
605 uint8_t rev, bg, dac; 647 uint8_t rev, bg, dac;
606 struct radeon_encoder_primary_dac *p_dac = NULL; 648 struct radeon_encoder_primary_dac *p_dac = NULL;
649 int found = 0;
607 650
608 if (rdev->bios == NULL) 651 p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac),
652 GFP_KERNEL);
653
654 if (!p_dac)
609 return NULL; 655 return NULL;
610 656
657 if (rdev->bios == NULL)
658 goto out;
659
611 /* check CRT table */ 660 /* check CRT table */
612 dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); 661 dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
613 if (dac_info) { 662 if (dac_info) {
614 p_dac =
615 kzalloc(sizeof(struct radeon_encoder_primary_dac),
616 GFP_KERNEL);
617
618 if (!p_dac)
619 return NULL;
620
621 rev = RBIOS8(dac_info) & 0x3; 663 rev = RBIOS8(dac_info) & 0x3;
622 if (rev < 2) { 664 if (rev < 2) {
623 bg = RBIOS8(dac_info + 0x2) & 0xf; 665 bg = RBIOS8(dac_info + 0x2) & 0xf;
@@ -628,9 +670,13 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
628 dac = RBIOS8(dac_info + 0x3) & 0xf; 670 dac = RBIOS8(dac_info + 0x3) & 0xf;
629 p_dac->ps2_pdac_adj = (bg << 8) | (dac); 671 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
630 } 672 }
631 673 found = 1;
632 } 674 }
633 675
676out:
677 if (!found) /* fallback to defaults */
678 radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
679
634 return p_dac; 680 return p_dac;
635} 681}
636 682
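The primary-DAC refactor inverts the old control flow: instead of allocating only when the CRT table exists and returning NULL otherwise, it allocates p_dac up front, marks found once BIOS data is parsed, and otherwise falls back to the per-family default table, so callers always get usable adjustment values. The shape, as a reduced sketch:

    /* Sketch of the allocate-first, fall-back-on-miss shape. */
    struct dac_sketch { uint32_t ps2_pdac_adj; };

    static struct dac_sketch *get_dac_sketch(bool have_bios,
                                             uint32_t bios_val,
                                             uint32_t family_default)
    {
        struct dac_sketch *p = kzalloc(sizeof(*p), GFP_KERNEL);
        bool found = false;

        if (!p)
            return NULL;
        if (have_bios) {
            p->ps2_pdac_adj = bios_val;        /* prefer the BIOS CRT table */
            found = true;
        }
        if (!found)
            p->ps2_pdac_adj = family_default;  /* per-family fallback */
        return p;
    }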
@@ -641,6 +687,9 @@ radeon_combios_get_tv_info(struct radeon_device *rdev)
641 uint16_t tv_info; 687 uint16_t tv_info;
642 enum radeon_tv_std tv_std = TV_STD_NTSC; 688 enum radeon_tv_std tv_std = TV_STD_NTSC;
643 689
690 if (rdev->bios == NULL)
691 return tv_std;
692
644 tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); 693 tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
645 if (tv_info) { 694 if (tv_info) {
646 if (RBIOS8(tv_info + 6) == 'T') { 695 if (RBIOS8(tv_info + 6) == 'T') {
@@ -922,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
922 lvds->native_mode.vdisplay); 971 lvds->native_mode.vdisplay);
923 972
924 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); 973 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
925 if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) 974 lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
926 lvds->panel_vcc_delay = 2000;
927 975
928 lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); 976 lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
929 lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; 977 lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
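The panel_vcc_delay cleanup removes a dead test: the value comes from RBIOS16() into a 16-bit unsigned field, so the < 0 half of the old condition could never be true, and min_t(u16, v, 2000) expresses the remaining upper clamp directly. Equivalent sketch:

    /* Sketch: why only the upper bound matters for an unsigned field. */
    static unsigned short clamp_vcc_delay(unsigned short raw)
    {
        /* raw < 0 is impossible for an unsigned type; clamp high only,
         * which is what min_t(u16, raw, 2000) does. */
        return raw > 2000 ? 2000 : raw;
    }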
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 20161567dbff..65f81942f399 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -49,8 +49,10 @@ void radeon_connector_hotplug(struct drm_connector *connector)
49 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 49 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
50 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 50 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
51 51
52 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 52 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
53 if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) { 53 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
54 if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
55 (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) {
54 if (radeon_dp_needs_link_train(radeon_connector)) { 56 if (radeon_dp_needs_link_train(radeon_connector)) {
55 if (connector->encoder) 57 if (connector->encoder)
56 dp_link_train(connector->encoder, connector); 58 dp_link_train(connector->encoder, connector);
@@ -578,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
578 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 580 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
579 struct drm_encoder *encoder; 581 struct drm_encoder *encoder;
580 struct drm_encoder_helper_funcs *encoder_funcs; 582 struct drm_encoder_helper_funcs *encoder_funcs;
581 bool dret; 583 bool dret = false;
582 enum drm_connector_status ret = connector_status_disconnected; 584 enum drm_connector_status ret = connector_status_disconnected;
583 585
584 encoder = radeon_best_single_encoder(connector); 586 encoder = radeon_best_single_encoder(connector);
585 if (!encoder) 587 if (!encoder)
586 ret = connector_status_disconnected; 588 ret = connector_status_disconnected;
587 589
588 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); 590 if (radeon_connector->ddc_bus) {
589 dret = radeon_ddc_probe(radeon_connector); 591 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
590 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); 592 dret = radeon_ddc_probe(radeon_connector);
593 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
594 }
591 if (dret) { 595 if (dret) {
592 if (radeon_connector->edid) { 596 if (radeon_connector->edid) {
593 kfree(radeon_connector->edid); 597 kfree(radeon_connector->edid);
@@ -615,7 +619,7 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
615 ret = connector_status_connected; 619 ret = connector_status_connected;
616 } 620 }
617 } else { 621 } else {
618 if (radeon_connector->dac_load_detect) { 622 if (radeon_connector->dac_load_detect && encoder) {
619 encoder_funcs = encoder->helper_private; 623 encoder_funcs = encoder->helper_private;
620 ret = encoder_funcs->detect(encoder, connector); 624 ret = encoder_funcs->detect(encoder, connector);
621 } 625 }
@@ -738,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
738 struct drm_mode_object *obj; 742 struct drm_mode_object *obj;
739 int i; 743 int i;
740 enum drm_connector_status ret = connector_status_disconnected; 744 enum drm_connector_status ret = connector_status_disconnected;
741 bool dret; 745 bool dret = false;
742 746
743 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); 747 if (radeon_connector->ddc_bus) {
744 dret = radeon_ddc_probe(radeon_connector); 748 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
745 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); 749 dret = radeon_ddc_probe(radeon_connector);
750 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
751 }
746 if (dret) { 752 if (dret) {
747 if (radeon_connector->edid) { 753 if (radeon_connector->edid) {
748 kfree(radeon_connector->edid); 754 kfree(radeon_connector->edid);
@@ -774,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
774 * connected and the DVI port disconnected. If the edid doesn't 780 * connected and the DVI port disconnected. If the edid doesn't
775 * say HDMI, vice versa. 781 * say HDMI, vice versa.
776 */ 782 */
777 if (radeon_connector->shared_ddc && connector_status_connected) { 783 if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
778 struct drm_device *dev = connector->dev; 784 struct drm_device *dev = connector->dev;
779 struct drm_connector *list_connector; 785 struct drm_connector *list_connector;
780 struct radeon_connector *list_radeon_connector; 786 struct radeon_connector *list_radeon_connector;
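The shared_ddc fix corrects a classic boolean slip: the old condition tested the enum constant connector_status_connected itself, which is non-zero and therefore always true, so the EDID cross-check ran even for disconnected connectors; comparing ret against the constant restores the intended guard. Reduced to a self-contained sketch whose enum values mirror the DRM ones:

    /* Sketch of the pitfall: an enum constant in a boolean context. */
    enum status_sketch { status_connected = 1, status_disconnected = 2 };

    static int guard_fixed(int shared_ddc, enum status_sketch ret)
    {
        /* old: shared_ddc && status_connected, where the constant is
         * non-zero, so the test collapses to just shared_ddc */
        return shared_ddc && (ret == status_connected);
    }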
@@ -898,10 +904,18 @@ static void radeon_dvi_force(struct drm_connector *connector)
898static int radeon_dvi_mode_valid(struct drm_connector *connector, 904static int radeon_dvi_mode_valid(struct drm_connector *connector,
899 struct drm_display_mode *mode) 905 struct drm_display_mode *mode)
900{ 906{
907 struct drm_device *dev = connector->dev;
908 struct radeon_device *rdev = dev->dev_private;
901 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 909 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
902 910
903 /* XXX check mode bandwidth */ 911 /* XXX check mode bandwidth */
904 912
913 /* clocks over 135 MHz have heat issues with DVI on RV100 */
914 if (radeon_connector->use_digital &&
915 (rdev->family == CHIP_RV100) &&
916 (mode->clock > 135000))
917 return MODE_CLOCK_HIGH;
918
905 if (radeon_connector->use_digital && (mode->clock > 165000)) { 919 if (radeon_connector->use_digital && (mode->clock > 165000)) {
906 if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) || 920 if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
907 (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || 921 (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
@@ -967,7 +981,8 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
967 } 981 }
968 982
969 sink_type = radeon_dp_getsinktype(radeon_connector); 983 sink_type = radeon_dp_getsinktype(radeon_connector);
970 if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { 984 if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
985 (sink_type == CONNECTOR_OBJECT_ID_eDP)) {
971 if (radeon_dp_getdpcd(radeon_connector)) { 986 if (radeon_dp_getdpcd(radeon_connector)) {
972 radeon_dig_connector->dp_sink_type = sink_type; 987 radeon_dig_connector->dp_sink_type = sink_type;
973 ret = connector_status_connected; 988 ret = connector_status_connected;
@@ -992,7 +1007,8 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
992 1007
993 /* XXX check mode bandwidth */ 1008 /* XXX check mode bandwidth */
994 1009
995 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) 1010 if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
1011 (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
996 return radeon_dp_mode_valid_helper(radeon_connector, mode); 1012 return radeon_dp_mode_valid_helper(radeon_connector, mode);
997 else 1013 else
998 return MODE_OK; 1014 return MODE_OK;
@@ -1044,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1044 return; 1060 return;
1045 } 1061 }
1046 if (radeon_connector->ddc_bus && i2c_bus->valid) { 1062 if (radeon_connector->ddc_bus && i2c_bus->valid) {
1047 if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus, 1063 if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
1048 sizeof(struct radeon_i2c_bus_rec)) == 0) {
1049 radeon_connector->shared_ddc = true; 1064 radeon_connector->shared_ddc = true;
1050 shared_ddc = true; 1065 shared_ddc = true;
1051 } 1066 }
@@ -1145,6 +1160,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1145 subpixel_order = SubPixelHorizontalRGB; 1160 subpixel_order = SubPixelHorizontalRGB;
1146 break; 1161 break;
1147 case DRM_MODE_CONNECTOR_DisplayPort: 1162 case DRM_MODE_CONNECTOR_DisplayPort:
1163 case DRM_MODE_CONNECTOR_eDP:
1148 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1164 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1149 if (!radeon_dig_connector) 1165 if (!radeon_dig_connector)
1150 goto failed; 1166 goto failed;
@@ -1157,10 +1173,16 @@ radeon_add_atom_connector(struct drm_device *dev,
1157 goto failed; 1173 goto failed;
1158 if (i2c_bus->valid) { 1174 if (i2c_bus->valid) {
1159 /* add DP i2c bus */ 1175 /* add DP i2c bus */
1160 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); 1176 if (connector_type == DRM_MODE_CONNECTOR_eDP)
1177 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
1178 else
1179 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
1161 if (!radeon_dig_connector->dp_i2c_bus) 1180 if (!radeon_dig_connector->dp_i2c_bus)
1162 goto failed; 1181 goto failed;
1163 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); 1182 if (connector_type == DRM_MODE_CONNECTOR_eDP)
1183 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "eDP");
1184 else
1185 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
1164 if (!radeon_connector->ddc_bus) 1186 if (!radeon_connector->ddc_bus)
1165 goto failed; 1187 goto failed;
1166 } 1188 }
@@ -1324,7 +1346,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1324 radeon_connector->dac_load_detect = false; 1346 radeon_connector->dac_load_detect = false;
1325 drm_connector_attach_property(&radeon_connector->base, 1347 drm_connector_attach_property(&radeon_connector->base,
1326 rdev->mode_info.load_detect_property, 1348 rdev->mode_info.load_detect_property,
1327 1); 1349 radeon_connector->dac_load_detect);
1328 drm_connector_attach_property(&radeon_connector->base, 1350 drm_connector_attach_property(&radeon_connector->base,
1329 rdev->mode_info.tv_std_property, 1351 rdev->mode_info.tv_std_property,
1330 radeon_combios_get_tv_info(rdev)); 1352 radeon_combios_get_tv_info(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 0b2f9c2ad2c1..06123ba31d31 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2145,6 +2145,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
2145 &master_priv->sarea); 2145 &master_priv->sarea);
2146 if (ret) { 2146 if (ret) {
2147 DRM_ERROR("SAREA setup failed\n"); 2147 DRM_ERROR("SAREA setup failed\n");
2148 kfree(master_priv);
2148 return ret; 2149 return ret;
2149 } 2150 }
2150 master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea); 2151 master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 65590a0f1d93..e9d085021c1f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
86 &p->validated); 86 &p->validated);
87 } 87 }
88 } 88 }
89 return radeon_bo_list_validate(&p->validated, p->ib->fence); 89 return radeon_bo_list_validate(&p->validated);
90} 90}
91 91
92int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) 92int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
189{ 189{
190 unsigned i; 190 unsigned i;
191 191
192 if (error) { 192 if (!error && parser->ib) {
193 radeon_bo_list_unvalidate(&parser->validated, 193 radeon_bo_list_fence(&parser->validated, parser->ib->fence);
194 parser->ib->fence);
195 } else {
196 radeon_bo_list_unreserve(&parser->validated);
197 } 194 }
195 radeon_bo_list_unreserve(&parser->validated);
198 for (i = 0; i < parser->nrelocs; i++) { 196 for (i = 0; i < parser->nrelocs; i++) {
199 if (parser->relocs[i].gobj) { 197 if (parser->relocs[i].gobj) {
200 mutex_lock(&parser->rdev->ddev->struct_mutex); 198 mutex_lock(&parser->rdev->ddev->struct_mutex);
@@ -231,6 +229,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
231 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 229 memset(&parser, 0, sizeof(struct radeon_cs_parser));
232 parser.filp = filp; 230 parser.filp = filp;
233 parser.rdev = rdev; 231 parser.rdev = rdev;
232 parser.dev = rdev->dev;
234 r = radeon_cs_parser_init(&parser, data); 233 r = radeon_cs_parser_init(&parser, data);
235 if (r) { 234 if (r) {
236 DRM_ERROR("Failed to initialize parser !\n"); 235 DRM_ERROR("Failed to initialize parser !\n");
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7c6848096bcd..768b1509fa03 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -544,6 +544,7 @@ void radeon_agp_disable(struct radeon_device *rdev)
544 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; 544 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
545 rdev->asic->gart_set_page = &r100_pci_gart_set_page; 545 rdev->asic->gart_set_page = &r100_pci_gart_set_page;
546 } 546 }
547 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
547} 548}
548 549
549void radeon_check_arguments(struct radeon_device *rdev) 550void radeon_check_arguments(struct radeon_device *rdev)
@@ -733,16 +734,18 @@ void radeon_device_fini(struct radeon_device *rdev)
733 */ 734 */
734int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) 735int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
735{ 736{
736 struct radeon_device *rdev = dev->dev_private; 737 struct radeon_device *rdev;
737 struct drm_crtc *crtc; 738 struct drm_crtc *crtc;
738 int r; 739 int r;
739 740
740 if (dev == NULL || rdev == NULL) { 741 if (dev == NULL || dev->dev_private == NULL) {
741 return -ENODEV; 742 return -ENODEV;
742 } 743 }
743 if (state.event == PM_EVENT_PRETHAW) { 744 if (state.event == PM_EVENT_PRETHAW) {
744 return 0; 745 return 0;
745 } 746 }
747 rdev = dev->dev_private;
748
746 /* unpin the front buffers */ 749 /* unpin the front buffers */
747 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 750 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
748 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); 751 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
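The suspend fix is an order-of-operations bug: the old declaration initialized rdev from dev->dev_private before the dev == NULL check could run, so the guard it appeared to provide came too late. Delaying the load until after the checks, as the hunk does, is the standard shape:

    /* Sketch of the deref-before-NULL-check hazard this hunk removes. */
    static int suspend_sketch(struct drm_device *dev)
    {
        struct radeon_device *rdev;

        if (dev == NULL || dev->dev_private == NULL)
            return -ENODEV;
        rdev = dev->dev_private;   /* safe: dev was validated first */
        (void)rdev;                /* ... proceed with suspend work ... */
        return 0;
    }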
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 91d72b70abc9..7e17a362b54b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -234,7 +234,7 @@ static const char *encoder_names[34] = {
234 "INTERNAL_UNIPHY2", 234 "INTERNAL_UNIPHY2",
235}; 235};
236 236
237static const char *connector_names[13] = { 237static const char *connector_names[15] = {
238 "Unknown", 238 "Unknown",
239 "VGA", 239 "VGA",
240 "DVI-I", 240 "DVI-I",
@@ -248,6 +248,8 @@ static const char *connector_names[13] = {
248 "DisplayPort", 248 "DisplayPort",
249 "HDMI-A", 249 "HDMI-A",
250 "HDMI-B", 250 "HDMI-B",
251 "TV",
252 "eDP",
251}; 253};
252 254
253static const char *hpd_names[7] = { 255static const char *hpd_names[7] = {
@@ -276,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
276 DRM_INFO(" %s\n", connector_names[connector->connector_type]); 278 DRM_INFO(" %s\n", connector_names[connector->connector_type]);
277 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 279 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
278 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); 280 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
279 if (radeon_connector->ddc_bus) 281 if (radeon_connector->ddc_bus) {
280 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 282 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
281 radeon_connector->ddc_bus->rec.mask_clk_reg, 283 radeon_connector->ddc_bus->rec.mask_clk_reg,
282 radeon_connector->ddc_bus->rec.mask_data_reg, 284 radeon_connector->ddc_bus->rec.mask_data_reg,
@@ -286,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev)
286 radeon_connector->ddc_bus->rec.en_data_reg, 288 radeon_connector->ddc_bus->rec.en_data_reg,
287 radeon_connector->ddc_bus->rec.y_clk_reg, 289 radeon_connector->ddc_bus->rec.y_clk_reg,
288 radeon_connector->ddc_bus->rec.y_data_reg); 290 radeon_connector->ddc_bus->rec.y_data_reg);
291 } else {
292 if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
293 connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
294 connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
295 connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
296 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
297 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
298 DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
299 }
289 DRM_INFO(" Encoders:\n"); 300 DRM_INFO(" Encoders:\n");
290 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 301 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
291 radeon_encoder = to_radeon_encoder(encoder); 302 radeon_encoder = to_radeon_encoder(encoder);
@@ -329,8 +340,11 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
329 ret = radeon_get_atom_connector_info_from_object_table(dev); 340 ret = radeon_get_atom_connector_info_from_object_table(dev);
330 else 341 else
331 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); 342 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
332 } else 343 } else {
333 ret = radeon_get_legacy_connector_info_from_bios(dev); 344 ret = radeon_get_legacy_connector_info_from_bios(dev);
345 if (ret == false)
346 ret = radeon_get_legacy_connector_info_from_table(dev);
347 }
334 } else { 348 } else {
335 if (!ASIC_IS_AVIVO(rdev)) 349 if (!ASIC_IS_AVIVO(rdev))
336 ret = radeon_get_legacy_connector_info_from_table(dev); 350 ret = radeon_get_legacy_connector_info_from_table(dev);
@@ -349,9 +363,11 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
349{ 363{
350 int ret = 0; 364 int ret = 0;
351 365
352 if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 366 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
367 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
353 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 368 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
354 if (dig->dp_i2c_bus) 369 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
370 dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
355 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); 371 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
356 } 372 }
357 if (!radeon_connector->ddc_bus) 373 if (!radeon_connector->ddc_bus)
@@ -404,11 +420,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
404 uint32_t *fb_div_p, 420 uint32_t *fb_div_p,
405 uint32_t *frac_fb_div_p, 421 uint32_t *frac_fb_div_p,
406 uint32_t *ref_div_p, 422 uint32_t *ref_div_p,
407 uint32_t *post_div_p, 423 uint32_t *post_div_p)
408 int flags)
409{ 424{
410 uint32_t min_ref_div = pll->min_ref_div; 425 uint32_t min_ref_div = pll->min_ref_div;
411 uint32_t max_ref_div = pll->max_ref_div; 426 uint32_t max_ref_div = pll->max_ref_div;
427 uint32_t min_post_div = pll->min_post_div;
428 uint32_t max_post_div = pll->max_post_div;
412 uint32_t min_fractional_feed_div = 0; 429 uint32_t min_fractional_feed_div = 0;
413 uint32_t max_fractional_feed_div = 0; 430 uint32_t max_fractional_feed_div = 0;
414 uint32_t best_vco = pll->best_vco; 431 uint32_t best_vco = pll->best_vco;
@@ -424,7 +441,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
424 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); 441 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
425 freq = freq * 1000; 442 freq = freq * 1000;
426 443
427 if (flags & RADEON_PLL_USE_REF_DIV) 444 if (pll->flags & RADEON_PLL_USE_REF_DIV)
428 min_ref_div = max_ref_div = pll->reference_div; 445 min_ref_div = max_ref_div = pll->reference_div;
429 else { 446 else {
430 while (min_ref_div < max_ref_div-1) { 447 while (min_ref_div < max_ref_div-1) {
@@ -439,19 +456,22 @@ void radeon_compute_pll(struct radeon_pll *pll,
439 } 456 }
440 } 457 }
441 458
442 if (flags & RADEON_PLL_USE_FRAC_FB_DIV) { 459 if (pll->flags & RADEON_PLL_USE_POST_DIV)
460 min_post_div = max_post_div = pll->post_div;
461
462 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
443 min_fractional_feed_div = pll->min_frac_feedback_div; 463 min_fractional_feed_div = pll->min_frac_feedback_div;
444 max_fractional_feed_div = pll->max_frac_feedback_div; 464 max_fractional_feed_div = pll->max_frac_feedback_div;
445 } 465 }
446 466
447 for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) { 467 for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
448 uint32_t ref_div; 468 uint32_t ref_div;
449 469
450 if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) 470 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
451 continue; 471 continue;
452 472
453 /* legacy radeons only have a few post_divs */ 473 /* legacy radeons only have a few post_divs */
454 if (flags & RADEON_PLL_LEGACY) { 474 if (pll->flags & RADEON_PLL_LEGACY) {
455 if ((post_div == 5) || 475 if ((post_div == 5) ||
456 (post_div == 7) || 476 (post_div == 7) ||
457 (post_div == 9) || 477 (post_div == 9) ||
@@ -498,7 +518,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
498 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; 518 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
499 current_freq = radeon_div(tmp, ref_div * post_div); 519 current_freq = radeon_div(tmp, ref_div * post_div);
500 520
501 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { 521 if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
502 error = freq - current_freq; 522 error = freq - current_freq;
503 error = error < 0 ? 0xffffffff : error; 523 error = error < 0 ? 0xffffffff : error;
504 } else 524 } else
@@ -525,12 +545,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
525 best_freq = current_freq; 545 best_freq = current_freq;
526 best_error = error; 546 best_error = error;
527 best_vco_diff = vco_diff; 547 best_vco_diff = vco_diff;
528 } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || 548 } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
529 ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || 549 ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
530 ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || 550 ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
531 ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || 551 ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
532 ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || 552 ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
533 ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { 553 ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
534 best_post_div = post_div; 554 best_post_div = post_div;
535 best_ref_div = ref_div; 555 best_ref_div = ref_div;
536 best_feedback_div = feedback_div; 556 best_feedback_div = feedback_div;
@@ -566,8 +586,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
566 uint32_t *fb_div_p, 586 uint32_t *fb_div_p,
567 uint32_t *frac_fb_div_p, 587 uint32_t *frac_fb_div_p,
568 uint32_t *ref_div_p, 588 uint32_t *ref_div_p,
569 uint32_t *post_div_p, 589 uint32_t *post_div_p)
570 int flags)
571{ 590{
572 fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; 591 fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
573 fixed20_12 pll_out_max, pll_out_min; 592 fixed20_12 pll_out_max, pll_out_min;
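Both compute-PLL signature changes drop the trailing int flags parameter in favour of pll->flags, so constraints like RADEON_PLL_USE_REF_DIV, RADEON_PLL_USE_POST_DIV, and the PREFER_* tie-breakers ride along with the PLL description instead of being threaded through every caller. The migration in miniature, with the struct and names pared down to essentials:

    /* Sketch of the flags migration; fields trimmed to the essentials. */
    struct pll_sketch { uint32_t flags; uint32_t reference_div; };

    /* before: constraints passed out-of-band on each call */
    void compute_pll_old(struct pll_sketch *pll, uint64_t freq, int flags);

    /* after: callers set pll->flags once, up front */
    void compute_pll_new(struct pll_sketch *pll, uint64_t freq);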
@@ -661,7 +680,6 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
661 radeonfb_remove(dev, fb); 680 radeonfb_remove(dev, fb);
662 681
663 if (radeon_fb->obj) { 682 if (radeon_fb->obj) {
664 radeon_gem_object_unpin(radeon_fb->obj);
665 mutex_lock(&dev->struct_mutex); 683 mutex_lock(&dev->struct_mutex);
666 drm_gem_object_unreference(radeon_fb->obj); 684 drm_gem_object_unreference(radeon_fb->obj);
667 mutex_unlock(&dev->struct_mutex); 685 mutex_unlock(&dev->struct_mutex);
@@ -709,7 +727,11 @@ radeon_user_framebuffer_create(struct drm_device *dev,
709 struct drm_gem_object *obj; 727 struct drm_gem_object *obj;
710 728
711 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); 729 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
712 730 if (obj == NULL) {
731 dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
732 "can't create framebuffer\n", mode_cmd->handle);
733 return NULL;
734 }
713 return radeon_framebuffer_create(dev, mode_cmd, obj); 735 return radeon_framebuffer_create(dev, mode_cmd, obj);
714} 736}
715 737
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index e13785282a82..c57ad606504d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -106,9 +106,10 @@
106 * 1.29- R500 3D cmd buffer support 106 * 1.29- R500 3D cmd buffer support
107 * 1.30- Add support for occlusion queries 107 * 1.30- Add support for occlusion queries
108 * 1.31- Add support for num Z pipes from GET_PARAM 108 * 1.31- Add support for num Z pipes from GET_PARAM
109 * 1.32- fixes for rv740 setup
109 */ 110 */
110#define DRIVER_MAJOR 1 111#define DRIVER_MAJOR 1
111#define DRIVER_MINOR 31 112#define DRIVER_MINOR 32
112#define DRIVER_PATCHLEVEL 0 113#define DRIVER_PATCHLEVEL 0
113 114
114enum radeon_cp_microcode_version { 115enum radeon_cp_microcode_version {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index ccba95f83d11..3c91724457ca 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -156,6 +156,26 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
156 return ret; 156 return ret;
157} 157}
158 158
159static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
160{
161 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
162 switch (radeon_encoder->encoder_id) {
163 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
164 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
165 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
166 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
167 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
168 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
169 case ENCODER_OBJECT_ID_INTERNAL_DDI:
170 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
171 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
172 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
173 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
174 return true;
175 default:
176 return false;
177 }
178}
159void 179void
160radeon_link_encoder_connector(struct drm_device *dev) 180radeon_link_encoder_connector(struct drm_device *dev)
161{ 181{
@@ -202,7 +222,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
202 222
203 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 223 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
204 radeon_connector = to_radeon_connector(connector); 224 radeon_connector = to_radeon_connector(connector);
205 if (radeon_encoder->devices & radeon_connector->devices) 225 if (radeon_encoder->active_device & radeon_connector->devices)
206 return connector; 226 return connector;
207 } 227 }
208 return NULL; 228 return NULL;
@@ -596,21 +616,23 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
596 return ATOM_ENCODER_MODE_LVDS; 616 return ATOM_ENCODER_MODE_LVDS;
597 break; 617 break;
598 case DRM_MODE_CONNECTOR_DisplayPort: 618 case DRM_MODE_CONNECTOR_DisplayPort:
619 case DRM_MODE_CONNECTOR_eDP:
599 radeon_dig_connector = radeon_connector->con_priv; 620 radeon_dig_connector = radeon_connector->con_priv;
600 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) 621 if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
622 (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
601 return ATOM_ENCODER_MODE_DP; 623 return ATOM_ENCODER_MODE_DP;
602 else if (drm_detect_hdmi_monitor(radeon_connector->edid)) 624 else if (drm_detect_hdmi_monitor(radeon_connector->edid))
603 return ATOM_ENCODER_MODE_HDMI; 625 return ATOM_ENCODER_MODE_HDMI;
604 else 626 else
605 return ATOM_ENCODER_MODE_DVI; 627 return ATOM_ENCODER_MODE_DVI;
606 break; 628 break;
607 case CONNECTOR_DVI_A: 629 case DRM_MODE_CONNECTOR_DVIA:
608 case CONNECTOR_VGA: 630 case DRM_MODE_CONNECTOR_VGA:
609 return ATOM_ENCODER_MODE_CRT; 631 return ATOM_ENCODER_MODE_CRT;
610 break; 632 break;
611 case CONNECTOR_STV: 633 case DRM_MODE_CONNECTOR_Composite:
612 case CONNECTOR_CTV: 634 case DRM_MODE_CONNECTOR_SVIDEO:
613 case CONNECTOR_DIN: 635 case DRM_MODE_CONNECTOR_9PinDIN:
614 /* fix me */ 636 /* fix me */
615 return ATOM_ENCODER_MODE_TV; 637 return ATOM_ENCODER_MODE_TV;
616 /*return ATOM_ENCODER_MODE_CV;*/ 638 /*return ATOM_ENCODER_MODE_CV;*/
@@ -674,31 +696,11 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
674 696
675 memset(&args, 0, sizeof(args)); 697 memset(&args, 0, sizeof(args));
676 698
677 if (ASIC_IS_DCE32(rdev)) { 699 if (dig->dig_encoder)
678 if (dig->dig_block) 700 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
679 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); 701 else
680 else 702 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
681 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); 703 num = dig->dig_encoder + 1;
682 num = dig->dig_block + 1;
683 } else {
684 switch (radeon_encoder->encoder_id) {
685 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
686 /* XXX doesn't really matter which dig encoder we pick as long as it's
687 * not already in use
688 */
689 if (dig_connector->linkb)
690 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
691 else
692 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
693 num = 1;
694 break;
695 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
696 /* Only dig2 encoder can drive LVTMA */
697 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
698 num = 2;
699 break;
700 }
701 }
702 704
703 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 705 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
704 706
@@ -820,7 +822,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
820 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 822 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
821 } 823 }
822 if (ASIC_IS_DCE32(rdev)) { 824 if (ASIC_IS_DCE32(rdev)) {
823 if (dig->dig_block) 825 if (dig->dig_encoder == 1)
824 args.v2.acConfig.ucEncoderSel = 1; 826 args.v2.acConfig.ucEncoderSel = 1;
825 if (dig_connector->linkb) 827 if (dig_connector->linkb)
826 args.v2.acConfig.ucLinkSel = 1; 828 args.v2.acConfig.ucLinkSel = 1;
@@ -847,17 +849,16 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
847 args.v2.acConfig.fCoherentMode = 1; 849 args.v2.acConfig.fCoherentMode = 1;
848 } 850 }
849 } else { 851 } else {
852
850 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; 853 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
851 854
855 if (dig->dig_encoder)
856 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
857 else
858 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
859
852 switch (radeon_encoder->encoder_id) { 860 switch (radeon_encoder->encoder_id) {
853 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 861 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
854 /* XXX doesn't really matter which dig encoder we pick as long as it's
855 * not already in use
856 */
857 if (dig_connector->linkb)
858 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
859 else
860 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
861 if (rdev->flags & RADEON_IS_IGP) { 862 if (rdev->flags & RADEON_IS_IGP) {
862 if (radeon_encoder->pixel_clock > 165000) { 863 if (radeon_encoder->pixel_clock > 165000) {
863 if (dig_connector->igp_lane_info & 0x3) 864 if (dig_connector->igp_lane_info & 0x3)
@@ -876,10 +877,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
876 } 877 }
877 } 878 }
878 break; 879 break;
879 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
880 /* Only dig2 encoder can drive LVTMA */
881 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
882 break;
883 } 880 }
884 881
885 if (radeon_encoder->pixel_clock > 165000) 882 if (radeon_encoder->pixel_clock > 165000)
@@ -1044,6 +1041,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1044 union crtc_sourc_param args; 1041 union crtc_sourc_param args;
1045 int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); 1042 int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
1046 uint8_t frev, crev; 1043 uint8_t frev, crev;
1044 struct radeon_encoder_atom_dig *dig;
1047 1045
1048 memset(&args, 0, sizeof(args)); 1046 memset(&args, 0, sizeof(args));
1049 1047
@@ -1107,40 +1105,16 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1107 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1105 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1108 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1106 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1109 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1107 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1110 if (ASIC_IS_DCE32(rdev)) { 1108 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1111 if (radeon_crtc->crtc_id) 1109 dig = radeon_encoder->enc_priv;
1112 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; 1110 if (dig->dig_encoder)
1113 else 1111 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1114 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; 1112 else
1115 } else { 1113 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1116 struct drm_connector *connector;
1117 struct radeon_connector *radeon_connector;
1118 struct radeon_connector_atom_dig *dig_connector;
1119
1120 connector = radeon_get_connector_for_encoder(encoder);
1121 if (!connector)
1122 return;
1123 radeon_connector = to_radeon_connector(connector);
1124 if (!radeon_connector->con_priv)
1125 return;
1126 dig_connector = radeon_connector->con_priv;
1127
1128 /* XXX doesn't really matter which dig encoder we pick as long as it's
1129 * not already in use
1130 */
1131 if (dig_connector->linkb)
1132 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1133 else
1134 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1135 }
1136 break; 1114 break;
1137 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1115 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1138 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; 1116 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
1139 break; 1117 break;
1140 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1141 /* Only dig2 encoder can drive LVTMA */
1142 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1143 break;
1144 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 1118 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
1145 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) 1119 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
1146 args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; 1120 args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
@@ -1200,6 +1174,47 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
1200 } 1174 }
1201} 1175}
1202 1176
1177static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1178{
1179 struct drm_device *dev = encoder->dev;
1180 struct radeon_device *rdev = dev->dev_private;
1181 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1182 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1183 struct drm_encoder *test_encoder;
1184 struct radeon_encoder_atom_dig *dig;
1185 uint32_t dig_enc_in_use = 0;
 1186	/* on DCE32 an encoder can drive any DIG block, so just use the crtc id */
1187 if (ASIC_IS_DCE32(rdev)) {
1188 return radeon_crtc->crtc_id;
1189 }
1190
1191 /* on DCE3 - LVTMA can only be driven by DIGB */
1192 list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
1193 struct radeon_encoder *radeon_test_encoder;
1194
1195 if (encoder == test_encoder)
1196 continue;
1197
1198 if (!radeon_encoder_is_digital(test_encoder))
1199 continue;
1200
1201 radeon_test_encoder = to_radeon_encoder(test_encoder);
1202 dig = radeon_test_encoder->enc_priv;
1203
1204 if (dig->dig_encoder >= 0)
1205 dig_enc_in_use |= (1 << dig->dig_encoder);
1206 }
1207
1208 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
1209 if (dig_enc_in_use & 0x2)
1210 DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
1211 return 1;
1212 }
1213 if (!(dig_enc_in_use & 1))
1214 return 0;
1215 return 1;
1216}
1217
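
The new radeon_atom_pick_dig_encoder() above is a small first-fit allocator: it walks every other digital encoder, ORs the DIG blocks they already claim into a bitmask, then hands back a free block (with LVTMA pinned to DIG2, since on DCE3 only DIG2 can drive it). A standalone sketch of the same bitmask walk, using hypothetical plain-C stand-ins for the DRM encoder list:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for a digital encoder; dig_encoder mirrors
 * dig->dig_encoder in the patch (-1 disabled, 0 DIGA, 1 DIGB). */
struct enc {
        int dig_encoder;
        int is_lvtma;   /* on DCE3, LVTMA can only be driven by DIGB */
};

/* Pick a DIG block for encs[self] by scanning the other encoders
 * for blocks already in use, as radeon_atom_pick_dig_encoder() does. */
static int pick_dig_encoder(const struct enc *encs, int n, int self)
{
        uint32_t dig_enc_in_use = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (i == self)
                        continue;
                if (encs[i].dig_encoder >= 0)
                        dig_enc_in_use |= 1u << encs[i].dig_encoder;
        }

        if (encs[self].is_lvtma) {
                if (dig_enc_in_use & 0x2)
                        fprintf(stderr, "DIG2 in use - stealing\n");
                return 1;       /* LVTMA always gets DIGB */
        }
        return (dig_enc_in_use & 0x1) ? 1 : 0; /* prefer DIGA if free */
}

int main(void)
{
        /* encoder 0 already owns DIGA; 1 is a TMDS, 2 an LVTMA */
        struct enc encs[3] = { { 0, 0 }, { -1, 0 }, { -1, 1 } };

        printf("TMDS gets DIG%d\n", pick_dig_encoder(encs, 3, 1) + 1);
        printf("LVTMA gets DIG%d\n", pick_dig_encoder(encs, 3, 2) + 1);
        return 0;
}
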
1203static void 1218static void
1204radeon_atom_encoder_mode_set(struct drm_encoder *encoder, 1219radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1205 struct drm_display_mode *mode, 1220 struct drm_display_mode *mode,
@@ -1212,12 +1227,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1212 1227
1213 if (radeon_encoder->active_device & 1228 if (radeon_encoder->active_device &
1214 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { 1229 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
1215 if (radeon_encoder->enc_priv) { 1230 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1216 struct radeon_encoder_atom_dig *dig; 1231 if (dig)
1217 1232 dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
1218 dig = radeon_encoder->enc_priv;
1219 dig->dig_block = radeon_crtc->crtc_id;
1220 }
1221 } 1233 }
1222 radeon_encoder->pixel_clock = adjusted_mode->clock; 1234 radeon_encoder->pixel_clock = adjusted_mode->clock;
1223 1235
@@ -1377,7 +1389,13 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
1377static void radeon_atom_encoder_disable(struct drm_encoder *encoder) 1389static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1378{ 1390{
1379 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1391 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1392 struct radeon_encoder_atom_dig *dig;
1380 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1393 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1394
1395 if (radeon_encoder_is_digital(encoder)) {
1396 dig = radeon_encoder->enc_priv;
1397 dig->dig_encoder = -1;
1398 }
1381 radeon_encoder->active_device = 0; 1399 radeon_encoder->active_device = 0;
1382} 1400}
1383 1401
@@ -1434,6 +1452,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
1434 1452
1435 /* coherent mode by default */ 1453 /* coherent mode by default */
1436 dig->coherent_mode = true; 1454 dig->coherent_mode = true;
1455 dig->dig_encoder = -1;
1437 1456
1438 return dig; 1457 return dig;
1439} 1458}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 3ba213d1b06c..d71e346e9ab5 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev,
248 if (ret) 248 if (ret)
249 goto out_unref; 249 goto out_unref;
250 250
251 memset_io(fbptr, 0xff, aligned_size); 251 memset_io(fbptr, 0x0, aligned_size);
252 252
253 strcpy(info->fix.id, "radeondrmfb"); 253 strcpy(info->fix.id, "radeondrmfb");
254 254
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 4cdd8b4f7549..8495d4e32e18 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -140,16 +140,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
140 140
141bool radeon_fence_signaled(struct radeon_fence *fence) 141bool radeon_fence_signaled(struct radeon_fence *fence)
142{ 142{
143 struct radeon_device *rdev = fence->rdev;
144 unsigned long irq_flags; 143 unsigned long irq_flags;
145 bool signaled = false; 144 bool signaled = false;
146 145
147 if (rdev->gpu_lockup) { 146 if (!fence)
148 return true; 147 return true;
149 } 148
150 if (fence == NULL) { 149 if (fence->rdev->gpu_lockup)
151 return true; 150 return true;
152 } 151
153 write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags); 152 write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
154 signaled = fence->signaled; 153 signaled = fence->signaled;
 155	/* if we are shutting down, report all fences as signaled */ 154	/* if we are shutting down, report all fences as signaled */
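
The reordering in this hunk fixes a use-before-check: the removed code loaded fence->rdev into a local before testing fence for NULL, so a NULL fence would oops before the check ran. A minimal sketch of the corrected shape, with toy types rather than the driver's:

#include <stdbool.h>
#include <stddef.h>

struct dev { bool gpu_lockup; };
struct fence { struct dev *rdev; bool signaled; };

/* Test the pointer before any member access; the removed ordering
 * read fence->rdev first and would crash on fence == NULL. */
static bool fence_signaled(const struct fence *f)
{
        if (!f)
                return true;            /* no fence: treat as signaled */
        if (f->rdev->gpu_lockup)
                return true;            /* locked-up GPU: report signaled */
        return f->signaled;
}

int main(void)
{
        return fence_signaled(NULL) ? 0 : 1;
}
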
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 60df2d7e7e4c..db8e9a355a01 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -131,7 +131,6 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
131 printk(KERN_ERR "Failed to wait for object !\n"); 131 printk(KERN_ERR "Failed to wait for object !\n");
132 return r; 132 return r;
133 } 133 }
134 radeon_hdp_flush(robj->rdev);
135 } 134 }
136 return 0; 135 return 0;
137} 136}
@@ -309,10 +308,12 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
309 } 308 }
310 robj = gobj->driver_private; 309 robj = gobj->driver_private;
311 r = radeon_bo_wait(robj, NULL, false); 310 r = radeon_bo_wait(robj, NULL, false);
 311	/* call hw-specific callback functions, if any */
312 if (robj->rdev->asic->ioctl_wait_idle)
313 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
312 mutex_lock(&dev->struct_mutex); 314 mutex_lock(&dev->struct_mutex);
313 drm_gem_object_unreference(gobj); 315 drm_gem_object_unreference(gobj);
314 mutex_unlock(&dev->struct_mutex); 316 mutex_unlock(&dev->struct_mutex);
315 radeon_hdp_flush(robj->rdev);
316 return r; 317 return r;
317} 318}
318 319
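
The wait-idle path above also gains an optional per-ASIC hook, replacing the unconditional radeon_hdp_flush(): a function pointer in the asic ops table, invoked only when non-NULL. This is the usual ops-table pattern; a small self-contained sketch (all types and names here are illustrative, not the driver's):

#include <stdio.h>

struct dev;

/* Per-chip operations table: optional hooks simply stay NULL. */
struct asic_ops {
        void (*ioctl_wait_idle)(struct dev *d);
};

struct dev { const struct asic_ops *asic; };

static void r600_wait_idle(struct dev *d)
{
        (void)d;
        printf("r600: post-wait HDP flush\n");
}

/* Call the hw-specific hook only when the chip provides one. */
static void wait_idle_ioctl(struct dev *d)
{
        /* ... wait for the buffer object to go idle ... */
        if (d->asic->ioctl_wait_idle)
                d->asic->ioctl_wait_idle(d);
}

int main(void)
{
        static const struct asic_ops r600_ops = { r600_wait_idle };
        static const struct asic_ops r100_ops = { 0 };
        struct dev r600 = { &r600_ops }, r100 = { &r100_ops };

        wait_idle_ioctl(&r600);         /* hook fires */
        wait_idle_ioctl(&r100);         /* no hook: silently skipped */
        return 0;
}
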
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index b79ecc4a7cc4..2f349a300195 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -289,16 +289,16 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr
289 drm_radeon_irq_emit_t *emit = data; 289 drm_radeon_irq_emit_t *emit = data;
290 int result; 290 int result;
291 291
292 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
293 return -EINVAL;
294
295 LOCK_TEST_WITH_RETURN(dev, file_priv);
296
297 if (!dev_priv) { 292 if (!dev_priv) {
298 DRM_ERROR("called with no initialization\n"); 293 DRM_ERROR("called with no initialization\n");
299 return -EINVAL; 294 return -EINVAL;
300 } 295 }
301 296
297 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
298 return -EINVAL;
299
300 LOCK_TEST_WITH_RETURN(dev, file_priv);
301
302 result = radeon_emit_irq(dev); 302 result = radeon_emit_irq(dev);
303 303
304 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { 304 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 9223296fe37b..3cfd60fd0083 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -97,6 +97,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
97 rdev->irq.sw_int = false; 97 rdev->irq.sw_int = false;
98 for (i = 0; i < 2; i++) { 98 for (i = 0; i < 2; i++) {
99 rdev->irq.crtc_vblank_int[i] = false; 99 rdev->irq.crtc_vblank_int[i] = false;
100 rdev->irq.hpd[i] = false;
100 } 101 }
101 radeon_irq_set(rdev); 102 radeon_irq_set(rdev);
102} 103}
@@ -128,17 +129,22 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
128 DRM_INFO("radeon: using MSI.\n"); 129 DRM_INFO("radeon: using MSI.\n");
129 } 130 }
130 } 131 }
131 drm_irq_install(rdev->ddev);
132 rdev->irq.installed = true; 132 rdev->irq.installed = true;
133 r = drm_irq_install(rdev->ddev);
134 if (r) {
135 rdev->irq.installed = false;
136 return r;
137 }
133 DRM_INFO("radeon: irq initialized.\n"); 138 DRM_INFO("radeon: irq initialized.\n");
134 return 0; 139 return 0;
135} 140}
136 141
137void radeon_irq_kms_fini(struct radeon_device *rdev) 142void radeon_irq_kms_fini(struct radeon_device *rdev)
138{ 143{
144 drm_vblank_cleanup(rdev->ddev);
139 if (rdev->irq.installed) { 145 if (rdev->irq.installed) {
140 rdev->irq.installed = false;
141 drm_irq_uninstall(rdev->ddev); 146 drm_irq_uninstall(rdev->ddev);
147 rdev->irq.installed = false;
142 if (rdev->msi_enabled) 148 if (rdev->msi_enabled)
143 pci_disable_msi(rdev->pdev); 149 pci_disable_msi(rdev->pdev);
144 } 150 }
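
Note the ordering in both hunks: init now sets rdev->irq.installed before calling drm_irq_install() and rolls it back on failure, presumably because the driver's IRQ hooks already run inside the install path, while fini clears the flag only after drm_irq_uninstall() returns and calls drm_vblank_cleanup() unconditionally. A toy sketch of the set-then-roll-back shape, under those assumptions:

#include <stdbool.h>
#include <stdio.h>

struct dev { bool installed; };

/* Stand-in for drm_irq_install(): in the driver the IRQ hooks can
 * already run inside this call, so the flag must be set beforehand. */
static int drm_irq_install_stub(struct dev *d)
{
        return d->installed ? 0 : -22;  /* fail if ordering is wrong */
}

static int irq_init(struct dev *d)
{
        int r;

        d->installed = true;            /* set first ... */
        r = drm_irq_install_stub(d);
        if (r) {
                d->installed = false;   /* ... roll back on failure */
                return r;
        }
        return 0;
}

int main(void)
{
        struct dev d = { false };

        printf("irq_init: %d, installed: %d\n", irq_init(&d), d.installed);
        return 0;
}
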
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cc27485a07ad..b6d8081e1246 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -339,69 +339,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
339 } 339 }
340} 340}
341 341
342/* properly set crtc bpp when using atombios */
343void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
344{
345 struct drm_device *dev = crtc->dev;
346 struct radeon_device *rdev = dev->dev_private;
347 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
348 int format;
349 uint32_t crtc_gen_cntl;
350 uint32_t disp_merge_cntl;
351 uint32_t crtc_pitch;
352
353 switch (crtc->fb->bits_per_pixel) {
354 case 8:
355 format = 2;
356 break;
357 case 15: /* 555 */
358 format = 3;
359 break;
360 case 16: /* 565 */
361 format = 4;
362 break;
363 case 24: /* RGB */
364 format = 5;
365 break;
366 case 32: /* xRGB */
367 format = 6;
368 break;
369 default:
370 return;
371 }
372
373 crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
374 ((crtc->fb->bits_per_pixel * 8) - 1)) /
375 (crtc->fb->bits_per_pixel * 8));
376 crtc_pitch |= crtc_pitch << 16;
377
378 WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
379
380 switch (radeon_crtc->crtc_id) {
381 case 0:
382 disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
383 disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
384 WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
385
386 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
387 crtc_gen_cntl |= (format << 8);
388 crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
389 WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
390 break;
391 case 1:
392 disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
393 disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
394 WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
395
396 crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
397 crtc_gen_cntl |= (format << 8);
398 WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
399 WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
400 WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
401 break;
402 }
403}
404
405int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, 342int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
406 struct drm_framebuffer *old_fb) 343 struct drm_framebuffer *old_fb)
407{ 344{
@@ -755,7 +692,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
755 uint32_t post_divider = 0; 692 uint32_t post_divider = 0;
756 uint32_t freq = 0; 693 uint32_t freq = 0;
757 uint8_t pll_gain; 694 uint8_t pll_gain;
758 int pll_flags = RADEON_PLL_LEGACY;
759 bool use_bios_divs = false; 695 bool use_bios_divs = false;
760 /* PLL registers */ 696 /* PLL registers */
761 uint32_t pll_ref_div = 0; 697 uint32_t pll_ref_div = 0;
@@ -789,10 +725,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
789 else 725 else
790 pll = &rdev->clock.p1pll; 726 pll = &rdev->clock.p1pll;
791 727
728 pll->flags = RADEON_PLL_LEGACY;
729
792 if (mode->clock > 200000) /* range limits??? */ 730 if (mode->clock > 200000) /* range limits??? */
793 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 731 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
794 else 732 else
795 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 733 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
796 734
797 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 735 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
798 if (encoder->crtc == crtc) { 736 if (encoder->crtc == crtc) {
@@ -804,7 +742,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
804 } 742 }
805 743
806 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 744 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
807 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; 745 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
808 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { 746 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
809 if (!rdev->is_atom_bios) { 747 if (!rdev->is_atom_bios) {
810 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 748 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -819,7 +757,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
819 } 757 }
820 } 758 }
821 } 759 }
822 pll_flags |= RADEON_PLL_USE_REF_DIV; 760 pll->flags |= RADEON_PLL_USE_REF_DIV;
823 } 761 }
824 } 762 }
825 } 763 }
@@ -829,8 +767,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
829 if (!use_bios_divs) { 767 if (!use_bios_divs) {
830 radeon_compute_pll(pll, mode->clock, 768 radeon_compute_pll(pll, mode->clock,
831 &freq, &feedback_div, &frac_fb_div, 769 &freq, &feedback_div, &frac_fb_div,
832 &reference_div, &post_divider, 770 &reference_div, &post_divider);
833 pll_flags);
834 771
835 for (post_div = &post_divs[0]; post_div->divider; ++post_div) { 772 for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
836 if (post_div->divider == post_divider) 773 if (post_div->divider == post_divider)
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 981508ff7037..38e45e231ef5 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -46,6 +46,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
46 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 46 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
47 uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man; 47 uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
48 int panel_pwr_delay = 2000; 48 int panel_pwr_delay = 2000;
49 bool is_mac = false;
49 DRM_DEBUG("\n"); 50 DRM_DEBUG("\n");
50 51
51 if (radeon_encoder->enc_priv) { 52 if (radeon_encoder->enc_priv) {
@@ -58,6 +59,15 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
58 } 59 }
59 } 60 }
60 61
 62	/* Macs (and possibly some x86 OEM systems?) wire up LVDS strangely.
63 * Taken from radeonfb.
64 */
65 if ((rdev->mode_info.connector_table == CT_IBOOK) ||
66 (rdev->mode_info.connector_table == CT_POWERBOOK_EXTERNAL) ||
67 (rdev->mode_info.connector_table == CT_POWERBOOK_INTERNAL) ||
68 (rdev->mode_info.connector_table == CT_POWERBOOK_VGA))
69 is_mac = true;
70
61 switch (mode) { 71 switch (mode) {
62 case DRM_MODE_DPMS_ON: 72 case DRM_MODE_DPMS_ON:
63 disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN); 73 disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN);
@@ -74,6 +84,8 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
74 84
75 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); 85 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
76 lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON); 86 lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON);
87 if (is_mac)
88 lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
77 lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS); 89 lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
78 udelay(panel_pwr_delay * 1000); 90 udelay(panel_pwr_delay * 1000);
79 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); 91 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
@@ -85,7 +97,14 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
85 WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb); 97 WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
86 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); 98 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
87 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; 99 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
88 lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON); 100 if (is_mac) {
101 lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
102 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
103 lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_EN);
104 } else {
105 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
106 lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
107 }
89 udelay(panel_pwr_delay * 1000); 108 udelay(panel_pwr_delay * 1000);
90 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); 109 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
91 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); 110 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 3a12bb0c0563..417684daef4c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -77,7 +77,7 @@ struct radeon_tv_mode_constants {
77 unsigned pix_to_tv; 77 unsigned pix_to_tv;
78}; 78};
79 79
80static const uint16_t hor_timing_NTSC[] = { 80static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = {
81 0x0007, 81 0x0007,
82 0x003f, 82 0x003f,
83 0x0263, 83 0x0263,
@@ -98,7 +98,7 @@ static const uint16_t hor_timing_NTSC[] = {
98 0 98 0
99}; 99};
100 100
101static const uint16_t vert_timing_NTSC[] = { 101static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = {
102 0x2001, 102 0x2001,
103 0x200d, 103 0x200d,
104 0x1006, 104 0x1006,
@@ -115,7 +115,7 @@ static const uint16_t vert_timing_NTSC[] = {
115 0 115 0
116}; 116};
117 117
118static const uint16_t hor_timing_PAL[] = { 118static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = {
119 0x0007, 119 0x0007,
120 0x0058, 120 0x0058,
121 0x027c, 121 0x027c,
@@ -136,7 +136,7 @@ static const uint16_t hor_timing_PAL[] = {
136 0 136 0
137}; 137};
138 138
139static const uint16_t vert_timing_PAL[] = { 139static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = {
140 0x2001, 140 0x2001,
141 0x200c, 141 0x200c,
142 0x1005, 142 0x1005,
@@ -623,9 +623,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
623 } 623 }
624 flicker_removal = (tmp + 500) / 1000; 624 flicker_removal = (tmp + 500) / 1000;
625 625
626 if (flicker_removal < 3) 626 if (flicker_removal < 2)
627 flicker_removal = 3; 627 flicker_removal = 2;
628 for (i = 0; i < 6; ++i) { 628 for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
629 if (flicker_removal == SLOPE_limit[i]) 629 if (flicker_removal == SLOPE_limit[i])
630 break; 630 break;
631 } 631 }
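
The loop-bound fix above replaces the hardcoded 6 with ARRAY_SIZE(SLOPE_limit), so the bound tracks the table definition automatically. The macro is plain sizeof arithmetic; a self-contained sketch (the table values below are made up, not the driver's):

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const uint16_t SLOPE_limit_example[] = { 6, 5, 4, 3, 2, 1 };

int main(void)
{
        unsigned int flicker_removal = 3;
        size_t i;

        /* The bound follows the table; adding or removing entries
         * cannot desynchronize the loop the way a literal "6" could. */
        for (i = 0; i < ARRAY_SIZE(SLOPE_limit_example); i++) {
                if (flicker_removal == SLOPE_limit_example[i])
                        break;
        }
        printf("matched index %zu of %zu\n",
               i, ARRAY_SIZE(SLOPE_limit_example));
        return 0;
}
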
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 402369db5ba0..e81b2aeb6a8f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -46,32 +46,6 @@ struct radeon_device;
46#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base) 46#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
47#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base) 47#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
48 48
49enum radeon_connector_type {
50 CONNECTOR_NONE,
51 CONNECTOR_VGA,
52 CONNECTOR_DVI_I,
53 CONNECTOR_DVI_D,
54 CONNECTOR_DVI_A,
55 CONNECTOR_STV,
56 CONNECTOR_CTV,
57 CONNECTOR_LVDS,
58 CONNECTOR_DIGITAL,
59 CONNECTOR_SCART,
60 CONNECTOR_HDMI_TYPE_A,
61 CONNECTOR_HDMI_TYPE_B,
62 CONNECTOR_0XC,
63 CONNECTOR_0XD,
64 CONNECTOR_DIN,
65 CONNECTOR_DISPLAY_PORT,
66 CONNECTOR_UNSUPPORTED
67};
68
69enum radeon_dvi_type {
70 DVI_AUTO,
71 DVI_DIGITAL,
72 DVI_ANALOG
73};
74
75enum radeon_rmx_type { 49enum radeon_rmx_type {
76 RMX_OFF, 50 RMX_OFF,
77 RMX_FULL, 51 RMX_FULL,
@@ -151,16 +125,24 @@ struct radeon_tmds_pll {
151#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) 125#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
152#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) 126#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
153#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) 127#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
128#define RADEON_PLL_USE_POST_DIV (1 << 12)
154 129
155struct radeon_pll { 130struct radeon_pll {
156 uint16_t reference_freq; 131 /* reference frequency */
157 uint16_t reference_div; 132 uint32_t reference_freq;
133
134 /* fixed dividers */
135 uint32_t reference_div;
136 uint32_t post_div;
137
138 /* pll in/out limits */
158 uint32_t pll_in_min; 139 uint32_t pll_in_min;
159 uint32_t pll_in_max; 140 uint32_t pll_in_max;
160 uint32_t pll_out_min; 141 uint32_t pll_out_min;
161 uint32_t pll_out_max; 142 uint32_t pll_out_max;
162 uint16_t xclk; 143 uint32_t best_vco;
163 144
145 /* divider limits */
164 uint32_t min_ref_div; 146 uint32_t min_ref_div;
165 uint32_t max_ref_div; 147 uint32_t max_ref_div;
166 uint32_t min_post_div; 148 uint32_t min_post_div;
@@ -169,7 +151,12 @@ struct radeon_pll {
169 uint32_t max_feedback_div; 151 uint32_t max_feedback_div;
170 uint32_t min_frac_feedback_div; 152 uint32_t min_frac_feedback_div;
171 uint32_t max_frac_feedback_div; 153 uint32_t max_frac_feedback_div;
172 uint32_t best_vco; 154
155 /* flags for the current clock */
156 uint32_t flags;
157
158 /* pll id */
159 uint32_t id;
173}; 160};
174 161
175struct radeon_i2c_chan { 162struct radeon_i2c_chan {
@@ -312,7 +299,7 @@ struct radeon_atom_ss {
312struct radeon_encoder_atom_dig { 299struct radeon_encoder_atom_dig {
313 /* atom dig */ 300 /* atom dig */
314 bool coherent_mode; 301 bool coherent_mode;
315 int dig_block; 302 int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
316 /* atom lvds */ 303 /* atom lvds */
317 uint32_t lvds_misc; 304 uint32_t lvds_misc;
318 uint16_t panel_pwr_delay; 305 uint16_t panel_pwr_delay;
@@ -443,8 +430,7 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
443 uint32_t *fb_div_p, 430 uint32_t *fb_div_p,
444 uint32_t *frac_fb_div_p, 431 uint32_t *frac_fb_div_p,
445 uint32_t *ref_div_p, 432 uint32_t *ref_div_p,
446 uint32_t *post_div_p, 433 uint32_t *post_div_p);
447 int flags);
448 434
449extern void radeon_compute_pll_avivo(struct radeon_pll *pll, 435extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
450 uint64_t freq, 436 uint64_t freq,
@@ -452,8 +438,7 @@ extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
452 uint32_t *fb_div_p, 438 uint32_t *fb_div_p,
453 uint32_t *frac_fb_div_p, 439 uint32_t *frac_fb_div_p,
454 uint32_t *ref_div_p, 440 uint32_t *ref_div_p,
455 uint32_t *post_div_p, 441 uint32_t *post_div_p);
456 int flags);
457 442
458extern void radeon_setup_encoder_clones(struct drm_device *dev); 443extern void radeon_setup_encoder_clones(struct drm_device *dev);
459 444
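
Taken together with the radeon_legacy_crtc.c hunk earlier, these prototype changes move the PLL constraint flags from a per-call int flags argument into struct radeon_pll itself. Recapping the new call-site shape from the hunks above (a summary of this patch's own code, not new API):

	pll->flags = RADEON_PLL_LEGACY;
	if (mode->clock > 200000) /* range limits??? */
		pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
	else
		pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
	/* ... */
	radeon_compute_pll(pll, mode->clock,
			   &freq, &feedback_div, &frac_fb_div,
			   &reference_div, &post_divider);
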
@@ -479,7 +464,6 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
479 464
480extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, 465extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
481 struct drm_framebuffer *old_fb); 466 struct drm_framebuffer *old_fb);
482extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
483 467
484extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, 468extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
485 struct drm_file *file_priv, 469 struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d9ffe1f56e8f..f1da370928eb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -220,9 +220,11 @@ int radeon_bo_unpin(struct radeon_bo *bo)
220 220
221int radeon_bo_evict_vram(struct radeon_device *rdev) 221int radeon_bo_evict_vram(struct radeon_device *rdev)
222{ 222{
 223	if (rdev->flags & RADEON_IS_IGP) {	 223	/* late 2.6.33 fix for IGP hibernate - we need pm ops to do this correctly */
224 /* Useless to evict on IGP chips */ 224 if (0 && (rdev->flags & RADEON_IS_IGP)) {
225 return 0; 225 if (rdev->mc.igp_sideport_enabled == false)
226 /* Useless to evict on IGP chips */
227 return 0;
226 } 228 }
227 return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); 229 return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
228} 230}
@@ -304,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
304 } 306 }
305} 307}
306 308
307int radeon_bo_list_validate(struct list_head *head, void *fence) 309int radeon_bo_list_validate(struct list_head *head)
308{ 310{
309 struct radeon_bo_list *lobj; 311 struct radeon_bo_list *lobj;
310 struct radeon_bo *bo; 312 struct radeon_bo *bo;
311 struct radeon_fence *old_fence = NULL;
312 int r; 313 int r;
313 314
314 r = radeon_bo_list_reserve(head); 315 r = radeon_bo_list_reserve(head);
@@ -332,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
332 } 333 }
333 lobj->gpu_offset = radeon_bo_gpu_offset(bo); 334 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
334 lobj->tiling_flags = bo->tiling_flags; 335 lobj->tiling_flags = bo->tiling_flags;
335 if (fence) {
336 old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
337 bo->tbo.sync_obj = radeon_fence_ref(fence);
338 bo->tbo.sync_obj_arg = NULL;
339 }
340 if (old_fence) {
341 radeon_fence_unref(&old_fence);
342 }
343 } 336 }
344 return 0; 337 return 0;
345} 338}
346 339
347void radeon_bo_list_unvalidate(struct list_head *head, void *fence) 340void radeon_bo_list_fence(struct list_head *head, void *fence)
348{ 341{
349 struct radeon_bo_list *lobj; 342 struct radeon_bo_list *lobj;
350 struct radeon_fence *old_fence; 343 struct radeon_bo *bo;
351 344 struct radeon_fence *old_fence = NULL;
352 if (fence) 345
353 list_for_each_entry(lobj, head, list) { 346 list_for_each_entry(lobj, head, list) {
354 old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj); 347 bo = lobj->bo;
355 if (old_fence == fence) { 348 spin_lock(&bo->tbo.lock);
356 lobj->bo->tbo.sync_obj = NULL; 349 old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
357 radeon_fence_unref(&old_fence); 350 bo->tbo.sync_obj = radeon_fence_ref(fence);
358 } 351 bo->tbo.sync_obj_arg = NULL;
352 spin_unlock(&bo->tbo.lock);
353 if (old_fence) {
354 radeon_fence_unref(&old_fence);
359 } 355 }
360 radeon_bo_list_unreserve(head); 356 }
361} 357}
362 358
363int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 359int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
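
radeon_bo_list_fence() above is a reference-counted pointer swap: publish the new fence reference under the per-bo lock, and drop the old reference only after unlocking. A generic sketch with a toy refcount (all names hypothetical):

#include <stdio.h>

struct fence { int refs; };

static struct fence *fence_ref(struct fence *f) { f->refs++; return f; }

static void fence_unref(struct fence **f)
{
        if (*f && --(*f)->refs == 0)
                printf("fence %p freed\n", (void *)*f);
        *f = NULL;
}

struct bo { struct fence *sync_obj; /* protected by a per-bo lock */ };

/* Swap in a new fence: install the new reference under the lock,
 * release the old one after unlocking (unref may free/sleep). */
static void bo_set_fence(struct bo *bo, struct fence *fence)
{
        struct fence *old;

        /* lock(&bo->lock); */
        old = bo->sync_obj;
        bo->sync_obj = fence_ref(fence);
        /* unlock(&bo->lock); */
        if (old)
                fence_unref(&old);
}

int main(void)
{
        struct fence a = { .refs = 1 }, b = { .refs = 1 };
        struct bo bo = { .sync_obj = fence_ref(&a) };   /* a.refs == 2 */

        bo_set_fence(&bo, &b);  /* a drops to 1, b rises to 2 */
        printf("a.refs=%d b.refs=%d\n", a.refs, b.refs);
        return 0;
}
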
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index a02f18011ad1..7ab43de1e244 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
156 struct list_head *head); 156 struct list_head *head);
157extern int radeon_bo_list_reserve(struct list_head *head); 157extern int radeon_bo_list_reserve(struct list_head *head);
158extern void radeon_bo_list_unreserve(struct list_head *head); 158extern void radeon_bo_list_unreserve(struct list_head *head);
159extern int radeon_bo_list_validate(struct list_head *head, void *fence); 159extern int radeon_bo_list_validate(struct list_head *head);
160extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence); 160extern void radeon_bo_list_fence(struct list_head *head, void *fence);
161extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 161extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
162 struct vm_area_struct *vma); 162 struct vm_area_struct *vma);
163extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, 163extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 4d12b2d17b4d..6579eb4c1f28 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
41{ 41{
42 struct radeon_fence *fence; 42 struct radeon_fence *fence;
43 struct radeon_ib *nib; 43 struct radeon_ib *nib;
44 unsigned long i; 44 int r = 0, i, c;
45 int r = 0;
46 45
47 *ib = NULL; 46 *ib = NULL;
48 r = radeon_fence_create(rdev, &fence); 47 r = radeon_fence_create(rdev, &fence);
49 if (r) { 48 if (r) {
50 DRM_ERROR("failed to create fence for new IB\n"); 49 dev_err(rdev->dev, "failed to create fence for new IB\n");
51 return r; 50 return r;
52 } 51 }
53 mutex_lock(&rdev->ib_pool.mutex); 52 mutex_lock(&rdev->ib_pool.mutex);
54 i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 53 for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
55 if (i < RADEON_IB_POOL_SIZE) { 54 i &= (RADEON_IB_POOL_SIZE - 1);
56 set_bit(i, rdev->ib_pool.alloc_bm); 55 if (rdev->ib_pool.ibs[i].free) {
57 rdev->ib_pool.ibs[i].length_dw = 0; 56 nib = &rdev->ib_pool.ibs[i];
58 *ib = &rdev->ib_pool.ibs[i]; 57 break;
59 mutex_unlock(&rdev->ib_pool.mutex); 58 }
60 goto out;
61 } 59 }
62 if (list_empty(&rdev->ib_pool.scheduled_ibs)) { 60 if (nib == NULL) {
 63	/* we go do nothings here */	 61		/* This should never happen: it means we have allocated
 62		 * all the IBs and haven't scheduled any yet; return
 63		 * EBUSY to userspace in the hope that a retry of the
 64		 * ioctl has better luck.
 65		 */
 66		dev_err(rdev->dev, "no free indirect buffer!\n");
64 mutex_unlock(&rdev->ib_pool.mutex); 67 mutex_unlock(&rdev->ib_pool.mutex);
65 DRM_ERROR("all IB allocated none scheduled.\n"); 68 radeon_fence_unref(&fence);
66 r = -EINVAL; 69 return -EBUSY;
67 goto out;
68 } 70 }
69 /* get the first ib on the scheduled list */ 71 rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
70 nib = list_entry(rdev->ib_pool.scheduled_ibs.next, 72 nib->free = false;
71 struct radeon_ib, list); 73 if (nib->fence) {
72 if (nib->fence == NULL) {
73 /* we go do nothings here */
74 mutex_unlock(&rdev->ib_pool.mutex); 74 mutex_unlock(&rdev->ib_pool.mutex);
75 DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx); 75 r = radeon_fence_wait(nib->fence, false);
76 r = -EINVAL; 76 if (r) {
 77		goto out;	 77			dev_err(rdev->dev, "error waiting for fence of IB(%u:0x%016lX:%u)\n",
78 } 78 nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
79 mutex_unlock(&rdev->ib_pool.mutex); 79 mutex_lock(&rdev->ib_pool.mutex);
80 80 nib->free = true;
81 r = radeon_fence_wait(nib->fence, false); 81 mutex_unlock(&rdev->ib_pool.mutex);
82 if (r) { 82 radeon_fence_unref(&fence);
83 DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx, 83 return r;
84 (unsigned long)nib->gpu_addr, nib->length_dw); 84 }
85 DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n"); 85 mutex_lock(&rdev->ib_pool.mutex);
86 goto out;
87 } 86 }
88 radeon_fence_unref(&nib->fence); 87 radeon_fence_unref(&nib->fence);
89 88 nib->fence = fence;
90 nib->length_dw = 0; 89 nib->length_dw = 0;
91
92 /* scheduled list is accessed here */
93 mutex_lock(&rdev->ib_pool.mutex);
94 list_del(&nib->list);
95 INIT_LIST_HEAD(&nib->list);
96 mutex_unlock(&rdev->ib_pool.mutex); 90 mutex_unlock(&rdev->ib_pool.mutex);
97
98 *ib = nib; 91 *ib = nib;
99out: 92 return 0;
100 if (r) {
101 radeon_fence_unref(&fence);
102 } else {
103 (*ib)->fence = fence;
104 }
105 return r;
106} 93}
107 94
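
The reworked radeon_ib_get() drops the bitmap allocator in favor of a circular scan: starting at head_id it probes at most RADEON_IB_POOL_SIZE slots for one flagged free, wrapping the index with "& (RADEON_IB_POOL_SIZE - 1)", which works because the pool size is a power of two. A minimal userspace sketch of that scan (constants and names are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 16    /* must be a power of two for the mask trick */

struct ib { bool free; int idx; };

static struct ib pool[POOL_SIZE];
static int head_id;

/* Round-robin allocation: scan at most POOL_SIZE slots starting at
 * head_id, wrapping with "& (POOL_SIZE - 1)" instead of "%". */
static struct ib *ib_get(void)
{
        int i, c;

        for (i = head_id, c = 0; c < POOL_SIZE; c++, i++) {
                i &= POOL_SIZE - 1;
                if (pool[i].free) {
                        pool[i].free = false;
                        head_id = (i + 1) & (POOL_SIZE - 1);
                        return &pool[i];
                }
        }
        return NULL;    /* every slot busy: caller reports -EBUSY */
}

int main(void)
{
        struct ib *a, *b;
        int i;

        for (i = 0; i < POOL_SIZE; i++)
                pool[i] = (struct ib){ .free = true, .idx = i };

        a = ib_get();
        b = ib_get();
        printf("got IB %d then IB %d\n", a->idx, b->idx);  /* 0 then 1 */
        return 0;
}
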
108void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) 95void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
113 if (tmp == NULL) { 100 if (tmp == NULL) {
114 return; 101 return;
115 } 102 }
116 mutex_lock(&rdev->ib_pool.mutex); 103 if (!tmp->fence->emited)
117 if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
118 /* IB is scheduled & not signaled don't do anythings */
119 mutex_unlock(&rdev->ib_pool.mutex);
120 return;
121 }
122 list_del(&tmp->list);
123 INIT_LIST_HEAD(&tmp->list);
124 if (tmp->fence)
125 radeon_fence_unref(&tmp->fence); 104 radeon_fence_unref(&tmp->fence);
126 105 mutex_lock(&rdev->ib_pool.mutex);
127 tmp->length_dw = 0; 106 tmp->free = true;
128 clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
129 mutex_unlock(&rdev->ib_pool.mutex); 107 mutex_unlock(&rdev->ib_pool.mutex);
130} 108}
131 109
@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
135 113
136 if (!ib->length_dw || !rdev->cp.ready) { 114 if (!ib->length_dw || !rdev->cp.ready) {
 137		/* TODO: Nothing in the ib to schedule; we should report this. */	 115		/* TODO: Nothing in the ib to schedule; we should report this. */
138 DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); 116 DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
139 return -EINVAL; 117 return -EINVAL;
140 } 118 }
141 119
@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
148 radeon_ring_ib_execute(rdev, ib); 126 radeon_ring_ib_execute(rdev, ib);
149 radeon_fence_emit(rdev, ib->fence); 127 radeon_fence_emit(rdev, ib->fence);
150 mutex_lock(&rdev->ib_pool.mutex); 128 mutex_lock(&rdev->ib_pool.mutex);
 151	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);	 129	/* once scheduled, an IB is considered free and is protected by the fence */
130 ib->free = true;
152 mutex_unlock(&rdev->ib_pool.mutex); 131 mutex_unlock(&rdev->ib_pool.mutex);
153 radeon_ring_unlock_commit(rdev); 132 radeon_ring_unlock_commit(rdev);
154 return 0; 133 return 0;
@@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
164 if (rdev->ib_pool.robj) 143 if (rdev->ib_pool.robj)
165 return 0; 144 return 0;
166 /* Allocate 1M object buffer */ 145 /* Allocate 1M object buffer */
167 INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
168 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, 146 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
169 true, RADEON_GEM_DOMAIN_GTT, 147 true, RADEON_GEM_DOMAIN_GTT,
170 &rdev->ib_pool.robj); 148 &rdev->ib_pool.robj);
@@ -195,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
195 rdev->ib_pool.ibs[i].ptr = ptr + offset; 173 rdev->ib_pool.ibs[i].ptr = ptr + offset;
196 rdev->ib_pool.ibs[i].idx = i; 174 rdev->ib_pool.ibs[i].idx = i;
197 rdev->ib_pool.ibs[i].length_dw = 0; 175 rdev->ib_pool.ibs[i].length_dw = 0;
198 INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list); 176 rdev->ib_pool.ibs[i].free = true;
199 } 177 }
200 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 178 rdev->ib_pool.head_id = 0;
201 rdev->ib_pool.ready = true; 179 rdev->ib_pool.ready = true;
202 DRM_INFO("radeon: ib pool ready.\n"); 180 DRM_INFO("radeon: ib pool ready.\n");
203 if (radeon_debugfs_ib_init(rdev)) { 181 if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
214 return; 192 return;
215 } 193 }
216 mutex_lock(&rdev->ib_pool.mutex); 194 mutex_lock(&rdev->ib_pool.mutex);
217 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
218 if (rdev->ib_pool.robj) { 195 if (rdev->ib_pool.robj) {
219 r = radeon_bo_reserve(rdev->ib_pool.robj, false); 196 r = radeon_bo_reserve(rdev->ib_pool.robj, false);
220 if (likely(r == 0)) { 197 if (likely(r == 0)) {
@@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
363 if (ib == NULL) { 340 if (ib == NULL) {
364 return 0; 341 return 0;
365 } 342 }
366 seq_printf(m, "IB %04lu\n", ib->idx); 343 seq_printf(m, "IB %04u\n", ib->idx);
367 seq_printf(m, "IB fence %p\n", ib->fence); 344 seq_printf(m, "IB fence %p\n", ib->fence);
368 seq_printf(m, "IB size %05u dwords\n", ib->length_dw); 345 seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
369 for (i = 0; i < ib->length_dw; i++) { 346 for (i = 0; i < ib->length_dw; i++) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3b0c07b444a2..58b5adf974ca 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -215,7 +215,10 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
215 rbo = container_of(bo, struct radeon_bo, tbo); 215 rbo = container_of(bo, struct radeon_bo, tbo);
216 switch (bo->mem.mem_type) { 216 switch (bo->mem.mem_type) {
217 case TTM_PL_VRAM: 217 case TTM_PL_VRAM:
218 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); 218 if (rbo->rdev->cp.ready == false)
219 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
220 else
221 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
219 break; 222 break;
220 case TTM_PL_TT: 223 case TTM_PL_TT:
221 default: 224 default:
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200
index 6021c8849a16..c29ac434ac9c 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r200
+++ b/drivers/gpu/drm/radeon/reg_srcs/r200
@@ -91,6 +91,8 @@ r200 0x3294
910x22b8 SE_TCL_TEX_CYL_WRAP_CTL 910x22b8 SE_TCL_TEX_CYL_WRAP_CTL
920x22c0 SE_TCL_UCP_VERT_BLEND_CNTL 920x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
930x22c4 SE_TCL_POINT_SPRITE_CNTL 930x22c4 SE_TCL_POINT_SPRITE_CNTL
940x22d0 SE_PVS_CNTL
950x22d4 SE_PVS_CONST_CNTL
940x2648 RE_POINTSIZE 960x2648 RE_POINTSIZE
950x26c0 RE_TOP_LEFT 970x26c0 RE_TOP_LEFT
960x26c4 RE_MISC 980x26c4 RE_MISC
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
new file mode 100644
index 000000000000..989f7a020832
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -0,0 +1,795 @@
1r420 0x4f60
20x1434 SRC_Y_X
30x1438 DST_Y_X
40x143C DST_HEIGHT_WIDTH
50x146C DP_GUI_MASTER_CNTL
60x1474 BRUSH_Y_X
70x1478 DP_BRUSH_BKGD_CLR
80x147C DP_BRUSH_FRGD_CLR
90x1480 BRUSH_DATA0
100x1484 BRUSH_DATA1
110x1598 DST_WIDTH_HEIGHT
120x15C0 CLR_CMP_CNTL
130x15C4 CLR_CMP_CLR_SRC
140x15C8 CLR_CMP_CLR_DST
150x15CC CLR_CMP_MSK
160x15D8 DP_SRC_FRGD_CLR
170x15DC DP_SRC_BKGD_CLR
180x1600 DST_LINE_START
190x1604 DST_LINE_END
200x1608 DST_LINE_PATCOUNT
210x16C0 DP_CNTL
220x16CC DP_WRITE_MSK
230x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
240x16E8 DEFAULT_SC_BOTTOM_RIGHT
250x16EC SC_TOP_LEFT
260x16F0 SC_BOTTOM_RIGHT
270x16F4 SRC_SC_BOTTOM_RIGHT
280x1714 DSTCACHE_CTLSTAT
290x1720 WAIT_UNTIL
300x172C RBBM_GUICNTL
310x1D98 VAP_VPORT_XSCALE
320x1D9C VAP_VPORT_XOFFSET
330x1DA0 VAP_VPORT_YSCALE
340x1DA4 VAP_VPORT_YOFFSET
350x1DA8 VAP_VPORT_ZSCALE
360x1DAC VAP_VPORT_ZOFFSET
370x2080 VAP_CNTL
380x2090 VAP_OUT_VTX_FMT_0
390x2094 VAP_OUT_VTX_FMT_1
400x20B0 VAP_VTE_CNTL
410x2138 VAP_VF_MIN_VTX_INDX
420x2140 VAP_CNTL_STATUS
430x2150 VAP_PROG_STREAM_CNTL_0
440x2154 VAP_PROG_STREAM_CNTL_1
450x2158 VAP_PROG_STREAM_CNTL_2
460x215C VAP_PROG_STREAM_CNTL_3
470x2160 VAP_PROG_STREAM_CNTL_4
480x2164 VAP_PROG_STREAM_CNTL_5
490x2168 VAP_PROG_STREAM_CNTL_6
500x216C VAP_PROG_STREAM_CNTL_7
510x2180 VAP_VTX_STATE_CNTL
520x2184 VAP_VSM_VTX_ASSM
530x2188 VAP_VTX_STATE_IND_REG_0
540x218C VAP_VTX_STATE_IND_REG_1
550x2190 VAP_VTX_STATE_IND_REG_2
560x2194 VAP_VTX_STATE_IND_REG_3
570x2198 VAP_VTX_STATE_IND_REG_4
580x219C VAP_VTX_STATE_IND_REG_5
590x21A0 VAP_VTX_STATE_IND_REG_6
600x21A4 VAP_VTX_STATE_IND_REG_7
610x21A8 VAP_VTX_STATE_IND_REG_8
620x21AC VAP_VTX_STATE_IND_REG_9
630x21B0 VAP_VTX_STATE_IND_REG_10
640x21B4 VAP_VTX_STATE_IND_REG_11
650x21B8 VAP_VTX_STATE_IND_REG_12
660x21BC VAP_VTX_STATE_IND_REG_13
670x21C0 VAP_VTX_STATE_IND_REG_14
680x21C4 VAP_VTX_STATE_IND_REG_15
690x21DC VAP_PSC_SGN_NORM_CNTL
700x21E0 VAP_PROG_STREAM_CNTL_EXT_0
710x21E4 VAP_PROG_STREAM_CNTL_EXT_1
720x21E8 VAP_PROG_STREAM_CNTL_EXT_2
730x21EC VAP_PROG_STREAM_CNTL_EXT_3
740x21F0 VAP_PROG_STREAM_CNTL_EXT_4
750x21F4 VAP_PROG_STREAM_CNTL_EXT_5
760x21F8 VAP_PROG_STREAM_CNTL_EXT_6
770x21FC VAP_PROG_STREAM_CNTL_EXT_7
780x2200 VAP_PVS_VECTOR_INDX_REG
790x2204 VAP_PVS_VECTOR_DATA_REG
800x2208 VAP_PVS_VECTOR_DATA_REG_128
810x221C VAP_CLIP_CNTL
820x2220 VAP_GB_VERT_CLIP_ADJ
830x2224 VAP_GB_VERT_DISC_ADJ
840x2228 VAP_GB_HORZ_CLIP_ADJ
850x222C VAP_GB_HORZ_DISC_ADJ
860x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
870x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
880x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
890x223C VAP_PVS_FLOW_CNTL_ADDRS_3
900x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
910x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
920x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
930x224C VAP_PVS_FLOW_CNTL_ADDRS_7
940x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
950x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
960x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
970x225C VAP_PVS_FLOW_CNTL_ADDRS_11
980x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
990x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
1000x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
1010x226C VAP_PVS_FLOW_CNTL_ADDRS_15
1020x2284 VAP_PVS_STATE_FLUSH_REG
1030x2288 VAP_PVS_VTX_TIMEOUT_REG
1040x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
1050x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
1060x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
1070x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
1080x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
1090x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
1100x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
1110x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
1120x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
1130x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
1140x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
1150x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
1160x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
1170x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
1180x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
1190x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
1200x22D0 VAP_PVS_CODE_CNTL_0
1210x22D4 VAP_PVS_CONST_CNTL
1220x22D8 VAP_PVS_CODE_CNTL_1
1230x22DC VAP_PVS_FLOW_CNTL_OPC
1240x342C RB2D_DSTCACHE_CTLSTAT
1250x4000 GB_VAP_RASTER_VTX_FMT_0
1260x4004 GB_VAP_RASTER_VTX_FMT_1
1270x4008 GB_ENABLE
1280x401C GB_SELECT
1290x4020 GB_AA_CONFIG
1300x4024 GB_FIFO_SIZE
1310x4100 TX_INVALTAGS
1320x4200 GA_POINT_S0
1330x4204 GA_POINT_T0
1340x4208 GA_POINT_S1
1350x420C GA_POINT_T1
1360x4214 GA_TRIANGLE_STIPPLE
1370x421C GA_POINT_SIZE
1380x4230 GA_POINT_MINMAX
1390x4234 GA_LINE_CNTL
1400x4238 GA_LINE_STIPPLE_CONFIG
1410x4260 GA_LINE_STIPPLE_VALUE
1420x4264 GA_LINE_S0
1430x4268 GA_LINE_S1
1440x4278 GA_COLOR_CONTROL
1450x427C GA_SOLID_RG
1460x4280 GA_SOLID_BA
1470x4288 GA_POLY_MODE
1480x428C GA_ROUND_MODE
1490x4290 GA_OFFSET
1500x4294 GA_FOG_SCALE
1510x4298 GA_FOG_OFFSET
1520x42A0 SU_TEX_WRAP
1530x42A4 SU_POLY_OFFSET_FRONT_SCALE
1540x42A8 SU_POLY_OFFSET_FRONT_OFFSET
1550x42AC SU_POLY_OFFSET_BACK_SCALE
1560x42B0 SU_POLY_OFFSET_BACK_OFFSET
1570x42B4 SU_POLY_OFFSET_ENABLE
1580x42B8 SU_CULL_MODE
1590x42C0 SU_DEPTH_SCALE
1600x42C4 SU_DEPTH_OFFSET
1610x42C8 SU_REG_DEST
1620x4300 RS_COUNT
1630x4304 RS_INST_COUNT
1640x4310 RS_IP_0
1650x4314 RS_IP_1
1660x4318 RS_IP_2
1670x431C RS_IP_3
1680x4320 RS_IP_4
1690x4324 RS_IP_5
1700x4328 RS_IP_6
1710x432C RS_IP_7
1720x4330 RS_INST_0
1730x4334 RS_INST_1
1740x4338 RS_INST_2
1750x433C RS_INST_3
1760x4340 RS_INST_4
1770x4344 RS_INST_5
1780x4348 RS_INST_6
1790x434C RS_INST_7
1800x4350 RS_INST_8
1810x4354 RS_INST_9
1820x4358 RS_INST_10
1830x435C RS_INST_11
1840x4360 RS_INST_12
1850x4364 RS_INST_13
1860x4368 RS_INST_14
1870x436C RS_INST_15
1880x43A4 SC_HYPERZ_EN
1890x43A8 SC_EDGERULE
1900x43B0 SC_CLIP_0_A
1910x43B4 SC_CLIP_0_B
1920x43B8 SC_CLIP_1_A
1930x43BC SC_CLIP_1_B
1940x43C0 SC_CLIP_2_A
1950x43C4 SC_CLIP_2_B
1960x43C8 SC_CLIP_3_A
1970x43CC SC_CLIP_3_B
1980x43D0 SC_CLIP_RULE
1990x43E0 SC_SCISSOR0
2000x43E8 SC_SCREENDOOR
2010x4440 TX_FILTER1_0
2020x4444 TX_FILTER1_1
2030x4448 TX_FILTER1_2
2040x444C TX_FILTER1_3
2050x4450 TX_FILTER1_4
2060x4454 TX_FILTER1_5
2070x4458 TX_FILTER1_6
2080x445C TX_FILTER1_7
2090x4460 TX_FILTER1_8
2100x4464 TX_FILTER1_9
2110x4468 TX_FILTER1_10
2120x446C TX_FILTER1_11
2130x4470 TX_FILTER1_12
2140x4474 TX_FILTER1_13
2150x4478 TX_FILTER1_14
2160x447C TX_FILTER1_15
2170x4580 TX_CHROMA_KEY_0
2180x4584 TX_CHROMA_KEY_1
2190x4588 TX_CHROMA_KEY_2
2200x458C TX_CHROMA_KEY_3
2210x4590 TX_CHROMA_KEY_4
2220x4594 TX_CHROMA_KEY_5
2230x4598 TX_CHROMA_KEY_6
2240x459C TX_CHROMA_KEY_7
2250x45A0 TX_CHROMA_KEY_8
2260x45A4 TX_CHROMA_KEY_9
2270x45A8 TX_CHROMA_KEY_10
2280x45AC TX_CHROMA_KEY_11
2290x45B0 TX_CHROMA_KEY_12
2300x45B4 TX_CHROMA_KEY_13
2310x45B8 TX_CHROMA_KEY_14
2320x45BC TX_CHROMA_KEY_15
2330x45C0 TX_BORDER_COLOR_0
2340x45C4 TX_BORDER_COLOR_1
2350x45C8 TX_BORDER_COLOR_2
2360x45CC TX_BORDER_COLOR_3
2370x45D0 TX_BORDER_COLOR_4
2380x45D4 TX_BORDER_COLOR_5
2390x45D8 TX_BORDER_COLOR_6
2400x45DC TX_BORDER_COLOR_7
2410x45E0 TX_BORDER_COLOR_8
2420x45E4 TX_BORDER_COLOR_9
2430x45E8 TX_BORDER_COLOR_10
2440x45EC TX_BORDER_COLOR_11
2450x45F0 TX_BORDER_COLOR_12
2460x45F4 TX_BORDER_COLOR_13
2470x45F8 TX_BORDER_COLOR_14
2480x45FC TX_BORDER_COLOR_15
2490x4600 US_CONFIG
2500x4604 US_PIXSIZE
2510x4608 US_CODE_OFFSET
2520x460C US_RESET
2530x4610 US_CODE_ADDR_0
2540x4614 US_CODE_ADDR_1
2550x4618 US_CODE_ADDR_2
2560x461C US_CODE_ADDR_3
2570x4620 US_TEX_INST_0
2580x4624 US_TEX_INST_1
2590x4628 US_TEX_INST_2
2600x462C US_TEX_INST_3
2610x4630 US_TEX_INST_4
2620x4634 US_TEX_INST_5
2630x4638 US_TEX_INST_6
2640x463C US_TEX_INST_7
2650x4640 US_TEX_INST_8
2660x4644 US_TEX_INST_9
2670x4648 US_TEX_INST_10
2680x464C US_TEX_INST_11
2690x4650 US_TEX_INST_12
2700x4654 US_TEX_INST_13
2710x4658 US_TEX_INST_14
2720x465C US_TEX_INST_15
2730x4660 US_TEX_INST_16
2740x4664 US_TEX_INST_17
2750x4668 US_TEX_INST_18
2760x466C US_TEX_INST_19
2770x4670 US_TEX_INST_20
2780x4674 US_TEX_INST_21
2790x4678 US_TEX_INST_22
2800x467C US_TEX_INST_23
2810x4680 US_TEX_INST_24
2820x4684 US_TEX_INST_25
2830x4688 US_TEX_INST_26
2840x468C US_TEX_INST_27
2850x4690 US_TEX_INST_28
2860x4694 US_TEX_INST_29
2870x4698 US_TEX_INST_30
2880x469C US_TEX_INST_31
2890x46A4 US_OUT_FMT_0
2900x46A8 US_OUT_FMT_1
2910x46AC US_OUT_FMT_2
2920x46B0 US_OUT_FMT_3
2930x46B4 US_W_FMT
2940x46B8 US_CODE_BANK
2950x46BC US_CODE_EXT
2960x46C0 US_ALU_RGB_ADDR_0
2970x46C4 US_ALU_RGB_ADDR_1
2980x46C8 US_ALU_RGB_ADDR_2
2990x46CC US_ALU_RGB_ADDR_3
3000x46D0 US_ALU_RGB_ADDR_4
3010x46D4 US_ALU_RGB_ADDR_5
3020x46D8 US_ALU_RGB_ADDR_6
3030x46DC US_ALU_RGB_ADDR_7
3040x46E0 US_ALU_RGB_ADDR_8
3050x46E4 US_ALU_RGB_ADDR_9
3060x46E8 US_ALU_RGB_ADDR_10
3070x46EC US_ALU_RGB_ADDR_11
3080x46F0 US_ALU_RGB_ADDR_12
3090x46F4 US_ALU_RGB_ADDR_13
3100x46F8 US_ALU_RGB_ADDR_14
3110x46FC US_ALU_RGB_ADDR_15
3120x4700 US_ALU_RGB_ADDR_16
3130x4704 US_ALU_RGB_ADDR_17
3140x4708 US_ALU_RGB_ADDR_18
3150x470C US_ALU_RGB_ADDR_19
3160x4710 US_ALU_RGB_ADDR_20
3170x4714 US_ALU_RGB_ADDR_21
3180x4718 US_ALU_RGB_ADDR_22
3190x471C US_ALU_RGB_ADDR_23
3200x4720 US_ALU_RGB_ADDR_24
3210x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E04 RB3D_BLENDCNTL_R3
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E0C RB3D_COLOR_CHANNEL_MASK
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4E80 RB3D_AARESOLVE_OFFSET
+0x4E84 RB3D_AARESOLVE_PITCH
+0x4E88 RB3D_AARESOLVE_CTL
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F1C ZB_BW_CNTL
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F30 ZB_ZMASK_OFFSET
+0x4F34 ZB_ZMASK_PITCH
+0x4F38 ZB_ZMASK_WRINDEX
+0x4F3C ZB_ZMASK_DWORD
+0x4F40 ZB_ZMASK_RDINDEX
+0x4F44 ZB_HIZ_OFFSET
+0x4F48 ZB_HIZ_WRINDEX
+0x4F4C ZB_HIZ_DWORD
+0x4F50 ZB_HIZ_RDINDEX
+0x4F54 ZB_HIZ_PITCH
+0x4F58 ZB_ZPASS_DATA
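Note: the reg_srcs lists above are "safe register" tables, i.e. registers the radeon command-stream checker lets user space write directly; in-tree they are compiled by mkregtable into per-ASIC bitmaps. The stand-alone sketch below (hypothetical names, plain C, not the driver's actual lookup) shows the equivalent membership test over a sorted excerpt of such a table.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Tiny excerpt of a safe-register table; the real lists hold
	 * hundreds of entries and must stay sorted for this search. */
	static const uint32_t safe_regs[] = {
		0x4BC0,	/* FG_FOG_BLEND */
		0x4BC4,	/* FG_FOG_FACTOR */
		0x4F58,	/* ZB_ZPASS_DATA */
	};

	static bool reg_is_safe(uint32_t reg)
	{
		size_t lo = 0, hi = sizeof(safe_regs) / sizeof(safe_regs[0]);

		while (lo < hi) {	/* classic binary search */
			size_t mid = lo + (hi - lo) / 2;

			if (safe_regs[mid] == reg)
				return true;
			if (safe_regs[mid] < reg)
				lo = mid + 1;
			else
				hi = mid;
		}
		return false;
	}

	int main(void)
	{
		/* 0x4BC4 is in the excerpt above, 0x9999 is not. */
		printf("0x4BC4 -> %d\n", reg_is_safe(0x4BC4));
		printf("0x9999 -> %d\n", reg_is_safe(0x9999));
		return 0;
	}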
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 8e3c0b807add..6801b865d1c4 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -153,7 +153,7 @@ rs600 0x6d40
 0x42A4 SU_POLY_OFFSET_FRONT_SCALE
 0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
 0x42AC SU_POLY_OFFSET_BACK_SCALE
 0x42B0 SU_POLY_OFFSET_BACK_OFFSET
 0x42B4 SU_POLY_OFFSET_ENABLE
 0x42B8 SU_CULL_MODE
 0x42C0 SU_DEPTH_SCALE
@@ -291,6 +291,8 @@ rs600 0x6d40
 0x46AC US_OUT_FMT_2
 0x46B0 US_OUT_FMT_3
 0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
 0x46C0 US_ALU_RGB_ADDR_0
 0x46C4 US_ALU_RGB_ADDR_1
 0x46C8 US_ALU_RGB_ADDR_2
@@ -547,6 +549,70 @@ rs600 0x6d40
 0x4AB4 US_ALU_ALPHA_INST_61
 0x4AB8 US_ALU_ALPHA_INST_62
 0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
 0x4BC0 FG_FOG_BLEND
 0x4BC4 FG_FOG_FACTOR
 0x4BC8 FG_FOG_COLOR_R
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 0102a0d5735c..38abf63bf2cd 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -161,7 +161,12 @@ rv515 0x6d40
 0x401C GB_SELECT
 0x4020 GB_AA_CONFIG
 0x4024 GB_FIFO_SIZE
+0x4028 GB_Z_PEQ_CONFIG
 0x4100 TX_INVALTAGS
+0x4114 SU_TEX_WRAP_PS3
+0x4118 PS3_ENABLE
+0x411c PS3_VTX_FMT
+0x4120 PS3_TEX_SOURCE
 0x4200 GA_POINT_S0
 0x4204 GA_POINT_T0
 0x4208 GA_POINT_S1
@@ -171,6 +176,7 @@ rv515 0x6d40
 0x4230 GA_POINT_MINMAX
 0x4234 GA_LINE_CNTL
 0x4238 GA_LINE_STIPPLE_CONFIG
+0x4258 GA_COLOR_CONTROL_PS3
 0x4260 GA_LINE_STIPPLE_VALUE
 0x4264 GA_LINE_S0
 0x4268 GA_LINE_S1
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 368415df5f3a..287fcebfb4e6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 	return 0;
 }
 
+int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(0x0150);
+		if (tmp & (1 << 2)) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
 void rs400_gpu_init(struct radeon_device *rdev)
 {
 	/* FIXME: HDP same place on rs400 ? */
 	r100_hdp_reset(rdev);
 	/* FIXME: is this correct ? */
 	r420_pipes_init(rdev);
-	if (r300_mc_wait_for_idle(rdev)) {
-		printk(KERN_WARNING "Failed to wait MC idle while "
-		       "programming pipes. Bad things might happen.\n");
+	if (rs400_mc_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "rs400: Failed to wait MC idle while "
+		       "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
 	}
 }
 
@@ -356,6 +372,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
 	rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
 	rdev->mc.gtt_location = 0xFFFFFFFFUL;
 	r = radeon_mc_setup(rdev);
+	rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
 	if (r)
 		return r;
 	return 0;
@@ -369,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
 	r100_mc_stop(rdev, &save);
 
 	/* Wait for mc idle */
-	if (r300_mc_wait_for_idle(rdev))
-		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+	if (rs400_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
 	WREG32(R_000148_MC_FB_LOCATION,
 		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
 		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
@@ -395,6 +412,7 @@ static int rs400_startup(struct radeon_device *rdev)
 		return r;
 	/* Enable IRQ */
 	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
@@ -446,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev)
 
 void rs400_fini(struct radeon_device *rdev)
 {
-	rs400_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -525,7 +542,6 @@ int rs400_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		rs400_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 4f8ea4260572..c3818562a13e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -56,6 +56,7 @@ int rs600_mc_init(struct radeon_device *rdev)
 	rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
 	rdev->mc.gtt_location = 0xffffffffUL;
 	r = radeon_mc_setup(rdev);
+	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
 	if (r)
 		return r;
 	return 0;
@@ -134,7 +135,8 @@ void rs600_hpd_init(struct radeon_device *rdev)
 			break;
 		}
 	}
-	rs600_irq_set(rdev);
+	if (rdev->irq.installed)
+		rs600_irq_set(rdev);
 }
 
 void rs600_hpd_fini(struct radeon_device *rdev)
@@ -315,6 +317,11 @@ int rs600_irq_set(struct radeon_device *rdev)
 	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
 		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
 
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WREG32(R_000040_GEN_INT_CNTL, 0);
+		return -EINVAL;
+	}
 	if (rdev->irq.sw_int) {
 		tmp |= S_000040_SW_INT_EN(1);
 	}
@@ -396,7 +403,7 @@ int rs600_irq_process(struct radeon_device *rdev)
 	}
 	while (status || r500_disp_int) {
 		/* SW interrupt */
-		if (G_000040_SW_INT_EN(status))
+		if (G_000044_SW_INT(status))
 			radeon_fence_process(rdev);
 		/* Vertical blank interrupts */
 		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
@@ -553,6 +560,7 @@ static int rs600_startup(struct radeon_device *rdev)
 		return r;
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
@@ -602,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev)
 
 void rs600_fini(struct radeon_device *rdev)
 {
-	rs600_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -681,7 +688,6 @@ int rs600_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		rs600_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
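Note: two of the rs600.c hunks above share one theme: never touch the interrupt-enable registers before drm's handler exists (rs600_hpd_init() now checks rdev->irq.installed, and rs600_irq_set() refuses with -EINVAL and masks everything). A toy stand-alone model of that guard (hypothetical types and names, plain C):

	#include <stdbool.h>
	#include <stdio.h>

	struct toy_dev {
		bool handler_installed;	/* mirrors rdev->irq.installed */
		unsigned int int_cntl;	/* mirrors R_000040_GEN_INT_CNTL */
	};

	/* Refuse to unmask interrupt sources until a handler exists; an
	 * IRQ raised with nobody attached can leave the line stuck. */
	static int toy_irq_set(struct toy_dev *d, unsigned int sources)
	{
		if (!d->handler_installed) {
			d->int_cntl = 0;	/* mask everything */
			return -1;		/* the patch returns -EINVAL */
		}
		d->int_cntl = sources;
		return 0;
	}

	int main(void)
	{
		struct toy_dev d = { .handler_installed = false, .int_cntl = 0 };

		if (toy_irq_set(&d, 0x1))
			puts("refused: no handler installed yet");
		d.handler_installed = true;
		if (toy_irq_set(&d, 0x1) == 0)
			puts("sources unmasked after handler installation");
		return 0;
	}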
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1e22f52d6039..06e2771aee5a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -172,6 +172,7 @@ static int rs690_mc_init(struct radeon_device *rdev)
 	rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
 	rdev->mc.gtt_location = 0xFFFFFFFFUL;
 	r = radeon_mc_setup(rdev);
+	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
 	if (r)
 		return r;
 	return 0;
@@ -625,6 +626,7 @@ static int rs690_startup(struct radeon_device *rdev)
 		return r;
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
@@ -674,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
 
 void rs690_fini(struct radeon_device *rdev)
 {
-	rs690_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -754,7 +755,6 @@ int rs690_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		rs690_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 59632a506b46..0e1e6b8632b8 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -479,6 +479,7 @@ static int rv515_startup(struct radeon_device *rdev)
 	}
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
@@ -536,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
 
 void rv515_fini(struct radeon_device *rdev)
 {
-	rv515_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -614,13 +614,12 @@ int rv515_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		rv515_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		rv370_pcie_gart_fini(rdev);
 		radeon_agp_fini(rdev);
-		radeon_irq_kms_fini(rdev);
 		rdev->accel_working = false;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 3bcb66e52786..03021674d097 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
 	gb_tiling_config |= BANK_SWAPS(1);
 
-	backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
-							rdev->config.rv770.max_backends,
-							(0xff << rdev->config.rv770.max_backends) & 0xff);
+	if (rdev->family == CHIP_RV740)
+		backend_map = 0x28;
+	else
+		backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
+								rdev->config.rv770.max_backends,
+								(0xff << rdev->config.rv770.max_backends) & 0xff);
 	gb_tiling_config |= BACKEND_MAP(backend_map);
 
 	cc_gc_shader_pipe_config =
@@ -779,7 +782,6 @@ int rv770_mc_init(struct radeon_device *rdev)
 	fixed20_12 a;
 	u32 tmp;
 	int chansize, numchan;
-	int r;
 
 	/* Get VRAM informations */
 	rdev->mc.vram_is_ddr = true;
@@ -822,9 +824,6 @@ int rv770_mc_init(struct radeon_device *rdev)
 	rdev->mc.real_vram_size = rdev->mc.aper_size;
 
 	if (rdev->flags & RADEON_IS_AGP) {
-		r = radeon_agp_init(rdev);
-		if (r)
-			return r;
 		/* gtt_size is setup by radeon_agp_init */
 		rdev->mc.gtt_location = rdev->mc.agp_base;
 		tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -891,26 +890,25 @@ static int rv770_startup(struct radeon_device *rdev)
 		return r;
 	}
 	rv770_gpu_init(rdev);
-
-	if (!rdev->r600_blit.shader_obj) {
-		r = r600_blit_init(rdev);
+	r = r600_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+	}
+	/* pin copy shader into vram */
+	if (rdev->r600_blit.shader_obj) {
+		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+				&rdev->r600_blit.shader_gpu_addr);
+		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed blitter (%d).\n", r);
+			DRM_ERROR("failed to pin blit object %d\n", r);
 			return r;
 		}
 	}
-
-	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
-	if (unlikely(r != 0))
-		return r;
-	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-			  &rdev->r600_blit.shader_gpu_addr);
-	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-	if (r) {
-		DRM_ERROR("failed to pin blit object %d\n", r);
-		return r;
-	}
-
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
@@ -972,13 +970,16 @@ int rv770_suspend(struct radeon_device *rdev)
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
 	rdev->cp.ready = false;
+	r600_irq_suspend(rdev);
 	r600_wb_disable(rdev);
 	rv770_pcie_gart_disable(rdev);
 	/* unpin shaders bo */
-	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
-	if (likely(r == 0)) {
-		radeon_bo_unpin(rdev->r600_blit.shader_obj);
-		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+	if (rdev->r600_blit.shader_obj) {
+		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(rdev->r600_blit.shader_obj);
+			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+		}
 	}
 	return 0;
 }
@@ -1037,6 +1038,11 @@ int rv770_init(struct radeon_device *rdev)
 	r = radeon_fence_driver_init(rdev);
 	if (r)
 		return r;
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			radeon_agp_disable(rdev);
+	}
 	r = rv770_mc_init(rdev);
 	if (r)
 		return r;
@@ -1062,22 +1068,25 @@ int rv770_init(struct radeon_device *rdev)
 	rdev->accel_working = true;
 	r = rv770_startup(rdev);
 	if (r) {
-		rv770_suspend(rdev);
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r600_cp_fini(rdev);
 		r600_wb_fini(rdev);
-		radeon_ring_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		rv770_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
 	if (rdev->accel_working) {
 		r = radeon_ib_pool_init(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-			rdev->accel_working = false;
-		}
-		r = r600_ib_test(rdev);
-		if (r) {
-			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
 			rdev->accel_working = false;
+		} else {
+			r = r600_ib_test(rdev);
+			if (r) {
+				dev_err(rdev->dev, "IB test failed (%d).\n", r);
+				rdev->accel_working = false;
+			}
 		}
 	}
 	return 0;
@@ -1085,19 +1094,16 @@ int rv770_init(struct radeon_device *rdev)
 
 void rv770_fini(struct radeon_device *rdev)
 {
-	rv770_suspend(rdev);
-
 	r600_blit_fini(rdev);
+	r600_cp_fini(rdev);
+	r600_wb_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_irq_kms_fini(rdev);
-	radeon_ring_fini(rdev);
-	r600_wb_fini(rdev);
 	rv770_pcie_gart_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_clocks_fini(rdev);
-	if (rdev->flags & RADEON_IS_AGP)
-		radeon_agp_fini(rdev);
+	radeon_agp_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
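Note: the rv770_startup() change above demotes a blitter-init failure from fatal to a warning: it clears the asic copy hook so buffer moves fall back to memcpy instead of failing device init. A simplified stand-alone model of that fallback (hypothetical names, plain C):

	#include <stdio.h>
	#include <string.h>

	struct toy_asic {
		/* NULL means "no accelerated copy, use the CPU path" */
		int (*copy)(void *dst, const void *src, size_t n);
	};

	static int cpu_copy(void *dst, const void *src, size_t n)
	{
		memcpy(dst, src, n);
		return 0;
	}

	static int buffer_move(struct toy_asic *asic, void *dst,
			       const void *src, size_t n)
	{
		if (asic->copy)
			return asic->copy(dst, src, n);
		return cpu_copy(dst, src, n);	/* memcpy fallback */
	}

	int main(void)
	{
		struct toy_asic asic = { .copy = NULL };	/* blit init "failed" */
		char src[8] = "blit", dst[8] = "";

		buffer_move(&asic, dst, src, sizeof(src));
		printf("copied via fallback: %s\n", dst);
		return 0;
	}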
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2920f9a279e1..c7320ce4567d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -426,7 +426,8 @@ moved:
 		    bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
 		spin_unlock(&bo->lock);
-	}
+	} else
+		bo->offset = 0;
 
 	return 0;
 
@@ -523,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
 	struct ttm_bo_global *glob = bdev->glob;
-	struct ttm_buffer_object *entry, *nentry;
-	struct list_head *list, *next;
-	int ret;
+	struct ttm_buffer_object *entry = NULL;
+	int ret = 0;
 
 	spin_lock(&glob->lru_lock);
-	list_for_each_safe(list, next, &bdev->ddestroy) {
-		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
-		nentry = NULL;
+	if (list_empty(&bdev->ddestroy))
+		goto out_unlock;
 
-		/*
-		 * Protect the next list entry from destruction while we
-		 * unlock the lru_lock.
-		 */
+	entry = list_first_entry(&bdev->ddestroy,
+		struct ttm_buffer_object, ddestroy);
+	kref_get(&entry->list_kref);
 
-		if (next != &bdev->ddestroy) {
-			nentry = list_entry(next, struct ttm_buffer_object,
-					    ddestroy);
+	for (;;) {
+		struct ttm_buffer_object *nentry = NULL;
+
+		if (entry->ddestroy.next != &bdev->ddestroy) {
+			nentry = list_first_entry(&entry->ddestroy,
+				struct ttm_buffer_object, ddestroy);
 			kref_get(&nentry->list_kref);
 		}
-		kref_get(&entry->list_kref);
 
 		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_cleanup_refs(entry, remove_all);
 		kref_put(&entry->list_kref, ttm_bo_release_list);
+		entry = nentry;
+
+		if (ret || !entry)
+			goto out;
 
 		spin_lock(&glob->lru_lock);
-		if (nentry) {
-			bool next_onlist = !list_empty(next);
-			spin_unlock(&glob->lru_lock);
-			kref_put(&nentry->list_kref, ttm_bo_release_list);
-			spin_lock(&glob->lru_lock);
-			/*
-			 * Someone might have raced us and removed the
-			 * next entry from the list. We don't bother restarting
-			 * list traversal.
-			 */
-
-			if (!next_onlist)
-				break;
-		}
-		if (ret)
+		if (list_empty(&entry->ddestroy))
 			break;
 	}
-	ret = !list_empty(&bdev->ddestroy);
-	spin_unlock(&glob->lru_lock);
 
+out_unlock:
+	spin_unlock(&glob->lru_lock);
+out:
+	if (entry)
+		kref_put(&entry->list_kref, ttm_bo_release_list);
 	return ret;
 }
 
@@ -950,6 +943,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
 				~TTM_PL_MASK_MEMTYPE);
 
+
+		if (mem_type == TTM_PL_SYSTEM) {
+			mem->mem_type = mem_type;
+			mem->placement = cur_flags;
+			mem->mm_node = NULL;
+			return 0;
+		}
+
 		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
 						interruptible, no_wait);
 		if (ret == 0 && mem->mm_node) {
@@ -1019,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 			      struct ttm_mem_reg *mem)
 {
 	int i;
+	struct drm_mm_node *node = mem->mm_node;
+
+	if (node && placement->lpfn != 0 &&
+	    (node->start < placement->fpfn ||
+	     node->start + node->size > placement->lpfn))
+		return -1;
 
 	for (i = 0; i < placement->num_placement; i++) {
 		if ((placement->placement[i] & mem->placement &
@@ -1844,6 +1851,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	 * anyone tries to access a ttm page.
 	 */
 
+	if (bo->bdev->driver->swap_notify)
+		bo->bdev->driver->swap_notify(bo);
+
 	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
 out:
 
@@ -1864,3 +1874,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
 		;
 }
+EXPORT_SYMBOL(ttm_bo_swapout_all);
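Note: the rewritten ttm_bo_delayed_delete() walks the ddestroy list by taking a kref on the current entry and on its successor before dropping lru_lock, so neither can be freed while the lock is released. The idiom in isolation (hypothetical single-threaded model, with a plain counter standing in for struct kref):

	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		struct node *next;
		int refs;		/* stand-in for a struct kref */
	};

	static void node_get(struct node *n)
	{
		n->refs++;
	}

	static void node_put(struct node *n)
	{
		if (--n->refs == 0)
			free(n);
	}

	/* Pin the successor before the point where the real code drops
	 * lru_lock, so a concurrent release cannot free it meanwhile. */
	static void walk(struct node *head)
	{
		struct node *cur = head;

		if (cur)
			node_get(cur);
		while (cur) {
			struct node *nxt = cur->next;

			if (nxt)
				node_get(nxt);
			/* ... lock dropped here; cur and nxt stay pinned ... */
			printf("visiting node %p\n", (void *)cur);
			node_put(cur);
			cur = nxt;
		}
	}

	int main(void)
	{
		struct node *b = calloc(1, sizeof(*b));
		struct node *a = calloc(1, sizeof(*a));

		a->refs = b->refs = 1;	/* the list's own reference */
		a->next = b;
		walk(a);
		node_put(a);		/* drop the list references */
		node_put(b);
		return 0;
	}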
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2ecf7d0c64f6..5ca37a58a98c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 {
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
-	uint32_t save_flags = old_mem->placement;
 	int ret;
 
 	if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -62,7 +61,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
 				TTM_PL_MASK_MEM);
 		old_mem->mem_type = TTM_PL_SYSTEM;
-		save_flags = old_mem->placement;
 	}
 
 	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
@@ -77,7 +75,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
@@ -219,7 +217,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	void *old_iomap;
 	void *new_iomap;
 	int ret;
-	uint32_t save_flags = old_mem->placement;
 	unsigned long i;
 	unsigned long page;
 	unsigned long add = 0;
@@ -270,7 +267,6 @@ out2:
 
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
 
 	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
 		ttm_tt_unbind(ttm);
@@ -537,7 +533,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;
-	uint32_t save_flags = old_mem->placement;
 	struct ttm_buffer_object *ghost_obj;
 	void *tmp_obj = NULL;
 
@@ -598,7 +593,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index f619ebcaa4ec..3d172ef04ee1 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -288,6 +288,7 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
 	wake_up_all(&lock->queue);
 	spin_unlock(&lock->lock);
 }
+EXPORT_SYMBOL(ttm_suspend_unlock);
 
 static bool __ttm_suspend_lock(struct ttm_lock *lock)
 {
@@ -309,3 +310,4 @@ void ttm_suspend_lock(struct ttm_lock *lock)
 {
 	wait_event(lock->queue, __ttm_suspend_lock(lock));
 }
+EXPORT_SYMBOL(ttm_suspend_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 1099abac824b..75e9d6f86ba4 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -109,8 +109,8 @@ struct ttm_ref_object {
 	struct drm_hash_item hash;
 	struct list_head head;
 	struct kref kref;
-	struct ttm_base_object *obj;
 	enum ttm_ref_type ref_type;
+	struct ttm_base_object *obj;
 	struct ttm_object_file *tfile;
 };
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9c2b1cc5dba5..3d47a2c12322 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -196,23 +196,34 @@ EXPORT_SYMBOL(ttm_tt_populate);
 
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
-					  enum ttm_caching_state c_state)
+					  enum ttm_caching_state c_old,
+					  enum ttm_caching_state c_new)
 {
+	int ret = 0;
+
 	if (PageHighMem(p))
 		return 0;
 
-	switch (c_state) {
-	case tt_cached:
-		return set_pages_wb(p, 1);
-	case tt_wc:
-		return set_memory_wc((unsigned long) page_address(p), 1);
-	default:
-		return set_pages_uc(p, 1);
+	if (c_old != tt_cached) {
+		/* p isn't in the default caching state, set it to
+		 * writeback first to free its current memtype. */
+
+		ret = set_pages_wb(p, 1);
+		if (ret)
+			return ret;
 	}
+
+	if (c_new == tt_wc)
+		ret = set_memory_wc((unsigned long) page_address(p), 1);
+	else if (c_new == tt_uncached)
+		ret = set_pages_uc(p, 1);
+
+	return ret;
 }
 #else /* CONFIG_X86 */
 static inline int ttm_tt_set_page_caching(struct page *p,
-					  enum ttm_caching_state c_state)
+					  enum ttm_caching_state c_old,
+					  enum ttm_caching_state c_new)
 {
 	return 0;
 }
@@ -245,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
 	for (i = 0; i < ttm->num_pages; ++i) {
 		cur_page = ttm->pages[i];
 		if (likely(cur_page != NULL)) {
-			ret = ttm_tt_set_page_caching(cur_page, c_state);
+			ret = ttm_tt_set_page_caching(cur_page,
+						      ttm->caching_state,
+						      c_state);
 			if (unlikely(ret != 0))
 				goto out_err;
 		}
@@ -259,7 +272,7 @@ out_err:
 	for (j = 0; j < i; ++j) {
 		cur_page = ttm->pages[j];
 		if (likely(cur_page != NULL)) {
-			(void)ttm_tt_set_page_caching(cur_page,
+			(void)ttm_tt_set_page_caching(cur_page, c_state,
 						      ttm->caching_state);
 		}
 	}
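Note: ttm_tt_set_page_caching() now takes both the old and the new caching state because a page whose memtype is already write-combined or uncached must first be returned to write-back before a different attribute can be applied. The transition rule by itself, as a hypothetical stand-alone model (invented enum and names):

	#include <stdio.h>

	enum caching { tt_cached_m, tt_wc_m, tt_uncached_m };

	static const char *const names[] = { "cached", "wc", "uncached" };

	/* Any change away from a non-default memtype must pass through
	 * the default write-back state first (the c_old/c_new split). */
	static void set_caching(enum caching *cur, enum caching want)
	{
		if (want == *cur)
			return;
		if (*cur != tt_cached_m) {
			printf("  %s -> cached (free old memtype)\n", names[*cur]);
			*cur = tt_cached_m;
		}
		if (want != tt_cached_m)
			printf("  cached -> %s\n", names[want]);
		*cur = want;
	}

	int main(void)
	{
		enum caching state = tt_wc_m;

		puts("wc -> uncached:");
		set_caching(&state, tt_uncached_m);
		return 0;
	}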
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index d6f2d2b882e9..825ebe3d89d5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -48,6 +48,15 @@ struct ttm_placement vmw_vram_placement = {
 	.busy_placement = &vram_placement_flags
 };
 
+struct ttm_placement vmw_vram_sys_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.placement = &vram_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_placement_flags
+};
+
 struct ttm_placement vmw_vram_ne_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -172,6 +181,18 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 	return 0;
 }
 
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+			    struct ttm_mem_reg *new_mem)
+{
+	if (new_mem->mem_type != TTM_PL_SYSTEM)
+		vmw_dmabuf_gmr_unbind(bo);
+}
+
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+	vmw_dmabuf_gmr_unbind(bo);
+}
+
 /**
  * FIXME: We're using the old vmware polling method to sync.
  * Do this with fences instead.
@@ -225,5 +246,7 @@ struct ttm_bo_driver vmw_bo_driver = {
 	.sync_obj_wait = vmw_sync_obj_wait,
 	.sync_obj_flush = vmw_sync_obj_flush,
 	.sync_obj_unref = vmw_sync_obj_unref,
-	.sync_obj_ref = vmw_sync_obj_ref
+	.sync_obj_ref = vmw_sync_obj_ref,
+	.move_notify = vmw_move_notify,
+	.swap_notify = vmw_swap_notify
 };
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 1db1ef30be2b..0c9c0811f42d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -147,6 +147,8 @@ static char *vmw_devname = "vmwgfx";
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
+static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+			      void *ptr);
 
 static void vmw_print_capabilities(uint32_t capabilities)
 {
@@ -207,6 +209,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
 	struct vmw_private *dev_priv;
 	int ret;
+	uint32_t svga_id;
 
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (unlikely(dev_priv == NULL)) {
@@ -217,6 +220,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	dev_priv->dev = dev;
 	dev_priv->vmw_chipset = chipset;
+	dev_priv->last_read_sequence = (uint32_t) -100;
 	mutex_init(&dev_priv->hw_mutex);
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	rwlock_init(&dev_priv->resource_lock);
@@ -236,6 +240,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 
 	mutex_lock(&dev_priv->hw_mutex);
+
+	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
+	if (svga_id != SVGA_ID_2) {
+		ret = -ENOSYS;
+		DRM_ERROR("Unsuported SVGA ID 0x%x\n", svga_id);
+		mutex_unlock(&dev_priv->hw_mutex);
+		goto out_err0;
+	}
+
 	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
 
 	if (dev_priv->capabilities & SVGA_CAP_GMR) {
@@ -334,22 +348,24 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	 */
 
 		DRM_INFO("It appears like vesafb is loaded. "
-			 "Ignore above error if any. Entering stealth mode.\n");
+			 "Ignore above error if any.\n");
 		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
 			goto out_no_device;
 		}
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-	} else {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			goto out_no_device;
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-		vmw_fb_init(dev_priv);
 	}
+	ret = vmw_request_device(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_no_device;
+	vmw_kms_init(dev_priv);
+	vmw_overlay_init(dev_priv);
+	vmw_fb_init(dev_priv);
+
+	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
+	register_pm_notifier(&dev_priv->pm_nb);
+
+	DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
 
 	return 0;
 
@@ -385,17 +401,17 @@ static int vmw_driver_unload(struct drm_device *dev)
 
 	DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
 
-	if (!dev_priv->stealth) {
-		vmw_fb_close(dev_priv);
-		vmw_kms_close(dev_priv);
-		vmw_overlay_close(dev_priv);
-		vmw_release_device(dev_priv);
-		pci_release_regions(dev->pdev);
-	} else {
-		vmw_kms_close(dev_priv);
-		vmw_overlay_close(dev_priv);
+	unregister_pm_notifier(&dev_priv->pm_nb);
+
+	vmw_fb_close(dev_priv);
+	vmw_kms_close(dev_priv);
+	vmw_overlay_close(dev_priv);
+	vmw_release_device(dev_priv);
+	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
-	}
+	else
+		pci_release_regions(dev->pdev);
+
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
 	if (dev->devname == vmw_devname)
@@ -564,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
 	int ret = 0;
 
 	DRM_INFO("Master set.\n");
-	if (dev_priv->stealth) {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			return ret;
-	}
 
 	if (active) {
 		BUG_ON(active != &dev_priv->fbdev_master);
@@ -628,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
 
 	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
-	if (dev_priv->stealth) {
-		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-		if (unlikely(ret != 0))
-			DRM_ERROR("Unable to clean VRAM on master drop.\n");
-		vmw_release_device(dev_priv);
-	}
 	dev_priv->active_master = &dev_priv->fbdev_master;
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
-	if (!dev_priv->stealth)
-		vmw_fb_on(dev_priv);
+	vmw_fb_on(dev_priv);
 }
 
 
@@ -650,6 +654,57 @@ static void vmw_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
+static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+			      void *ptr)
+{
+	struct vmw_private *dev_priv =
+		container_of(nb, struct vmw_private, pm_nb);
+	struct vmw_master *vmaster = dev_priv->active_master;
+
+	switch (val) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		ttm_suspend_lock(&vmaster->lock);
+
+		/**
+		 * This empties VRAM and unbinds all GMR bindings.
+		 * Buffer contents is moved to swappable memory.
+		 */
+		ttm_bo_swapout_all(&dev_priv->bdev);
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		ttm_suspend_unlock(&vmaster->lock);
+		break;
+	case PM_RESTORE_PREPARE:
+		break;
+	case PM_POST_RESTORE:
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/**
+ * These might not be needed with the virtual SVGA device.
+ */
+
+int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+	return 0;
+}
+
+int vmw_pci_resume(struct pci_dev *pdev)
+{
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	return pci_enable_device(pdev);
+}
+
 static struct drm_driver driver = {
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
 	DRIVER_MODESET,
@@ -689,7 +744,9 @@ static struct drm_driver driver = {
 		.name = VMWGFX_DRIVER_NAME,
 		.id_table = vmw_pci_id_list,
 		.probe = vmw_probe,
-		.remove = vmw_remove
+		.remove = vmw_remove,
+		.suspend = vmw_pci_suspend,
+		.resume = vmw_pci_resume
 	},
 	.name = VMWGFX_DRIVER_NAME,
 	.desc = VMWGFX_DRIVER_DESC,
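Note: the PM notifier added above swaps out every buffer object on PM_HIBERNATION_PREPARE/PM_SUSPEND_PREPARE and releases the suspend lock on the matching POST events. A stand-alone model of that dispatch (hypothetical event constants, plain C):

	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel's PM_* event values. */
	enum toy_pm_event {
		TOY_PM_SUSPEND_PREPARE,
		TOY_PM_HIBERNATION_PREPARE,
		TOY_PM_POST_SUSPEND,
		TOY_PM_POST_HIBERNATION,
	};

	static int toy_pm_notifier(enum toy_pm_event val)
	{
		switch (val) {
		case TOY_PM_SUSPEND_PREPARE:
		case TOY_PM_HIBERNATION_PREPARE:
			puts("take suspend lock, swap out all buffer objects");
			break;
		case TOY_PM_POST_SUSPEND:
		case TOY_PM_POST_HIBERNATION:
			puts("release suspend lock");
			break;
		}
		return 0;
	}

	int main(void)
	{
		toy_pm_notifier(TOY_PM_SUSPEND_PREPARE);
		toy_pm_notifier(TOY_PM_POST_SUSPEND);
		return 0;
	}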
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e61bd85b6975..356dc935ec13 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -32,16 +32,17 @@
32#include "drmP.h" 32#include "drmP.h"
33#include "vmwgfx_drm.h" 33#include "vmwgfx_drm.h"
34#include "drm_hashtab.h" 34#include "drm_hashtab.h"
35#include "linux/suspend.h"
35#include "ttm/ttm_bo_driver.h" 36#include "ttm/ttm_bo_driver.h"
36#include "ttm/ttm_object.h" 37#include "ttm/ttm_object.h"
37#include "ttm/ttm_lock.h" 38#include "ttm/ttm_lock.h"
38#include "ttm/ttm_execbuf_util.h" 39#include "ttm/ttm_execbuf_util.h"
39#include "ttm/ttm_module.h" 40#include "ttm/ttm_module.h"
40 41
41#define VMWGFX_DRIVER_DATE "20090724" 42#define VMWGFX_DRIVER_DATE "20100209"
42#define VMWGFX_DRIVER_MAJOR 0 43#define VMWGFX_DRIVER_MAJOR 1
43#define VMWGFX_DRIVER_MINOR 1 44#define VMWGFX_DRIVER_MINOR 0
44#define VMWGFX_DRIVER_PATCHLEVEL 2 45#define VMWGFX_DRIVER_PATCHLEVEL 0
45#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 46#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
46#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 47#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
47#define VMWGFX_MAX_RELOCATIONS 2048 48#define VMWGFX_MAX_RELOCATIONS 2048
@@ -95,6 +96,8 @@ struct vmw_surface {
95 struct drm_vmw_size *sizes; 96 struct drm_vmw_size *sizes;
96 uint32_t num_sizes; 97 uint32_t num_sizes;
97 98
99 bool scanout;
100
98 /* TODO so far just a extra pointer */ 101 /* TODO so far just a extra pointer */
99 struct vmw_cursor_snooper snooper; 102 struct vmw_cursor_snooper snooper;
100}; 103};
@@ -110,6 +113,7 @@ struct vmw_fifo_state {
110 unsigned long static_buffer_size; 113 unsigned long static_buffer_size;
111 bool using_bounce_buffer; 114 bool using_bounce_buffer;
112 uint32_t capabilities; 115 uint32_t capabilities;
116 struct mutex fifo_mutex;
113 struct rw_semaphore rwsem; 117 struct rw_semaphore rwsem;
114}; 118};
115 119
@@ -210,7 +214,7 @@ struct vmw_private {
          * Fencing and IRQs.
          */
 
-        uint32_t fence_seq;
+        atomic_t fence_seq;
         wait_queue_head_t fence_queue;
         wait_queue_head_t fifo_queue;
         atomic_t fence_queue_waiters;
@@ -258,6 +262,7 @@ struct vmw_private {
 
         struct vmw_master *active_master;
         struct vmw_master fbdev_master;
+        struct notifier_block pm_nb;
 };
 
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -353,6 +358,7 @@ extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
                                 struct vmw_dma_buffer *bo);
 extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
                                 struct vmw_dma_buffer *bo);
+extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -386,6 +392,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
                                uint32_t *sequence);
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
 extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
+extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
 
 /**
  * TTM glue - vmwgfx_ttm_glue.c
@@ -401,6 +408,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
 extern struct ttm_placement vmw_vram_placement;
 extern struct ttm_placement vmw_vram_ne_placement;
+extern struct ttm_placement vmw_vram_sys_placement;
 extern struct ttm_placement vmw_sys_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
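
Several of the header changes above serve one theme: fence_seq becomes an atomic_t so sequence numbers can be allocated and sampled without holding the FIFO rwsem (the vmwgfx_fifo.c and vmwgfx_irq.c hunks below make the matching call-site changes). A small sketch of the lock-free allocation pattern, with zero kept reserved as an invalid sequence; assumes only the standard atomic_t API:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t fence_seq = ATOMIC_INIT(0);

/* Hand out the next sequence number; 0 is never returned. */
static u32 alloc_fence_sequence(void)
{
        u32 seq;

        do {
                seq = atomic_add_return(1, &fence_seq);
        } while (seq == 0);

        return seq;
}

/* Readers just sample the counter; no lock needed. */
static u32 last_emitted_sequence(void)
{
        return atomic_read(&fence_seq);
}
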
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2e92da567403..d69caf92ffe7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -490,10 +490,29 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
         if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
                 return 0;
 
+        /**
+         * Put BO in VRAM, only if there is space.
+         */
+
+        ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+        if (unlikely(ret == -ERESTARTSYS))
+                return ret;
+
+        /**
+         * Otherwise, set it up as GMR.
+         */
+
+        if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
+                return 0;
+
         ret = vmw_gmr_bind(dev_priv, bo);
         if (likely(ret == 0 || ret == -ERESTARTSYS))
                 return ret;
 
+        /**
+         * If that failed, try VRAM again, this time evicting
+         * previous contents.
+         */
 
         ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
         return ret;
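
The hunk above turns single-buffer validation into a three-step placement policy. A consolidated sketch of that control flow (the vmw_* function and placement names are the driver's own, taken from this diff; the wrapper function itself is illustrative):

static int place_buffer(struct vmw_private *dev_priv,
                        struct ttm_buffer_object *bo)
{
        int ret;

        /* 1. VRAM, but only if there is free space (no eviction). */
        ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
        if (ret == -ERESTARTSYS)
                return ret;
        if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
                return 0;

        /* 2. Otherwise bind it as a Guest Memory Region. */
        ret = vmw_gmr_bind(dev_priv, bo);
        if (ret == 0 || ret == -ERESTARTSYS)
                return ret;

        /* 3. Last resort: VRAM again, this time evicting other buffers. */
        return ttm_bo_validate(bo, &vmw_vram_placement, true, false);
}
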
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 641dde76ada1..a93367041cdc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
         info->pixmap.scan_align = 1;
 #endif
 
+        info->aperture_base = vmw_priv->vram_start;
+        info->aperture_size = vmw_priv->vram_size;
+
         /*
          * Dirty & Deferred IO
          */
@@ -649,14 +652,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
         if (unlikely(ret != 0))
                 goto err_unlock;
 
-        if (vmw_bo->gmr_bound) {
-                vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id);
-                spin_lock(&bo->glob->lru_lock);
-                ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id);
-                spin_unlock(&bo->glob->lru_lock);
-                vmw_bo->gmr_bound = NULL;
-        }
-
         ret = ttm_bo_validate(bo, &ne_placement, false, false);
         ttm_bo_unreserve(bo);
 err_unlock:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 01feb48af333..39d43a01d846 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -29,6 +29,25 @@
 #include "drmP.h"
 #include "ttm/ttm_placement.h"
 
+bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
+{
+        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+        uint32_t fifo_min, hwversion;
+
+        fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
+                return false;
+
+        hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+        if (hwversion == 0)
+                return false;
+
+        if (hwversion < SVGA3D_HWVERSION_WS65_B1)
+                return false;
+
+        return true;
+}
+
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
         __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -55,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
         fifo->reserved_size = 0;
         fifo->using_bounce_buffer = false;
 
+        mutex_init(&fifo->fifo_mutex);
         init_rwsem(&fifo->rwsem);
 
         /*
@@ -98,8 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
                  (unsigned int) min,
                  (unsigned int) fifo->capabilities);
 
-        dev_priv->fence_seq = (uint32_t) -100;
-        dev_priv->last_read_sequence = (uint32_t) -100;
+        atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
         iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
 
         return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -265,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
         uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
         int ret;
 
-        down_write(&fifo_state->rwsem);
+        mutex_lock(&fifo_state->fifo_mutex);
         max = ioread32(fifo_mem + SVGA_FIFO_MAX);
         min = ioread32(fifo_mem + SVGA_FIFO_MIN);
         next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
@@ -333,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
         }
 out_err:
         fifo_state->reserved_size = 0;
-        up_write(&fifo_state->rwsem);
+        mutex_unlock(&fifo_state->fifo_mutex);
         return NULL;
 }
 
@@ -408,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 
         }
 
+        down_write(&fifo_state->rwsem);
         if (fifo_state->using_bounce_buffer || reserveable) {
                 next_cmd += bytes;
                 if (next_cmd >= max)
@@ -419,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
         if (reserveable)
                 iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
         mb();
-        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
         up_write(&fifo_state->rwsem);
+        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+        mutex_unlock(&fifo_state->fifo_mutex);
 }
 
 int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
@@ -433,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 
         fm = vmw_fifo_reserve(dev_priv, bytes);
         if (unlikely(fm == NULL)) {
-                down_write(&fifo_state->rwsem);
-                *sequence = dev_priv->fence_seq;
-                up_write(&fifo_state->rwsem);
+                *sequence = atomic_read(&dev_priv->fence_seq);
                 ret = -ENOMEM;
                 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
                                         false, 3*HZ);
@@ -443,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
         }
 
         do {
-                *sequence = dev_priv->fence_seq++;
+                *sequence = atomic_add_return(1, &dev_priv->fence_seq);
         } while (*sequence == 0);
 
         if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
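
The fifo.c changes above split one rwsem into two locks: the new fifo_mutex serializes whole reserve()/commit() transactions (and now orders the host ping after the FIFO update), while the rwsem shrinks to cover only the window in which NEXT_CMD is published, so fence paths can still take it for read. A structural sketch of the resulting nesting, with the real FIFO bookkeeping elided and example_* names standing in for the driver's:

#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/types.h>

struct example_fifo {
        struct mutex fifo_mutex;        /* one reserve/commit in flight */
        struct rw_semaphore rwsem;      /* guards publication of NEXT_CMD */
};

static void *example_fifo_reserve(struct example_fifo *fifo, u32 bytes)
{
        mutex_lock(&fifo->fifo_mutex);  /* released in example_fifo_commit() */
        /* ... find space, hand the caller a pointer to fill in ... */
        return NULL;                    /* placeholder */
}

static void example_fifo_commit(struct example_fifo *fifo, u32 bytes)
{
        down_write(&fifo->rwsem);
        /* ... advance NEXT_CMD, clear the RESERVED register ... */
        up_write(&fifo->rwsem);

        /* ping the host after publishing, then end the transaction */
        mutex_unlock(&fifo->fifo_mutex);
}
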
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 5fa6a4ed238a..1c7a316454d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -43,11 +43,17 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                 param->value = vmw_overlay_num_free_overlays(dev_priv);
                 break;
         case DRM_VMW_PARAM_3D:
-                param->value = dev_priv->capabilities & SVGA_CAP_3D ? 1 : 0;
+                param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
                 break;
         case DRM_VMW_PARAM_FIFO_OFFSET:
                 param->value = dev_priv->mmio_start;
                 break;
+        case DRM_VMW_PARAM_HW_CAPS:
+                param->value = dev_priv->capabilities;
+                break;
+        case DRM_VMW_PARAM_FIFO_CAPS:
+                param->value = dev_priv->fifo.capabilities;
+                break;
         default:
                 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
                           param->param);
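
The getparam hunk above both tightens the 3D answer (a working FIFO 3D hwversion, not just the SVGA_CAP_3D bit) and exposes the raw hardware and FIFO capability words. A hedged userspace sketch of querying these through libdrm; it assumes the vmwgfx UAPI names DRM_VMW_GET_PARAM and struct drm_vmw_getparam_arg from vmwgfx_drm.h, with error handling trimmed:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int vmw_get_param(int fd, uint32_t param, uint64_t *value)
{
        struct drm_vmw_getparam_arg arg;
        int ret;

        memset(&arg, 0, sizeof(arg));
        arg.param = param;
        ret = drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg));
        if (ret == 0)
                *value = arg.value;
        return ret;
}

/* e.g.: uint64_t has_3d; vmw_get_param(fd, DRM_VMW_PARAM_3D, &has_3d); */
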
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index d40086fc8647..4d7cb5393860 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
                 return true;
 
         /**
-         * Below is to signal stale fences that have wrapped.
-         * First, block fence submission.
-         */
-
-        down_read(&fifo_state->rwsem);
-
-        /**
          * Then check if the sequence is higher than what we've actually
          * emitted. Then the fence is stale and signaled.
          */
 
-        ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
-        up_read(&fifo_state->rwsem);
+        ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+               > VMW_FENCE_WRAP);
 
         return ret;
 }
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 
         if (fifo_idle)
                 down_read(&fifo_state->rwsem);
-        signal_seq = dev_priv->fence_seq;
+        signal_seq = atomic_read(&dev_priv->fence_seq);
         ret = 0;
 
         for (;;) {
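
The stale-fence test above relies on unsigned wraparound: with 32-bit sequences, (emitted - queried) is small for fences that are genuinely pending and huge for fences so old that the counter has lapped them, which is why no lock is needed around the read anymore. A minimal standalone illustration; the threshold mirrors the driver's VMW_FENCE_WRAP:

#include <stdint.h>
#include <stdio.h>

#define VMW_FENCE_WRAP (1 << 24)

static int fence_is_stale(uint32_t emitted, uint32_t sequence)
{
        return (emitted - sequence) > VMW_FENCE_WRAP;
}

int main(void)
{
        /* pending: the queried sequence was emitted recently */
        printf("%d\n", fence_is_stale(105, 100));               /* 0 */
        /* stale: the queried sequence predates a counter wrap */
        printf("%d\n", fence_is_stale(100, 0xf0000000u));       /* 1 */
        return 0;
}
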
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b1af76e371c3..31f9afed0a63 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -553,9 +553,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
         } *cmd;
         int i, increment = 1;
 
-        if (!num_clips ||
-            !(dev_priv->fifo.capabilities &
-              SVGA_FIFO_CAP_SCREEN_OBJECT)) {
+        if (!num_clips) {
                 num_clips = 1;
                 clips = &norect;
                 norect.x1 = norect.y1 = 0;
@@ -574,10 +572,10 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 
         for (i = 0; i < num_clips; i++, clips += increment) {
                 cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
-                cmd[i].body.x = cpu_to_le32(clips[i].x1);
-                cmd[i].body.y = cpu_to_le32(clips[i].y1);
-                cmd[i].body.width = cpu_to_le32(clips[i].x2 - clips[i].x1);
-                cmd[i].body.height = cpu_to_le32(clips[i].y2 - clips[i].y1);
+                cmd[i].body.x = cpu_to_le32(clips->x1);
+                cmd[i].body.y = cpu_to_le32(clips->y1);
+                cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
+                cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
         }
 
         vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
@@ -709,6 +707,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
         if (ret)
                 goto try_dmabuf;
 
+        if (!surface->scanout)
+                goto err_not_scanout;
+
         ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
                                               mode_cmd->width, mode_cmd->height);
 
@@ -742,6 +743,13 @@ try_dmabuf:
         }
 
         return &vfb->base;
+
+err_not_scanout:
+        DRM_ERROR("surface not marked as scanout\n");
+        /* vmw_user_surface_lookup takes one ref */
+        vmw_surface_unreference(&surface);
+
+        return NULL;
 }
 
 static int vmw_kms_fb_changed(struct drm_device *dev)
@@ -761,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv)
 
         drm_mode_config_init(dev);
         dev->mode_config.funcs = &vmw_kms_funcs;
-        dev->mode_config.min_width = 640;
-        dev->mode_config.min_height = 480;
-        dev->mode_config.max_width = 2048;
-        dev->mode_config.max_height = 2048;
+        dev->mode_config.min_width = 1;
+        dev->mode_config.min_height = 1;
+        dev->mode_config.max_width = dev_priv->fb_max_width;
+        dev->mode_config.max_height = dev_priv->fb_max_height;
 
         ret = vmw_kms_init_legacy_display_system(dev_priv);
 
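
A note on the dirty-clip fix in this file: the loop header already advances the clips pointer by increment each iteration, so also indexing clips[i] read element i * (increment + 1), skipping rectangles and running past the end of the array. A self-contained illustration of the corrected iteration (the clip struct and helper here are stand-ins, not the driver's types):

#include <stdio.h>

struct clip { int x1, y1, x2, y2; };

static void use_rect(int x, int y, int w, int h)
{
        printf("update %d,%d %dx%d\n", x, y, w, h);
}

static void emit_clips(const struct clip *clips, int num_clips,
                       int increment)
{
        int i;

        for (i = 0; i < num_clips; i++, clips += increment) {
                /* clips[i] here would compound the pointer advance;
                 * the advancing pointer is already the current rect. */
                use_rect(clips->x1, clips->y1,
                         clips->x2 - clips->x1, clips->y2 - clips->y1);
        }
}

int main(void)
{
        struct clip rects[2] = { { 0, 0, 10, 10 }, { 10, 10, 20, 20 } };

        emit_clips(rects, 2, 1);
        return 0;
}
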
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index bb6e6a096d25..5b6eabeb7f51 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -104,7 +104,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
                                bool pin, bool interruptible)
 {
         struct ttm_buffer_object *bo = &buf->base;
-        struct ttm_bo_global *glob = bo->glob;
         struct ttm_placement *overlay_placement = &vmw_vram_placement;
         int ret;
 
@@ -116,14 +115,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
         if (unlikely(ret != 0))
                 goto err;
 
-        if (buf->gmr_bound) {
-                vmw_gmr_unbind(dev_priv, buf->gmr_id);
-                spin_lock(&glob->lru_lock);
-                ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
-                spin_unlock(&glob->lru_lock);
-                buf->gmr_bound = NULL;
-        }
-
         if (pin)
                 overlay_placement = &vmw_vram_ne_placement;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c012d5927f65..f8fbbc67a406 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -574,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 
         srf->flags = req->flags;
         srf->format = req->format;
+        srf->scanout = req->scanout;
         memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
         srf->num_sizes = 0;
         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
@@ -599,6 +600,26 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
         if (unlikely(ret != 0))
                 goto out_err1;
 
+        if (srf->scanout &&
+            srf->num_sizes == 1 &&
+            srf->sizes[0].width == 64 &&
+            srf->sizes[0].height == 64 &&
+            srf->format == SVGA3D_A8R8G8B8) {
+
+                srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
+                /* clear the image */
+                if (srf->snooper.image) {
+                        memset(srf->snooper.image, 0x00, 64 * 64 * 4);
+                } else {
+                        DRM_ERROR("Failed to allocate cursor_image\n");
+                        ret = -ENOMEM;
+                        goto out_err1;
+                }
+        } else {
+                srf->snooper.image = NULL;
+        }
+        srf->snooper.crtc = NULL;
+
         user_srf->base.shareable = false;
         user_srf->base.tfile = NULL;
 
@@ -622,24 +643,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                 return ret;
         }
 
-        if (srf->flags & (1 << 9) &&
-            srf->num_sizes == 1 &&
-            srf->sizes[0].width == 64 &&
-            srf->sizes[0].height == 64 &&
-            srf->format == SVGA3D_A8R8G8B8) {
-
-                srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
-                /* clear the image */
-                if (srf->snooper.image)
-                        memset(srf->snooper.image, 0x00, 64 * 64 * 4);
-                else
-                        DRM_ERROR("Failed to allocate cursor_image\n");
-
-        } else {
-                srf->snooper.image = NULL;
-        }
-        srf->snooper.crtc = NULL;
-
         rep->sid = user_srf->base.hash.key;
         if (rep->sid == SVGA3D_INVALID_ID)
                 DRM_ERROR("Created bad Surface ID.\n");
@@ -754,20 +757,29 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
         return bo_user_size + page_array_size;
 }
 
-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
 {
         struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
         struct ttm_bo_global *glob = bo->glob;
         struct vmw_private *dev_priv =
                 container_of(bo->bdev, struct vmw_private, bdev);
 
-        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
         if (vmw_bo->gmr_bound) {
                 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
                 spin_lock(&glob->lru_lock);
                 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
                 spin_unlock(&glob->lru_lock);
+                vmw_bo->gmr_bound = false;
         }
+}
+
+void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+{
+        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+        struct ttm_bo_global *glob = bo->glob;
+
+        vmw_dmabuf_gmr_unbind(bo);
+        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
         kfree(vmw_bo);
 }
 
@@ -813,18 +825,10 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 {
         struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-        struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
         struct ttm_bo_global *glob = bo->glob;
-        struct vmw_private *dev_priv =
-                container_of(bo->bdev, struct vmw_private, bdev);
 
+        vmw_dmabuf_gmr_unbind(bo);
         ttm_mem_global_free(glob->mem_glob, bo->acc_size);
-        if (vmw_bo->gmr_bound) {
-                vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
-                spin_lock(&glob->lru_lock);
-                ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
-                spin_unlock(&glob->lru_lock);
-        }
         kfree(vmw_user_bo);
 }
 
@@ -868,7 +872,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
         }
 
         ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
-                              &vmw_vram_placement, true,
+                              &vmw_vram_sys_placement, true,
                               &vmw_user_dmabuf_destroy);
         if (unlikely(ret != 0))
                 return ret;
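
Two things happen in the surface-define hunks above: the cursor-snooper allocation moves ahead of the base-object setup so an allocation failure can unwind through out_err1 and return -ENOMEM, and the magic (1 << 9) flag test becomes the explicit scanout field. A sketch of the same allocation written with kzalloc, which folds the kmalloc + memset pair into one call; this is an equivalent alternative under the hunk's own context, not what the patch itself does:

if (srf->scanout &&
    srf->num_sizes == 1 &&
    srf->sizes[0].width == 64 &&
    srf->sizes[0].height == 64 &&
    srf->format == SVGA3D_A8R8G8B8) {
        /* 64x64 ARGB cursor image, zeroed on allocation */
        srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
        if (srf->snooper.image == NULL) {
                DRM_ERROR("Failed to allocate cursor_image\n");
                ret = -ENOMEM;
                goto out_err1;
        }
} else {
        srf->snooper.image = NULL;
}
srf->snooper.crtc = NULL;
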
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 1ac0c93603c9..2f6cf69ecb39 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
                 remaining -= 7;
                 pr_devel("client 0x%p called 'target'\n", priv);
                 /* if target is default */
-                if (!strncmp(buf, "default", 7))
+                if (!strncmp(curr_pos, "default", 7))
                         pdev = pci_dev_get(vga_default_device());
                 else {
                         if (!vga_pci_str_to_vars(curr_pos, remaining,
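
The vgaarb fix above: the parser has already advanced curr_pos past the "target " prefix, but the comparison still used the stale buf pointer, so "target default" was matched against "target " and never recognized. A minimal standalone illustration of the difference:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *buf = "target default";
        const char *curr_pos = buf + 7;         /* past "target " */

        /* old, buggy: compares "target " against "default" */
        printf("%d\n", !strncmp(buf, "default", 7));            /* 0 */
        /* fixed: compares the advanced cursor */
        printf("%d\n", !strncmp(curr_pos, "default", 7));       /* 1 */
        return 0;
}
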